Dataset fields:
  query            string (lengths 9 to 3.4k)
  document         string (lengths 9 to 87.4k)
  metadata         dict
  negatives        sequence (lengths 4 to 101)
  negative_scores  sequence (lengths 4 to 101)
  document_score   string (lengths 3 to 10)
  document_rank    string (102 distinct values)

Example rows (values listed in field order):
Returns the plate scale as an `~astropy.units.Quantity`.
def plate_scale(self): return 206265 * uu.arcsec / (self.diameter.to('mm') * self.f)
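The document snippet above encodes the standard plate-scale relation: 206265 arcsec per radian divided by the focal length, with focal length taken as aperture diameter times the focal ratio. As a hedged, self-contained illustration (the `Telescope` class and the `diameter`/`f_ratio` names below are assumptions for the sketch, not the original API):

```python
# Illustrative sketch only: class and attribute names are assumed, not taken
# from the dataset's source repository.
import astropy.units as u

class Telescope:
    def __init__(self, diameter, f_ratio):
        self.diameter = diameter  # aperture diameter, e.g. 200 * u.mm
        self.f_ratio = f_ratio    # dimensionless focal ratio (f-number)

    @property
    def plate_scale(self):
        # 206265 arcsec per radian, divided by the focal length in mm,
        # gives the sky angle subtended per mm at the focal plane.
        focal_length = self.diameter.to(u.mm) * self.f_ratio
        return 206265 * u.arcsec / focal_length

# A 200 mm f/10 telescope -> roughly 103 arcsec per mm.
scope = Telescope(diameter=200 * u.mm, f_ratio=10)
print(scope.plate_scale)  # astropy Quantity in arcsec / mm
```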
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def scale(self):\n return self.scale_factor / CONSTANTS.AU", "def getScale(self):\n return _libsbml.Unit_getScale(self)", "def scale(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"scale\")", "def scale(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"scale\")", "def plate_scale(platescale):\n if platescale.unit.is_equivalent(si.arcsec / si.m):\n platescale_val = platescale.to_value(si.radian / si.m)\n elif platescale.unit.is_equivalent(si.m / si.arcsec):\n platescale_val = (1 / platescale).to_value(si.radian / si.m)\n else:\n raise UnitsError(\"The pixel scale must be in angle/distance or distance/angle\")\n\n return Equivalency(\n [(si.m, si.radian, lambda d: d * platescale_val, lambda a: a / platescale_val)],\n \"plate_scale\",\n {\"platescale\": platescale},\n )", "def scale(self):\n return self._gev_bijector.scale", "def scale(self):\n return self._scale", "def GetScale(self):\n ...", "def getnscale(self):\n return self.nscale", "def get_scale():\r\n\r\n \r\n return 0.5", "def scale(self):\n return self.distribution.scale", "def get_scale(units, compartmentId, volume, extracellularVolume):\r\n if compartmentId == 'c':\r\n V = volume\r\n else:\r\n V = extracellularVolume\r\n\r\n if units == 'uM':\r\n return 1. / N_AVOGADRO / V * 1e6\r\n elif units == 'mM':\r\n return 1. / N_AVOGADRO / V * 1e3\r\n elif units == 'molecules':\r\n return 1.\r\n else:\r\n raise Exception('Invalid units \"%s\"' % units)", "def castSize(self, scale):\n return self.camera.sensorSize * scale", "def getScale(self):\n \n dag_node = OpenMaya.MFnDagNode(self.thisObj)\n transform_node = OpenMaya.MFnTransform(dag_node.parent( 0 ))\n \n util = OpenMaya.MScriptUtil()\n util.createFromDouble(0.0, 0.0, 0.0)\n pointeur = util.asDoublePtr()\n transform_node.getScale(pointeur)\n \n sx = util.getDoubleArrayItem(pointeur, 0)\n sy = util.getDoubleArrayItem(pointeur, 1)\n sz = util.getDoubleArrayItem(pointeur, 2)\n\n return sx, sy, sz", "def scale(self):\n return self._a", "def getScale(self):\n return self.factor**self.turnOn", "def scale(self) -> Tuple[float, float]:\n return self._scale", "def scaling(self):\n return self.__scaling", "def get_scale(self):\n\n if not hasattr(self, \"scale\"):\n raise NotImplementedError(\"\"\"All end-use subclasses of Color should define\n a get_scale method.\"\"\")\n\n return self.scale", "def scaling(self):\n return self._scaling", "def scaling(self):\n return self._scaling", "def scale(self) -> Optional[pulumi.Input['ScaleArgs']]:\n return pulumi.get(self, \"scale\")", "def temperature_scale(self) -> str:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"temperature_scale\"))\r\n return self._temperature_scale", "def scale_value(self):\n return self._scale_value[2]", "def scale_to_factor(scale):\n return (B.pi / 2) / (2 * scale**2)", "def scale_parameter(self):\n return self._scale_parameter", "def factor_to_scale(factor):\n return 1 / B.sqrt(4 * factor / B.pi)", "def scaling(self):\n return self.stacked._box_scaling[1]", "def get_scale(self):\r\n try: return self.scale[0], self.scale[1], self.scale[2]\r\n except: return self.scale, self.scale, self.scale", "def _scale(x):\n scaleFactor = 1\n _ret = int(x/scaleFactor)\n return _ret", "def colorscale(self):\n return self[\"colorscale\"]", "def colorscale(self):\n return self['colorscale']", "def scaling_factor(self):\n bin_scale = self.spabins * self.spebins\n return bin_scale * self.int_time", "def get_scale_factor(rec, stack):\n \n rec_pixel_size = get_pixel_size_rec(rec)\n stack_pixel_size = 
get_pixel_size_stack(stack)\n \n return rec_pixel_size / stack_pixel_size", "def scale(self, value):\n return (float(value) - float(self.minimum)) / \\\n float(self.maximum - self.minimum) * 2.0 - 1.0", "def setScale(self, *args):\n return _libsbml.Unit_setScale(self, *args)", "def GetUserScale(*args, **kwargs):\n return _gdi_.DC_GetUserScale(*args, **kwargs)", "def get_zoom(self) -> float:\n transform = self.transform()\n cur_scale = (transform.m11(), transform.m22())\n return float(f\"{cur_scale[0] - 1.0:0.2f}\")", "def parallel_scale(self):\n return self.camera.parallel_scale", "def getScale(self, mode='ACC'):\t#good\r\n\t\tif mode.upper() == 'ACC':\r\n\t\t\treg = 0x1C\r\n\t\telif mode.upper() == 'GYR':\r\n\t\t\treg = 0x1B\t\t\r\n\t\telse:\r\n\t\t\treturn False\r\n\t\tcurrentVal = self.read(reg)\r\n\t\tcurrentVal = self.dec2BinList(currentVal)\r\n\t\tscaleSetting = (currentVal[4]*2) + (currentVal[3]*1) \r\n\t\tif mode.upper() == 'ACC':\r\n\t\t\tscale = 2**(scaleSetting+1) \r\n\t\telif mode.upper() == 'GYR':\r\n\t\t\tscale = (2**(scaleSetting+1))*125\r\n\t\telse:\r\n\t\t\treturn False\r\n\t\treturn scale,scaleSetting", "def imageScale(scale):\n\t\treturn max(1, int(scale * (InterfaceTools.getCanvasSize()[0] / height)))", "def pixel_scale(self):\n return np.abs(float(self.header[\"CDELT1\"]))", "def auto_scale_factor(self):\r\n return self.gref.auto_scale_factor", "def scale_quantity_gen(stock_size_gen, scale_factor):\n\n if scale_factor is not None:\n return stock_size_gen\\\n .map(f_vect=operations.scale(factor=scale_factor)) \\\n .map(f=operations.bound_value(lb=1))\n\n return stock_size_gen", "def scale(self):", "def scale(self, value):\r\n return (float(value)-float(self.minimum))/float(self.maximum-self.minimum)*2.0 - 1.0", "def _get_scaling(root):\n dpi = root.winfo_fpixels(\"1i\")\n scaling = dpi / 72.0\n logger.debug(\"dpi: %s, scaling: %s'\", dpi, scaling)\n return scaling", "def get_unit(scale):\n scale2unit = { 1e-9: 'nm',\n 1e-6: u'\\N{MICRO SIGN}m', #or hex id (lookup): u'\\u00B5'\n 1e-3: 'mm',\n 0.01: 'cm',\n 0.1:'dm',\n 1:'m',\n 1000:'km',\n # time\n 8.6400e4:'day',\n 3.1536e7:'yr',\n 3.1536e10:'ka',\n 3.1536e13:'Ma',\n #Pressure\n 1e9: 'GPa',\n 1e6: 'MPa',\n }\n return scale2unit[scale]", "def get_scale_op(self):\n\t\treturn self.variables.get('scale')", "def Scale(*args, **kwargs):\n return _gdi_.GraphicsMatrix_Scale(*args, **kwargs)", "def img_scale(self):\n return min(400, abs(self.size))", "def FindScale(self):\n\n ## 6 and from the cv code the distance is 6 then we are good\n print(\"TODO: Very hard\")", "def rscale(mag=10.0):\n if mag > 11.5:\n return 0.5\n elif mag > 11.0:\n return 1.0\n elif mag > 10.5:\n return 1.5\n elif mag > 10.0:\n return 1.5\n elif mag > 9.5:\n return 2.0\n elif mag > 9.0:\n return 2.5\n elif mag > 8.5:\n return 3.0\n else:\n return 3.5", "def scale(self, value):\n\t\toldscale = self.oldmax - self.oldmin\n\t\tnewscale = self.newmax - self.newmin\n\t\treturn (newscale * (value - self.oldmin) / oldscale) + self.newmin", "def _get_scale_from_magzp(self, magzp):\n scale = 10.0**( 0.4*(self['magzp_ref']-magzp) )\n return scale", "def get_scale_parameter(self):\n\n if self.scale_parameter == 0.0:\n shape_in_gamma_func = float(1+(1/self.shape_parameter))\n gamma_func = special.gamma(shape_in_gamma_func)\n self.scale_parameter = (self.mean_fire_recurrence/gamma_func)\n return self.scale_parameter\n else:\n return self.scale_parameter", "def doppler_scale(self):\n return self._dopplerscale", "def fraction_full_scale(self):\n return 
self._fraction_full_scale", "def max_capacity_per_scale(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"max_capacity_per_scale\")", "def get_scale_parameter(self):\r\n \r\n if self.scale_parameter == 0.0: \r\n shape_in_gamma_func = float(1+(1/self.shape_parameter))\r\n gamma_func = special.gamma(shape_in_gamma_func)\r\n self.scale_parameter = (self.mean_fire_recurrence/gamma_func)\r\n return self.scale_parameter\r\n else:\r\n return self.scale_parameter", "def reversescale(self):\n return self[\"reversescale\"]", "def ScaleFactor(aNum, nScaleBits=3, nMantBits=5):\n #Notes:\n #The scale factor should be the number of leading zeros\n\n ### YOUR CODE STARTS HERE ###\n R = 2**nScaleBits - 1 + nMantBits\n s_code = QuantizeUniform(aNum, R)\n code = s_code & (2**(R - 1) - 1)\n # First bit is sign bit\n mask = 1 << (R - 2)\n zeros = 0\n while mask:\n if mask & code == 0:\n zeros += 1\n mask >>= 1\n else:\n break\n if zeros < 2**nScaleBits - 1:\n scale = zeros\n else:\n scale = 2**nScaleBits - 1\n ### YOUR CODE ENDS HERE ###\n\n return int(scale)", "def reversescale(self):\n return self['reversescale']", "def scale(input):\n return (input - np.min(input)) / ((np.max(input) - np.min(input)))", "def getscales(self):\n return self.scales", "def showscale(self):\n return self[\"showscale\"]", "def _scale_param(self, resid_us):\n return((resid_us**2).sum().sum() / self.dof)", "def __get_size_multiplier(self, multiplier):\n if multiplier is None:\n result = 1\n elif multiplier in ['k', 'K']:\n result = self.__k_multiplier\n elif multiplier in ['m', 'M']:\n result = self.__m_multiplier\n elif multiplier in ['g', 'G']:\n result = self.__g_multiplier\n else:\n result = 0\n return result", "def loss_scale(self):\n return self._loss_scale", "def get_scale_factor(value_dict, max_length=os.get_terminal_size().columns):\n max_value = max(value_dict.values(), key=abs)\n try:\n scale = max_length / abs(max_value)\n except ZeroDivisionError:\n scale = 1\n return scale", "def getMultiplier(self):\n return _libsbml.Unit_getMultiplier(self)", "def amplitude_scale(self):\n return self._amplitude_scale", "def showscale(self):\n return self['showscale']", "def get_loss_scale(self):\n return self._loss_scale", "def rolloff_scale(self):\n return self._rolloffscale", "def get_wcs_pscale(wcs):\n from numpy import linalg\n det = linalg.det(wcs.wcs.cd)\n pscale = np.sqrt(np.abs(det))*3600.\n return pscale", "def cart_to_meter(self, cart):\n return cart*(1/self.scale)*149.6e9", "def scale_row(self):\n return self._scale_row", "def GetLogicalScale(*args, **kwargs):\n return _gdi_.DC_GetLogicalScale(*args, **kwargs)", "def get_scaled_value(self, value):\r\n raise NotImplementedError()", "def pressures_in_mb( pressures ):\n if not hasattr( pressures, 'units' ): return None\n if pressures.units=='mb':\n pressures.units = 'mbar' # udunits uses mb for something else\n return pressures[:]\n tmp = udunits(1.0,pressures.units)\n s,i = tmp.how('mbar')\n pressmb = s*pressures[:] + i\n return pressmb", "def scale_min_capacity(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"scale_min_capacity\")", "def scale_min_capacity(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"scale_min_capacity\")", "def unsetScale(self):\n return _libsbml.Unit_unsetScale(self)", "def getScaledWaveform(self):\n return self.data*self.volt_gain - self.volt_offset", "def get_multiplier(quality):\n\n if quality == \"low\":\n return 5\n elif quality == \"medium\":\n return 6\n elif quality == \"good\":\n return 7\n 
elif quality == \"high\":\n return 8\n return 6", "def any_scale(scale):\n return scale", "def getxscale(self) -> int:\n if not self.debug:\n # TODO: Verificar\n self.myFieldFox.write(\"TRAC:SPEC:AMPL:SCAL?\")\n ret = self.myFieldFox.read()\n else:\n ret = 'LOG'\n return ret", "def _genLenScale(self):\n # TODO: this function is an interface to specify non-stationary length scale field\n #pdb.set_trace()\n lenXField = self.lenXYZ[0, 0] * np.ones(self.nCell_kl)\n lenYField = self.lenXYZ[0, 1] * np.ones(self.nCell_kl)\n lenZField = self.lenXYZ[0, 2] * np.ones(self.nCell_kl)\n\n return lenXField, lenYField, lenZField", "def _pixel_scale(self, width=None, height=None, scale=None):\n if numpy.count_nonzero([width is not None, height is not None, scale is not None]) > 1:\n raise ValueError(\"Specify only one of width, height, or scale.\")\n if width is not None:\n scale = width / self._width\n elif height is not None:\n scale = height / self._height\n elif scale is None:\n scale = 1.0\n return scale", "def world_to_pixel_width(scale, width, pixels_per_meter = PIXELS_PER_METER):\n return int(scale * pixels_per_meter * width)", "def scale(self,s):\n return Vector(self.x * s, self.y * s, self.z * s)", "def zoom(self):\n res = np.max(self.metadata[\"resolution\"])\n\n if self.atlas_name == \"allen_human_500um\":\n logger.debug(\n \"ATLAS: setting zoom manually for human atlas, atlas needs fixing\"\n )\n return 350\n else:\n return 40 / res", "def scale(structure):\n from numpy.linalg import det\n if \"O\" in [atom.type for atom in structure]: spvol = 8.5**3/4e0\n elif \"Se\" in [atom.type for atom in structure]: spvol = 9.5**3/4e0\n elif \"Te\" in [atom.type for atom in structure]: spvol = 10.5**3/4e0\n else: raise ValueError(\"unknown atom.type: %s\" % (atom.type,))\n\n nfu = float(len(structure)/7)*0.5 # 0.5 because 2 f.u. in spinel unit-cell.\n vol = det(structure.cell)\n return (nfu * spvol / vol)**(1e0/3e0)", "def get_apply_scale(self, applyScaleFactor, scale_quality = 1.0):\n v = self.scale * self.scale_quality * scale_quality\n if applyScaleFactor:\n v *= self.scale_factor\n return v", "def get_scale_parameter(self):\n\n shape_in_gamma_func = float(1 + (1 / self._shape_parameter))\n gamma_func = special.gamma(shape_in_gamma_func)\n self._scale_parameter = self._mean_fire_recurrence / gamma_func", "def overlay_scale(self):\n return self._overlay_scale", "def adjust_quantity(self, quantity, min_is_zero=True):\n if self.min_size > 0.0 and quantity < self.min_size:\n if min_is_zero:\n return 0.0\n\n return self.min_size\n\n if self.max_size > 0.0 and quantity > self.max_size:\n return self.max_size\n\n if self.step_size > 0:\n precision = self._size_limits[3]\n return max(round(self.step_size * round(quantity / self.step_size), precision), self.min_size)\n\n return quantity", "def removeScale(*args):\n return _libsbml.Unit_removeScale(*args)", "def get_scale(scale='major', key=60):\n SCALE_DICT = get_keys()\n notes = [key] + [(key + i) for i in np.cumsum(SCALE_DICT[scale])]\n return notes" ]
[ "0.7407171", "0.73354304", "0.7272574", "0.7260109", "0.72580636", "0.71984255", "0.7091628", "0.7008195", "0.69788766", "0.6805889", "0.6759866", "0.67273027", "0.67187494", "0.6713132", "0.6710965", "0.6695409", "0.6635227", "0.6599181", "0.65680814", "0.6560833", "0.6560833", "0.6483989", "0.6483741", "0.64718527", "0.64370936", "0.6424952", "0.6417758", "0.63952535", "0.63668525", "0.6326123", "0.62714326", "0.6247941", "0.6232374", "0.6215905", "0.6181004", "0.6168018", "0.61677635", "0.61670804", "0.616255", "0.61457354", "0.6144036", "0.6125785", "0.6107309", "0.60538346", "0.60460585", "0.60429835", "0.6041712", "0.60201323", "0.5998965", "0.59784585", "0.59601396", "0.5922963", "0.5919793", "0.5911657", "0.58643574", "0.58327854", "0.5831614", "0.58315766", "0.58032584", "0.5793307", "0.5772511", "0.5756408", "0.57535434", "0.5752791", "0.5752703", "0.5746635", "0.57415766", "0.57410425", "0.57343733", "0.57271457", "0.570904", "0.57038283", "0.5694237", "0.5690058", "0.56781876", "0.5676739", "0.56707937", "0.5669154", "0.5660472", "0.5660006", "0.565926", "0.5655131", "0.5655131", "0.5648027", "0.5640961", "0.5638843", "0.56375206", "0.56155986", "0.5614031", "0.5604005", "0.55993396", "0.55984914", "0.5597647", "0.5596471", "0.55914867", "0.5585995", "0.5582581", "0.5581006", "0.5572195", "0.5553782" ]
0.7882506
0
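The `objective` metadata marks each row as a (query, document, negatives) triplet, and the score/rank fields above record how the positive document compares against the mined negatives. As a rough, hedged sketch of how such rows are typically consumed for contrastive training, assuming the classic sentence-transformers `fit()` API and a placeholder model name:

```python
# Hedged sketch: the model name is a placeholder and the row below is abridged;
# adapt the loading code to however this dataset is actually read.
from torch.utils.data import DataLoader
from sentence_transformers import SentenceTransformer, InputExample, losses

model = SentenceTransformer("all-MiniLM-L6-v2")

row = {
    "query": "Returns the plate scale as an `~astropy.units.Quantity`.",
    "document": "def plate_scale(self): return 206265 * uu.arcsec / ...",
    "negatives": ["def scale(self): ...", "def getScale(self): ..."],
}

# One (anchor, positive, negative) example per mined negative.
train_examples = [
    InputExample(texts=[row["query"], row["document"], neg])
    for neg in row["negatives"]
]

train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=16)
train_loss = losses.TripletLoss(model=model)

model.fit(train_objectives=[(train_dataloader, train_loss)], epochs=1)
```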
Identifies genes that are significantly enriched for insertions (CTGs). This function takes a DataFrame of insertions, coming from multiple samples, and identifies if any genes are more frequently affected by an insertion than would be expected by chance. These genes are called Commonly Targeted Genes (CTGs). CTGs are selected by comparing the number of insertions within the gene to the number of insertions that would be expected from the background insertion rate, which is modeled using a Poisson distribution.
def test_ctgs( insertions, # type: List[Insertion] reference, # type: Reference gene_ids=None, # type: Set[str] chromosomes=None, # type: Set[str] pattern=None, # type: str per_sample=True, # type: bool window=None #type: Tuple[int, int] ): # Default to shared chromosome sequences (typically drops some # of the more esoteric extra scaffold/patch sequences). if chromosomes is None: reference_seq = pyfaidx.Fasta(str(reference.fasta_path)) reference_gtf = GtfIterator(reference.indexed_gtf_path) chromosomes = list( set(reference_seq.keys()) & set(reference_gtf.contigs)) if len(chromosomes) == 0: ValueError('No chromosomes are shared between the reference ' 'sequence and reference gtf files') if len(chromosomes) == 0: raise ValueError('At least one chromosome must be given') # Determine gene windows using GTF. logging.info('Generating gene windows') gene_windows = _build_gene_windows( reference.indexed_gtf_path, window=window, chromosomes=chromosomes) # Subset insertions to gene intervals. insertions = _subset_to_windows(insertions, gene_windows) if gene_ids is None: gene_ids = set(ins.metadata['gene_id'] for ins in insertions) # Collapse insertions per gene/sample (recommended). # Corrects for hopping/multiple detection issues. if per_sample: logging.info('Collapsing insertions') insertions = list(_collapse_per_sample(insertions)) # Calculate total number of pattern occurrences within intervals. logging.info('Counting pattern occurrences') reference_seq = pyfaidx.Fasta(str(reference.fasta_path)) total = count_total( reference_seq, pattern=pattern, intervals=gene_windows.values()) # Calculate p-values for each gene. logging.info('Calculating significance for genes') insertion_trees = GenomicIntervalTree.from_objects_position( insertions, chrom_attr='seqname') p_values = { gene_id: test_region( insertions=insertions, reference_seq=reference_seq, region=gene_windows[gene_id], total=total, pattern=pattern, filters=[lambda ins, gid=gene_id: ins.metadata['gene_id'] == gid], insertion_trees=insertion_trees) for gene_id in gene_ids } # Build result frame. result = pd.DataFrame.from_records( iter(p_values.items()), columns=['gene_id', 'p_value']) # Calculate corrected p-value using bonferroni correction. result['q_value'] = (result['p_value'] * len(result)).clip_upper(1.0) # Sort by q-value and p-value. result.sort_values(by=['q_value', 'p_value'], inplace=True) if len(insertions) > 0: # Annotate with gene_name if possible. if 'gene_name' in insertions[0].metadata: name_map = { ins.metadata['gene_id']: ins.metadata['gene_name'] for ins in insertions } result.insert(1, 'gene_name', result['gene_id'].map(name_map)) else: result['gene_name'] = np.nan # Annotate with frequency. frequency = (Insertion.to_frame(insertions) .groupby('gene_id')['sample'].nunique() .reset_index(name='n_samples')) result = pd.merge(result, frequency, on='gene_id', how='left') else: result['gene_name'] = np.nan result['n_samples'] = np.nan return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def genes_GT():\n df1=pd.read_csv(config['geneInfo'], sep=\" \")\n df1=df1[df1.chr == '22']\n df2=pd.read_csv(config['counts'], sep=\" \")\n genes=df1.merge(df2.gene_id, on=\"gene_id\")\n return list(set(genes['gene_id']))", "def process_cgc(path, return_dataframe=False, fusions=False):\n # read in data\n df = pd.read_table(path)\n\n # keep small somatic variants\n if not fusions:\n s = df['Mutation Types']\n is_small = s.str.contains('Mis|F|N|S').fillna(False)\n is_somatic = ~df['Tumour Types(Somatic)'].isnull()\n df = df[is_small & is_somatic].copy()\n\n # label oncogenes / TSG\n df['Is Oncogene (CGC)'] = 'No'\n df.loc[df['Role in Cancer'].fillna('').str.contains('oncogene'), 'Is Oncogene'] = 'Yes'\n df['Is Tumor Suppressor Gene (CGC)'] = 'No'\n df.loc[df['Role in Cancer'].fillna('').str.contains('TSG'), 'Is Tumor Suppressor Gene'] = 'Yes'\n df['Is Driver Gene (CGC)'] = 'Yes'\n\n # rename columns\n df = df.rename(columns={'Entrez GeneId': 'Entrez Gene ID', 'Gene Symbol': 'Hugo Symbol'})\n\n # get gene names\n if not return_dataframe:\n cgc_genes = df['Gene Symbol'].tolist()\n else:\n cgc_genes = df\n\n return cgc_genes\n else:\n # return fusion gene information\n has_fus_partner = ~df['Translocation Partner'].isnull()\n output_list = []\n for ix, row in df[has_fus_partner].iterrows():\n g1 = row[\"Gene Symbol\"]\n for g2 in row['Translocation Partner'].split(', '):\n output_list.append([g1, g2])\n output_df = pd.DataFrame(output_list, columns=[\"Gene1\", \"Gene2\"])\n output_df['GENE_ID'] = output_df['Gene1'] + '--' + output_df['Gene2']\n\n if not return_dataframe:\n cgc_genes = list(set(output_df[\"Gene1\"].unique()) | set(output_df[\"Gene2\"]))\n else:\n cgc_genes = output_df\n\n return cgc_genes", "def check_chromosomes(fasta_chromosomes, gtf_chromosomes):\n fasta_unique = fasta_chromosomes - gtf_chromosomes\n gtf_unique = gtf_chromosomes - fasta_chromosomes\n if fasta_unique:\n logger.warning((\n 'The following chromosomes were found in the FASTA but doens\\'t have '\n 'any \"transcript\" features in the GTF: {}. '\n 'No sequences will be generated for these chromosomes.'\n ).format(', '.join(fasta_unique)))\n if gtf_unique:\n logger.warning((\n 'The following chromosomes were found to have \"transcript\" features '\n 'in the GTF but doens\\'t exist in the FASTA. '\n 'No sequences will be generated for these chromosomes.'\n ).format(', '.join(fasta_unique)))\n chromosomes = set.intersection(fasta_chromosomes, gtf_chromosomes)\n\n return chromosomes", "def gene_finder(dna, threshold):\n\n # YOUR IMPLEMENTATION HERE", "def check_gene_coverage(sequence_records, check_for_overlap=True):\n length_total = 0\n gene_length_total = 0\n total_length_by_feature = defaultdict(lambda: 0)\n for sequence_record in sequence_records:\n length_total += len(sequence_record.seq)\n for gene in sequence_record.features:\n gene_length_total += gene.location.end.position - gene.location.start.position\n # this section tries to keep track of subfeature types\n for feature in gene.sub_features:\n total_length_by_feature[feature.type] += len(feature)\n for subfeature in feature.sub_features:\n total_length_by_feature[subfeature.type] += len(subfeature)\n gene_coverage_fraction = float(gene_length_total)/length_total\n feature_coverage_fractions = [(feature,float(length)/gene_length_total) for feature,length \n in total_length_by_feature.items()]\n\n # TODO the by-feature coverage doesn't work because I'm only parsing the file for genes, not features!!! 
If I want to parse for features, I need to split things up into multiple passes etc again...\n #print total_length_by_feature\n\n # Check for overlapping genes and print a warning, since overlapping genes will make the measurement inaccurate\n if check_for_overlap:\n if check_for_overlapping_genes(sequence_record):\n print \"WARNING: There are overlapping genes! %% of length covered by genes may not be accurate.\"\n # MAYBE-TODO actually adjust the measurement for overlapping genes? Nah, too much work, not enough need for now.\n\n return gene_coverage_fraction, feature_coverage_fractions", "def findInteractions( targetGenes, geneTable ):\n pass", "def stats_gene(df):\n # res.write(f'Gene,Total,total[%],SGT\\n')\n taxa_count = len(df)\n df = df.sum().to_frame()\n df = df.rename(columns={0: 'Number of Taxa'})\n df[f'Percent of Total Taxa (out of {taxa_count})'] = round((df['Number of Taxa'] / taxa_count) * 100, 2)\n df = df.rename_axis('Gene Name')\n df = df.sort_values(by=['Number of Taxa'], ascending=False)\n df['SGT'] = ['yes'] * len(df)\n df.to_csv(f'{output_fold}/gene_stats.tsv', sep='\\t')", "def routine():\n genes = g.genes\n gene_db = db['ncbi_gene_docs']\n for gene in genes:\n count = gene_db.count({\"gene_id\": gene})\n if count is not 1:\n logger.debug(\"FixMe: {0};\\tCount: {1}\".format(gene, count))", "def simulate_generations(self, generations=DEFAULT_GENERATIONS):\n for i in range(generations):\n logging.getLogger().debug(self)\n self.__simulate_generation()\n\n if i < generations - 1:\n self.__delete_duplicates()\n\n return self.fittest_chromosome", "def per_gene_coverage(genes,df):\n\n sub_genes =[]\n\n #For every gene in the list, check the average coverage, if less than 100 add it to the final list.\n for gene in genes:\n coverage = average(df[df['GeneSymbol;Accession'] == gene]['percentage30'])\n\n if coverage < 100:\n sub_genes.append([gene.split(';')[0],round(coverage,2)])\n \n return sub_genes", "def matched_gc_bedfile(bedfile, matchfile, genome, number, size=None, min_bin_size=100):\n g = Genome(genome)\n genome_fa = g.filename\n try:\n fa = Fasta(matchfile)\n gc = [\n (seq.upper().count(\"C\") + seq.upper().count(\"G\")) / len(seq)\n for seq in fa.seqs\n ]\n sizes = [len(seq) for seq in fa.seqs]\n except Exception:\n try:\n # pylint: disable=unexpected-keyword-arg\n fields = pd.read_csv(matchfile, comment=\"#\", nrows=10, sep=\"\\t\").shape[1]\n tmp = (\n pybedtools.BedTool(matchfile).filter(lambda x: len(x) >= 10).saveas().fn\n )\n bed = pybedtools.BedTool(tmp)\n gc = np.array(\n [float(x[fields + 1]) for x in bed.nucleotide_content(fi=genome_fa)]\n )\n sizes = np.array([x.length for x in bed])\n gc = [round(x, 2) for x in gc]\n except Exception:\n logger.error(\"Please provide input file in BED or FASTA format\")\n raise\n\n # Get the median size of the sequences\n if size is None or size == 0:\n size = int(np.median(sizes))\n if np.std(sizes) > size * 0.05:\n logger.info(\"Sequences do not seem to be of equal size.\")\n logger.info(\n f\"GC% matched sequences of the median size ({size}) will be created\"\n )\n\n bins = [(0.0, 0.2), (0.8, 1)]\n for b in np.arange(0.2, 0.799, 0.05):\n bins.append((b, b + 0.05))\n\n fraction = number / len(gc)\n gc = np.array(gc)\n # print(\"GC\", gc)\n bin_count = []\n for b_start, b_end in bins:\n bin_count.append(\n int(np.sum((gc > round(b_start, 2)) & (gc <= round(b_end, 2))) * fraction)\n )\n\n # To make te requested number, divide remaining over\n # all bins that have counts\n rest = number - sum(bin_count)\n i = 0\n for _ in 
range(rest):\n while bin_count[i % len(bins)] == 0:\n i += 1\n bin_count[i % len(bins)] += 1\n i += 1\n\n nseqs = max(bin_count) * len(bins)\n\n with NamedTemporaryFile(delete=False) as tmp:\n gc_bin_bedfile(\n tmp.name,\n genome,\n nseqs,\n length=size,\n bins=bins,\n random_state=None,\n min_bin_size=min_bin_size,\n )\n df = pd.read_csv(tmp.name, sep=\"\\t\", names=[\"chrom\", \"start\", \"end\", \"bin\"])\n # print(tmp.name)\n with open(bedfile, \"w\") as f:\n pass\n with open(bedfile, \"a\") as f:\n for (b_start, b_end), n in zip(bins, bin_count):\n if n == 0:\n continue\n # print(b_start, b_end, n)\n b = f\"{b_start:.2f}-{b_end:.2f}\"\n df.loc[df[\"bin\"] == b, [\"chrom\", \"start\", \"end\"]].sample(n).to_csv(\n f, sep=\"\\t\", header=False, index=False\n )", "def get_GO_presence_labels(genes_of_interest, min_GO_size=200, max_GO_size=300):\n genes = pd.Series(genes_of_interest)\n go_group_presence = {}\n\n for GO in go2geneIDs:\n gene_ids = go2geneIDs[GO]\n\n # boolean vector (length is num of genes in embedding)\n in_go_group_vector = genes.isin(gene_ids)\n\n if (in_go_group_vector.sum() > min_GO_size) & (in_go_group_vector.sum() < max_GO_size):\n go_group_presence[GO] = in_go_group_vector\n\n result = pd.DataFrame(go_group_presence)\n result.index = genes\n result.index.name = 'entrezgene'\n return result", "def check_multi_exon(tr_nc_index_dict, ncdf):\n\n\tfor gene in tr_nc_index_dict:\n\t\n\t\ttempdf = ncdf.iloc[tr_nc_index_dict[gene][0]:tr_nc_index_dict[gene][1]]\n\t\texon_count = 0\n\t\t\n\t\tfor i in tempdf.index:\n\t\t\tif tempdf.loc[i,'feature'] == 'exon':\n\t\t\t\texon_count += 1\n\t# print exon_count\n\t\tif exon_count >1 :\n\t\t\tprint \" more than one exon for %s\" % gene\n\t\t\tsys.exit()\t# prevent writing fasta if there is multi exon transcript", "def find_entropy(less_than_threshold,more_than_threshold):\n\n ''' Storing total number of records '''\n total_records = len(less_than_threshold) + len(more_than_threshold)\n\n ''' Calculating the probability '''\n less_than_probability = len(less_than_threshold) / total_records\n more_than_probability = len(more_than_threshold) / total_records\n\n ''' Converting the dataframe to numpy arrays '''\n less_than_threshold_values = less_than_threshold.values\n more_than_threshold_values = more_than_threshold.values\n\n ''' Storing the target attribute values (Muffin or Cupcake) for threshold values '''\n target_for_less_than = less_than_threshold_values[:, -1]\n target_for_more_than = more_than_threshold_values[:, -1]\n\n ''' Finding the counts of muffin and cupcake for values lower than and greater than threshold value '''\n recipe_type, less_than_cupcake_muffin_count = np.unique(target_for_less_than, return_counts=True)\n recipe_type, more_than_cupcake_muffin_count = np.unique(target_for_more_than, return_counts=True)\n\n # print(recipe_type, more_than_cupcake_muffin_count, len(more_than_cupcake_muffin_count))\n ''' To ensure there are at least 5 records in each node '''\n if less_than_cupcake_muffin_count.sum() < 5 or more_than_cupcake_muffin_count.sum() < 5:\n ''' Return horrible badness '''\n return math.inf\n else:\n ''' Find the entropies for less than threshold values and more than threshold values '''\n less_than_entropy = sum((less_than_cupcake_muffin_count / less_than_cupcake_muffin_count.sum()) * - np.log2(\n less_than_cupcake_muffin_count / less_than_cupcake_muffin_count.sum()))\n more_than_entropy = sum((more_than_cupcake_muffin_count / more_than_cupcake_muffin_count.sum()) * - np.log2(\n 
more_than_cupcake_muffin_count / more_than_cupcake_muffin_count.sum()))\n\n ''' Calculate the total weighted entropy '''\n total_weighted_entropy = less_than_probability * less_than_entropy + more_than_probability * more_than_entropy\n\n return total_weighted_entropy", "def find_common_genes(input_fp):\n trait_genes = {}\n all_genes = []\n common_genes = []\n snp_count = {}\n traits = {}\n matrix = []\n print('Extracting genes from eQTL interactions for...')\n _,_,t_files = next(os.walk(input_fp), (None, None, []))\n for trait_file in t_files:\n trait = trait_file[:len(trait_file)-4]\n print('\\t' + trait)\n tfile = open(os.path.join(input_fp, trait_file), 'r')\n eqtls= csv.reader(tfile, delimiter = '\\t') \n next(tfile, None)\n for line in eqtls:\n genes = []\n if trait in trait_genes.keys():\n genes = trait_genes[trait]\n genes.append(line[3])\n trait_genes[trait] = genes\n all_genes.append(line[3])\n tfile.close()\n \n for trait in trait_genes:\n trait_genes[trait] = list(set(trait_genes[trait]))\n all_genes = list(set(all_genes))\n print(len(all_genes))\n\n done_genes = []\n \"\"\"\n for snp in all_snps:\n occur = all_snps.count(snp)\n if occur > 1 and snp not in done_snps:\n done_snps.append(snp)\n for record in trait_snps:\n if snp == record[1] and record not in common_snps:\n common_snps.append(record)\n snp_count[snp] = occur\n to_dict = []\n if record[0] not in traits.keys():\n to_dict.append(snp)\n traits[record[0]] = to_dict\n else:\n to_dict = traits[record[0]]\n to_dict.append(snp)\n traits[record[0]] = to_dict\n \"\"\"\n for trait in trait_genes.keys():\n gene_count = {}\n genes_total = len(trait_genes[trait])\n compare_traits = trait_genes.keys()\n if genes_total > 3:\n for trait_gene in trait_genes[trait]:\n for compare in compare_traits:\n if trait_gene in trait_genes[compare]:\n if compare not in gene_count.keys():\n gene_count[compare] = 1\n else:\n gene_count[compare] += 1\n #else:\n # gene_count[compare] = 0\n row = []\n row.append(trait)\n for t in gene_count:\n ratio = round(gene_count[t]/float(genes_total), 7)\n matrix.append([trait, t, genes_total, gene_count[t], ratio])\n\n \"\"\"\n with open (output_fp + '/' + 'common_snps_count.txt', 'wb') as cluster_file:\n writer = csv.writer(cluster_file, delimiter = '\\t')\n writer.writerow(['snp', 'count'])\n for snp in snp_count:\n writer.writerow([snp,snp_count[snp]])\n \"\"\"\n\n with open ('gene_matrix.txt', 'w') as cluster_file:\n writer = csv.writer(cluster_file, delimiter = '\\t')\n writer.writerow(['trait_x', 'trait_y', '#total_genes', '#common_snps', \\\n 'ratio'])\n writer.writerows(matrix)", "def coxen_single_drug_gene_selection(\n source_data,\n target_data,\n drug_response_data,\n drug_response_col,\n tumor_col,\n prediction_power_measure=\"pearson\",\n num_predictive_gene=100,\n generalization_power_measure=\"ccc\",\n num_generalizable_gene=50,\n multi_drug_mode=False,\n):\n\n if isinstance(drug_response_col, str):\n drug_response_col = np.where(drug_response_data.columns == drug_response_col)[\n 0\n ][0]\n\n if isinstance(tumor_col, str):\n tumor_col = np.where(drug_response_data.columns == tumor_col)[0][0]\n\n drug_response_data = drug_response_data.copy()\n drug_response_data = drug_response_data.iloc[\n np.where(np.isin(drug_response_data.iloc[:, tumor_col], source_data.index))[0],\n :,\n ]\n\n source_data = source_data.copy()\n source_data = source_data.iloc[\n np.where(np.isin(source_data.index, drug_response_data.iloc[:, tumor_col]))[0],\n :,\n ]\n\n source_std_id = select_features_by_variation(\n 
source_data, variation_measure=\"std\", threshold=0.00000001\n )\n target_std_id = select_features_by_variation(\n target_data, variation_measure=\"std\", threshold=0.00000001\n )\n std_id = np.sort(np.intersect1d(source_std_id, target_std_id))\n source_data = source_data.iloc[:, std_id]\n target_data = target_data.copy()\n target_data = target_data.iloc[:, std_id]\n\n # Perform the first step of COXEN approach to select predictive genes. To avoid exceeding the memory limit,\n # the prediction power of genes is calculated in batches.\n batchSize = 1000\n numBatch = int(np.ceil(source_data.shape[1] / batchSize))\n prediction_power = np.empty((source_data.shape[1], 1))\n prediction_power.fill(np.nan)\n for i in range(numBatch):\n startIndex = i * batchSize\n endIndex = min((i + 1) * batchSize, source_data.shape[1])\n\n if prediction_power_measure == \"pearson\":\n cor_i = np.corrcoef(\n np.vstack(\n (\n np.transpose(\n source_data.iloc[:, startIndex:endIndex]\n .loc[drug_response_data.iloc[:, tumor_col], :]\n .values\n ),\n np.reshape(\n drug_response_data.iloc[:, drug_response_col].values,\n (1, drug_response_data.shape[0]),\n ),\n )\n )\n )\n prediction_power[startIndex:endIndex, 0] = abs(cor_i[:-1, -1])\n\n if prediction_power_measure == \"mutual_info\":\n mi = mutual_info_regression(\n X=source_data.iloc[:, startIndex:endIndex]\n .loc[drug_response_data.iloc[:, tumor_col], :]\n .values,\n y=drug_response_data.iloc[:, drug_response_col].values,\n )\n prediction_power[startIndex:endIndex, 0] = mi\n\n if multi_drug_mode:\n indices = np.argsort(-prediction_power[:, 0])\n return std_id[indices]\n\n num_predictive_gene = int(min(num_predictive_gene, source_data.shape[1]))\n gid1 = np.argsort(-prediction_power[:, 0])[:num_predictive_gene]\n\n # keep only predictive genes for source and target data\n source_data = source_data.iloc[:, gid1]\n target_data = target_data.iloc[:, gid1]\n num_generalizable_gene = int(min(num_generalizable_gene, len(gid1)))\n # perform the second step of COXEN approach to select generalizable genes among the predictive genes\n gid2 = generalization_feature_selection(\n source_data.values,\n target_data.values,\n generalization_power_measure,\n num_generalizable_gene,\n )\n\n indices = std_id[gid1[gid2]]\n\n return np.sort(indices)", "def match_gc_content(pos_one_hot, neg_one_hot, neg_pos_ratio=1):\n N, L, A = pos_one_hot.shape\n gc_pos = np.sum(np.sum(pos_one_hot[:,:,[1,2]], axis=2), axis=1)/L\n gc_neg = np.sum(np.sum(neg_one_hot[:,:,[1,2]], axis=2), axis=1)/L\n print(' Average GC content for positive sequences: %.3f'%(np.mean(gc_pos)))\n print(' Average GC content for negative sequences: %.3f'%(np.mean(gc_neg)))\n\n pos_index = np.argsort(gc_pos)\n neg_index = np.argsort(gc_neg)\n num_neg = len(neg_index)\n num_pos = len(pos_index)\n\n match_index = []\n if num_neg > num_pos:\n k = 0\n status = True\n for i in pos_index:\n for j in range(k, num_neg):\n if gc_pos[i] < gc_neg[neg_index[j]]:\n if k > num_neg:\n status = False\n break\n else:\n # print(\"%.2f vs %.2f\"%(gc_pos[i], gc_neg[neg_index[j]]))\n match_index.append(neg_index[j])\n k = j+1\n break\n if not status:\n break\n\n remainder = int(num_pos*neg_pos_ratio) - len(match_index)\n print(' Found %d GC-matched sequences.'%(len(match_index)))\n if remainder > 0:\n print(' Adding %d more random negative sequences.'%(remainder))\n remain_index = np.array(list(set(range(num_neg)) - set(match_index)))\n index = np.random.permutation(len(remain_index))[:remainder] \n # index = np.argsort(gc_neg[remain_index])[::-1]\n 
for n in remain_index[index[:remainder]]:\n match_index.append(n)\n \n match_index = np.array(match_index)\n print(' Average GC content for sub-sampled negative sequences: %.3f'%(np.mean(gc_neg[match_index])))\n\n return neg_one_hot[match_index], match_index", "def coxen_multi_drug_gene_selection(\n source_data,\n target_data,\n drug_response_data,\n drug_response_col,\n tumor_col,\n drug_col,\n prediction_power_measure=\"lm\",\n num_predictive_gene=100,\n generalization_power_measure=\"ccc\",\n num_generalizable_gene=50,\n union_of_single_drug_selection=False,\n):\n\n if isinstance(drug_response_col, str):\n drug_response_col = np.where(drug_response_data.columns == drug_response_col)[\n 0\n ][0]\n\n if isinstance(tumor_col, str):\n tumor_col = np.where(drug_response_data.columns == tumor_col)[0][0]\n\n if isinstance(drug_col, str):\n drug_col = np.where(drug_response_data.columns == drug_col)[0][0]\n\n drug_response_data = drug_response_data.copy()\n drug_response_data = drug_response_data.iloc[\n np.where(np.isin(drug_response_data.iloc[:, tumor_col], source_data.index))[0],\n :,\n ]\n drugs = np.unique(drug_response_data.iloc[:, drug_col])\n\n source_data = source_data.copy()\n source_data = source_data.iloc[\n np.where(np.isin(source_data.index, drug_response_data.iloc[:, tumor_col]))[0],\n :,\n ]\n\n source_std_id = select_features_by_variation(\n source_data, variation_measure=\"std\", threshold=0.00000001\n )\n target_std_id = select_features_by_variation(\n target_data, variation_measure=\"std\", threshold=0.00000001\n )\n std_id = np.sort(np.intersect1d(source_std_id, target_std_id))\n source_data = source_data.iloc[:, std_id]\n target_data = target_data.copy()\n target_data = target_data.iloc[:, std_id]\n\n num_predictive_gene = int(min(num_predictive_gene, source_data.shape[1]))\n\n if union_of_single_drug_selection:\n if (\n prediction_power_measure != \"pearson\"\n and prediction_power_measure != \"mutual_info\"\n ):\n print(\n \"pearson or mutual_info must be used as prediction_power_measure for taking the union of selected genes of every drugs\"\n )\n sys.exit(1)\n gid1 = np.array([]).astype(np.int64)\n for d in drugs:\n idd = np.where(drug_response_data.iloc[:, drug_col] == d)[0]\n response_d = drug_response_data.iloc[idd, :]\n gid2 = coxen_single_drug_gene_selection(\n source_data,\n target_data,\n response_d,\n drug_response_col,\n tumor_col,\n prediction_power_measure,\n num_predictive_gene,\n generalization_power_measure,\n num_generalizable_gene,\n )\n gid1 = np.union1d(gid1, gid2)\n return np.sort(std_id[gid1])\n\n if prediction_power_measure == \"lm\":\n pvalue = np.empty((source_data.shape[1], 1))\n pvalue.fill(np.nan)\n drug_m = np.identity(len(drugs))\n drug_m = pd.DataFrame(drug_m, index=drugs)\n drug_sample = drug_m.loc[drug_response_data.iloc[:, drug_col], :].values\n for i in range(source_data.shape[1]):\n ge_sample = (\n source_data.iloc[:, i].loc[drug_response_data.iloc[:, tumor_col]].values\n )\n sample = np.hstack(\n (np.reshape(ge_sample, (len(ge_sample), 1)), drug_sample)\n )\n sample = sm.add_constant(sample)\n mod = sm.OLS(drug_response_data.iloc[:, drug_response_col].values, sample)\n try:\n res = mod.fit()\n pvalue[i, 0] = res.pvalues[1]\n except ValueError:\n pvalue[i, 0] = 1\n\n gid1 = np.argsort(pvalue[:, 0])[:num_predictive_gene]\n\n elif (\n prediction_power_measure == \"pearson\"\n or prediction_power_measure == \"mutual_info\"\n ):\n gene_rank = np.empty((len(drugs), source_data.shape[1]))\n gene_rank.fill(np.nan)\n gene_rank = 
pd.DataFrame(gene_rank, index=drugs)\n for d in range(len(drugs)):\n idd = np.where(drug_response_data.iloc[:, drug_col] == drugs[d])[0]\n response_d = drug_response_data.iloc[idd, :]\n temp_rank = coxen_single_drug_gene_selection(\n source_data,\n target_data,\n response_d,\n drug_response_col,\n tumor_col,\n prediction_power_measure,\n num_predictive_gene=None,\n generalization_power_measure=None,\n num_generalizable_gene=None,\n multi_drug_mode=True,\n )\n gene_rank.iloc[d, : len(temp_rank)] = temp_rank\n for i in range(\n int(np.ceil(num_predictive_gene / len(drugs))), source_data.shape[1] + 1\n ):\n gid1 = np.unique(\n np.reshape(gene_rank.iloc[:, :i].values, (1, gene_rank.shape[0] * i))[\n 0, :\n ]\n )\n gid1 = gid1[np.where(np.invert(np.isnan(gid1)))[0]]\n if len(gid1) >= num_predictive_gene:\n break\n gid1 = gid1.astype(np.int64)\n\n # keep only predictive genes for source and target data\n source_data = source_data.iloc[:, gid1]\n target_data = target_data.iloc[:, gid1]\n num_generalizable_gene = int(min(num_generalizable_gene, len(gid1)))\n\n # perform the second step of COXEN approach to select generalizable genes among the predictive genes\n gid2 = generalization_feature_selection(\n source_data.values,\n target_data.values,\n generalization_power_measure,\n num_generalizable_gene,\n )\n\n indices = std_id[gid1[gid2]]\n\n return np.sort(indices)", "def _cmd_genemetrics(args):\n cnarr = read_cna(args.filename)\n segarr = read_cna(args.segment) if args.segment else None\n is_sample_female = verify_sample_sex(cnarr, args.sample_sex, args.male_reference, args.diploid_parx_genome)\n # TODO use the stats args\n table = do_genemetrics(\n cnarr,\n segarr,\n args.threshold,\n args.min_probes,\n args.drop_low_coverage,\n args.male_reference,\n is_sample_female,\n args.diploid_parx_genome,\n )\n logging.info(\"Found %d gene-level gains and losses\", len(table))\n write_dataframe(args.output, table)", "def cis_insertions():\n\n return [\n # 1000 bp upstream of Trp53bp2.\n Insertion(id='INS1', chromosome='1', position=182408172,\n strand=1, support=2, sample='s1',\n metadata=frozendict({'cis_id': 'CIS1'})),\n # Different chromosome.\n Insertion(id='INS2', chromosome='4', position=77843175,\n strand=1, support=2, sample='s1',\n metadata=frozendict({'cis_id': 'CIS2'}))\n ] # yapf: disable", "def lof_sig_scores(table, samples, verbose=True):\n mut_probdam = 'Missense:Probably'\n mut_syn = 'Synonymous'\n mut_trunc = ['Nonsense', 'Frameshift', 'Splice-site']\n mut_other = ['Missense:Benign', 'Missense:Possibly', 'MissenseNA', 'Indel']\n mut_all = [mut_probdam, mut_syn] + mut_trunc + mut_other\n\n # Calculate the global nonsynonymous:synonymous ratio ---------------------\n # Within each mutation category, sum counts (across all genes)\n tot_count_probdam = sum(table[mut_probdam])\n tot_count_syn = sum(table[mut_syn])\n tot_count_trunc = sum(itertools.chain(*(list(table[col])\n for col in mut_trunc)))\n tot_count_other = sum(itertools.chain(*(list(table[col])\n for col in mut_other)))\n\n # Global mutation count across all categories and genes (= 3504)\n tot_count_all = sum((tot_count_probdam, tot_count_syn, tot_count_trunc,\n tot_count_other))\n if verbose:\n print(\"Counted\", tot_count_all, \"mutations across\", len(table), \"genes\",\n \"and\", len(samples), \"samples\", file=sys.stderr)\n\n # Fraction of global mutations in each category of interest\n tot_frac_probdam = tot_count_probdam / tot_count_all\n tot_frac_syn = tot_count_syn / tot_count_all\n tot_frac_trunc = tot_count_trunc / 
tot_count_all\n\n # Global nonsynonymous:synonymous ratio = (1-syn)/syn (= 2.13697)\n tot_ns_s_ratio = (1 - tot_frac_syn) / tot_frac_syn\n\n # Calculate each gene's mutation score ------------------------------------\n for _idx, row in table.iterrows():\n gene_count_all = sum([row[col] for col in mut_all])\n if not gene_count_all:\n # Gene is not mutated at all --> zero score\n yield (row['Gene'], 0.0)\n continue\n\n # Initial score is the sum the 'Normalized' values across all samples\n raw_score = sum(row[sid] for sid in samples)\n\n # Adjust for NS:S ratio\n gene_count_syn = row[mut_syn]\n syn_factor = max(1 - tot_ns_s_ratio * gene_count_syn / gene_count_all,\n 0)\n new_score = raw_score * syn_factor\n\n # Adjust for \"probably damaging\" missense and truncating mutations\n gene_frac_probdam = row[mut_probdam] / gene_count_all\n probdam_factor = 1 + gene_frac_probdam - tot_frac_probdam\n gene_frac_trunc = sum([row[col] for col in mut_trunc]) / gene_count_all\n trunc_factor = gene_frac_trunc / tot_frac_trunc\n final_score = new_score * probdam_factor * trunc_factor\n yield (row['Gene'], final_score)", "def selection_profiles_by_chance(true, compare):\n n_neurons, M = true.shape\n probabilities = np.zeros(n_neurons)\n\n for neuron in range(n_neurons):\n n = np.count_nonzero(true[neuron])\n N = np.count_nonzero(compare[neuron])\n rv = hypergeom(M=M, n=n, N=N)\n\n overlap = np.count_nonzero(true[neuron] * compare[neuron])\n probabilities[neuron] = 1 - rv.cdf(x=overlap)\n\n return probabilities", "def test_genomic(self):\n self.c.execute(\"\"\"select expIds,expScores from genomic_test\"\"\")\n rows = self.c.fetchall()\n self.assertEqual(len(rows), 1) # one probe\n self.assertEqual(rows[0][0], '0,1,2,3,4') # ordered by sample id\n values = map(lambda x: float(x), rows[0][1].split(',')) # scores are in correct order\n self.assertTrue(values[0] - 0.479005065149792 < self.tolerance)\n self.assertTrue(values[1] - 25.1 < self.tolerance)\n self.assertTrue(values[2] - 5.3 < self.tolerance)\n self.assertTrue(values[3] - 3.1 < self.tolerance)\n self.assertTrue(values[4] - -1.23 < self.tolerance)", "def CreateGeneModels(genes_cmpt, transcripts_cmpt, exons_cmpt, utr3_cmpt, utr5_cmpt, cds_cmpt):\n gene_counter, gene_models = 1, []\n for gene_entry in genes_cmpt: ## Figure out the genes and transcripts associated feature \n if gene_entry in transcripts_cmpt:\n gene=init_gene() \n gene['id']=gene_counter\n gene['name']=gene_entry[1]\n gene['chr']=genes_cmpt[gene_entry]['chr']\n gene['source']=genes_cmpt[gene_entry]['source']\n gene['start']=genes_cmpt[gene_entry]['start']\n gene['stop']=genes_cmpt[gene_entry]['stop']\n gene['strand']=genes_cmpt[gene_entry]['strand']\n if not gene['strand'] in ['+', '-']:\n gene['strand']='.' # Strand info not known replaced with a dot symbol instead of None, ?, . 
etc.\n if len(transcripts_cmpt[gene_entry])>1:\n gene['is_alt_spliced'] = 1\n gene['is_alt'] = 1\n\t gtype=[]\n for tids in transcripts_cmpt[gene_entry]: ## transcript section related tags \n gene['transcripts'].append(tids['ID'])\n\t\tgtype.append(tids['type'])\n exon_cod, utr5_cod, utr3_cod, cds_cod = [], [], [], []\n if (gene['chr'], tids['ID']) in exons_cmpt:\n exon_cod = [[feat_exon['start'], feat_exon['stop']] for feat_exon in exons_cmpt[(gene['chr'], tids['ID'])]]\n if (gene['chr'], tids['ID']) in utr5_cmpt:\n utr5_cod = [[feat_utr5['start'], feat_utr5['stop']] for feat_utr5 in utr5_cmpt[(gene['chr'], tids['ID'])]]\n if (gene['chr'], tids['ID']) in utr3_cmpt:\n utr3_cod = [[feat_utr3['start'], feat_utr3['stop']] for feat_utr3 in utr3_cmpt[(gene['chr'], tids['ID'])]]\n if (gene['chr'], tids['ID']) in cds_cmpt:\n cds_cod = [[feat_cds['start'], feat_cds['stop']] for feat_cds in cds_cmpt[(gene['chr'], tids['ID'])]]\n if len(exon_cod) == 0: ## build exon coordinates from UTR3, UTR5 and CDS\n if cds_cod != []:\n exon_cod=createExon(gene['strand'], utr5_cod, cds_cod, utr3_cod) \n\n if gene['strand']=='-': ## general order to coordinates\n if len(exon_cod) >1:\n if exon_cod[0][0] > exon_cod[-1][0]:\n exon_cod.reverse()\n if len(cds_cod) >1:\n if cds_cod[0][0] > cds_cod[-1][0]: \n cds_cod.reverse()\n if len(utr3_cod) >1:\n if utr3_cod[0][0] > utr3_cod[-1][0]: \n utr3_cod.reverse()\n if len(utr5_cod) >1:\n if utr5_cod[0][0] > utr5_cod[-1][0]:\n utr5_cod.reverse()\n\n tis, cdsStop, tss, cleave = [], [], [], [] ## speacial sited in the gene region \n if cds_cod != []:\n if gene['strand'] == '+':\n tis = [cds_cod[0][0]]\n cdsStop = [cds_cod[-1][1]-3]\n elif gene['strand'] == '-':\n tis = [cds_cod[-1][1]]\n cdsStop = [cds_cod[0][0]+3]\n if utr5_cod != []:\n if gene['strand'] == '+':\n tss = [utr5_cod[0][0]]\n elif gene['strand'] == '-':\n tss = [utr5_cod[-1][1]]\n if utr3_cod != []:\n if gene['strand'] == '+':\n cleave = [utr3_cod[-1][1]]\n elif gene['strand'] == '-':\n cleave = [utr3_cod[0][0]]\n\n cds_status, exon_status, utr_status = 0, 0, 0 ## status of the complete elements of the gene\n if cds_cod != []: ## adding phase to the CDS region \n cds_cod_phase = addCDSphase(gene['strand'], cds_cod)\n cds_status = 1\n gene['cds_exons'].append(cds_cod_phase)\n\n if exon_cod != []: \n exon_status = 1\n if utr5_cod != [] or utr3_cod != []: \n utr_status = 1\n if cds_status != 0 and exon_status != 0 and utr_status != 0:\n gene['transcript_status'].append(1)\n else:\n gene['transcript_status'].append(0)\n\n if exon_cod: ## final check point for a valid gene model \n gene['exons'].append(exon_cod)\n gene['utr3_exons'].append(utr3_cod)\n gene['utr5_exons'].append(utr5_cod)\n gene['tis'].append(tis)\n gene['cdsStop'].append(cdsStop)\n gene['tss'].append(tss)\n gene['cleave'].append(cleave) \n\t \n\t gtype=list(set(gtype)) ## different types \n gene['gene_info']=dict(ID=gene_entry[1],\n\t\t\t\tSource=genes_cmpt[gene_entry]['source'],\n\t\t\t\tType=gtype)\n gene=FeatureValueFormat(gene) ## get prepare for MAT writing \n gene_counter+=1\n gene_models.append(gene)\n return gene_models", "def gc_content(sequence):\n gc = sequence.count('G') + sequence.count('C')\n atgc = sequence.count('A') + sequence.count('T') + sequence.count('G') + sequence.count('C')\n \n return (gc/atgc) * 100", "def feature_selection_information_gain(df, string_cols, threshold = 0.01, label_col = 'label', pcg = 1.0):\n\n df = df.select(string_cols + [label_col]).sample(withReplacement=False, fraction=pcg)\n\n df = 
only_categorical_columns(df, label_col=label_col)\n\n df.cache()\n\n print \"[Info] Number of rows in the DF: \" + str(df.count())\n\n string_cols = list(set(df.columns) - set([label_col]))\n\n # First pipeline: string indexer variables -> necessary to use them in models\n print('[INFO] Indexing categorical variables: ' + str(len(string_cols)))\n\n ig_df = information_gain(df=df, var_list=string_cols, label_col = label_col)\n\n cat_cols = ig_df\\\n .filter(col('ig') >= (threshold)*col('init_entropy'))\\\n .select('feature').rdd.map(lambda r: r['feature']).collect()\n\n # [ig[0] for ig in ig_results if (ig[1] >= threshold_abs)]\n\n return cat_cols", "def genes_feature_selection(methyl_data, cancer_genes):\n\n overlap_genes = cancer_genes.intersection(methyl_data.index)\n\n return methyl_data.ix[overlap_genes]", "def get_gene_sets(table, dominant):\n \n known = table[table[\"hgnc\"].isin(dominant)]\n gwide = set(known[\"hgnc\"][known[\"genomewide\"]])\n sugg = set(known[\"hgnc\"][known[\"suggestive\"]])\n \n gene_sets = {\"genomewide\": gwide, \"suggestive\": sugg}\n \n return gene_sets", "def test_simulated_gene_data(self):\n np.random.seed(0)\n\n sim_mat, cell_type, sim_de = simulate_matrix()\n\n # get scale\n scale = np.array(sim_mat.sum(axis=0)).squeeze()\n depth = (scale + 1) / np.median(scale)\n cov = [np.log(depth)]\n\n # precompute distribution params\n ntfmatrix = normalize_matrix(sim_mat, scale)\n alpha = atac_de.empirical_dispersion(ntfmatrix)\n\n # sseq_params = cr_de.compute_sseq_params(sim_mat)\n # alpha = sseq_params['phi_g']\n\n de_res = atac_de.NBGLM_differential_expression(sim_mat, np.flatnonzero(cell_type == 0), np.flatnonzero(cell_type == 1),\n model='nb', test_params={'cov': cov, 'alpha': alpha},\n verbose=False)\n\n sensitivity, ppv = evaluate_de_res(de_res, sim_de)\n\n assert sensitivity >= 0.94\n assert ppv >= 0.94", "def anno_gene_stats(anno_gene, loc_file, gene_file, isConvert):\r\n LocationNum = collections.Counter()\r\n LocationGene = collections.defaultdict(list)\r\n\r\n\r\n GeneCatSample = collections.defaultdict(lambda: collections.defaultdict(list))\r\n CatGeneSample = collections.defaultdict(lambda: collections.defaultdict(list))\r\n\r\n allLocations = set()\r\n anno_h = open(anno_gene, \"r\")\r\n for line in anno_h:\r\n lines = line.strip().split(\"\\t\")\r\n sample, location, number, gene = lines[:4]\r\n number = int(number)\r\n\r\n ### whether convert the category to \"Exon\" or \"Intron\"\r\n if isConvert == \"True\":\r\n if location == \"Intron\":\r\n newLoc = \"Intron\"\r\n else:\r\n newLoc = \"Exon\"\r\n elif isConvert == \"False\":\r\n newLoc = location\r\n else:\r\n print(\"Please check whether convert the original category to 'Intron' or 'Exon' based on True of False.\")\r\n sys.exit(1)\r\n\r\n allLocations.add(newLoc)\r\n ### get the dict of gene -> location -> sample\r\n genes = gene.split(\",\")\r\n for g in genes:\r\n GeneCatSample[g][newLoc].append(sample)\r\n\r\n ### get the location -> gene -> sample\r\n CatGeneSample[newLoc][g].append(sample)\r\n anno_h.close()\r\n\r\n\r\n ## output gene and number in samples\r\n ### sort all locations\r\n sortedAllLocation = sorted(list(allLocations))\r\n\r\n gene_h = open(gene_file, \"w\")\r\n\r\n headerSample = [l + \"_samples\" for l in sortedAllLocation]\r\n gene_h.write(\"Gene\\tTotal\\t%s\\t%s\\n\" % (\"\\t\".join(sortedAllLocation), \"\\t\".join(headerSample)))\r\n\r\n GeneRecord = {}\r\n GeneNumber = {}\r\n\r\n allGenes = sorted(list(GeneCatSample.keys()))\r\n for ge in allGenes:\r\n ### get the 
number and samples for each location of each gene\r\n GeneNum = []\r\n GeneSample = []\r\n\r\n for loc in sortedAllLocation:\r\n if loc in GeneCatSample[ge]:\r\n samples = GeneCatSample[ge][loc]\r\n ##############################\r\n ####### unique for samples\r\n samples = sorted(list(set(samples)))\r\n sampleNum = len(samples)\r\n else:\r\n sampleNum = 0\r\n samples = [\"-\"]\r\n\r\n GeneNum.append(sampleNum)\r\n GeneSample.append(samples)\r\n\r\n GeneNumSum = sum(GeneNum)\r\n CatNumOut = \"\\t\".join([str(g) for g in GeneNum])\r\n CatSampleOut = \"\\t\".join([\",\".join(s) for s in GeneSample])\r\n\r\n record = \"%s\\t%d\\t%s\\t%s\\t\" % (ge, GeneNumSum, CatNumOut, CatSampleOut)\r\n GeneNumber[ge] = GeneNumSum\r\n GeneRecord[ge] = record\r\n \r\n ### output\r\n GeneNumSorted = sort_dict_value(GeneNumber)\r\n for g, n in GeneNumSorted:\r\n r = GeneRecord[g]\r\n gene_h.write(\"%s\\n\" % r)\r\n\r\n gene_h.close() \r\n\r\n\r\n ### location and genes\r\n loc_h = open(loc_file, \"w\")\r\n loc_h.write(\"Location\\tGeneNumber\\tGenes\\tSampleNumber\\tSamples\\n\")\r\n for loc in sortedAllLocation:\r\n geneSample = CatGeneSample[loc]\r\n genes = sorted(list(geneSample.keys()))\r\n geneNum = len(genes)\r\n samNum = 0\r\n samList = []\r\n for ge in geneSample:\r\n sam = geneSample[ge]\r\n samList.append(sam)\r\n samNum += len(sam)\r\n samOut = \";\".join([\",\".join(s) for s in samList])\r\n loc_h.write(\"%s\\t%d\\t%s\\t%d\\t%s\\n\" % (loc, geneNum, \",\".join(genes), samNum, samOut))\r\n loc_h.close()", "def fitness(dna):\n fitness = 0\n for c in range(DNA_SIZE):\n if dna[c] == OPTIMAL[c]:\n fitness += 1\n return fitness", "def compare_gene_predictors(GM_genes, Glim_genes):\n GM_starts = []\n Glim_starts = []\n GM_only = []\n Glim_only = []\n shared_starts = []\n # GM_stops = []\n # Glim_stops = []\n Glim_unique = 0\n GM_unique = 0\n\n for i in range(1,GM_genes[\"total genes\"]+1):\n GM_starts.append(GM_genes[\"gene\" + str(i)][\"start\"])\n for j in range(1,Glim_genes[\"total genes\"]+1):\n Glim_starts.append (Glim_genes[\"gene\"+ str(j)][\"start\"])\n for i in range(0,len(GM_starts)):\n if GM_starts[i] not in Glim_starts:\n print(\"start at pos. \" + str(GM_starts[i]) + \" is unique to GM genes\")\n GM_only.append(GM_starts[i])\n GM_unique += 1\n else:\n shared_starts.append(GM_starts[i])\n for j in range(0,len(Glim_starts)):\n if Glim_starts[j] not in GM_starts:\n print (\"start at pos. 
\" + str(Glim_starts[j]) + \" is unique to Glim genes\")\n Glim_only.append(Glim_starts[j])\n Glim_unique += 1\n else:\n if GM_starts[j] not in shared_starts:\n shared_starts.append(GM_starts[j])\n shared_starts.sort()\n print (\"Number of unique Glimmer starts = \" + str(Glim_unique))\n print (\"Number of unique GM starts = \" + str(GM_unique))\n print(\"Shared starts =\\n\")\n for k in range(0,len(shared_starts)):\n print (shared_starts[k])", "def run_gsea_experiments(self, perc_redundant):\n print('Perc redundant: {}'.format(perc_redundant))\n\n for i in range(self.iterations):\n print('\\ti = {}'.format(i))\n\n modified_gene_sets = copy.copy(self.gene_sets)\n\n redundant_genes = random.sample(self.uniq_genes, int(perc_redundant * len(self.uniq_genes)))\n\n for gene in redundant_genes:\n including_gsets = [\n gs_name for gs_name, gs_entry in modified_gene_sets.items()\n if gene in gs_entry['genes']\n ]\n new_gene_name = gene + '_REDUNDANT'\n mod_gsets = random.sample(including_gsets, int(0.5 * len(including_gsets)))\n\n for gs in mod_gsets:\n orig_genes = modified_gene_sets[gs]['genes']\n modified_gene_sets[gs]['genes'] = [\n new_gene_name if g == gene else g for g in orig_genes\n ]\n\n # write modified gene sets to disk\n gmt_file = os.path.join(\n self.base_dir,\n 'output',\n 'gsea_{0:.2f}'.format(perc_redundant),\n 'reactome_gene_sets_{0:.2f}.gmt'.format(perc_redundant)\n )\n\n self.write_gmt_file(gmt_file, modified_gene_sets)\n\n # run GSEA\n cls_file = os.path.join(self.base_dir, 'output', 'gsea_exp.cls')\n gct_file = os.path.join(self.base_dir, 'output', 'gsea_exp.gct')\n\n gsea_dir = os.path.join(self.base_dir, 'output', 'gsea_{0:.2f}'.format(perc_redundant), 'gsea_output')\n shutil.rmtree(gsea_dir)\n os.mkdir(gsea_dir)\n\n self._run_gsea(gct_file, gmt_file, cls_file, gsea_dir)\n\n # gsea output files to process\n tumor_all_leading_genes_file = os.path.join(\n gsea_dir,\n 'syngsea.all.leading.genes.TUMOR.gmt'\n )\n\n tumor_leading_genes_file = os.path.join(\n gsea_dir,\n 'syngsea.leading.genes.TUMOR.gct'\n )\n\n tumor_summary_results_file = os.path.join(\n gsea_dir,\n 'syngsea.SUMMARY.RESULTS.REPORT.TUMOR.txt'\n )\n\n tumor_leading_genes = self.process_all_leading_genes(tumor_all_leading_genes_file)\n tumor_leading_gene_occurrences = self.process_leading_genes(tumor_leading_genes_file)\n tumor_summary_results = self.process_results_file(tumor_summary_results_file)\n\n gsea_output_dict = {\n 'leading_genes': tumor_leading_genes,\n 'leading_genes_by_occurrence': tumor_leading_gene_occurrences,\n 'summary': tumor_summary_results,\n 'gene_sets': modified_gene_sets\n }\n\n # save to pickle\n gsea_pickle_file = os.path.join(\n self.base_dir,\n 'output',\n 'gsea_{0:.2f}'.format(perc_redundant),\n 'trial_{}.pkl'.format(i)\n )\n\n pickle.dump(gsea_output_dict, open(gsea_pickle_file, 'wb'))", "def mutate(self):\n\n if len(self.genes) < 250:\n for g in self.genes:\n\n if MUTATION_CHANCE < random.random(): # random.random() gives float in [0,1)\n g.mutate()\n\n else:\n k = int(MUTATION_CHANCE*len(self.genes))\n for g in random.sample(self.genes,int(k)): #int(k)\n g.mutate()\n\n #To add random gene\n if ADD_GENE_CHANCE < random.random():\n self.genes.append(Gene(self.size)) #Call to Gene to add to genes list\n\n #To randomly remove genes\n\n if REM_GENE_CHANCE < random.random() and len(self.genes)>0:\n self.genes.remove(random.choice(self.genes))", "def calculate_gc_content(sequence):\n sequence = sequence.upper()\n sc = Counter(sequence)\n return round((sc['C'] + sc['G']) / (sc['A'] + 
sc['C'] + sc['G'] + sc['T']) * 100, 2)", "def pos_conserved(df, conservation):\n nb_rows, nb_cols = df.shape\n\n value_counts = df.apply(pd.Series.value_counts, axis=0).max(axis=0).ge(conservation * nb_rows)\n\n ge = [i for i, x in enumerate(value_counts) if x]\n return ge", "def test_gene_essentiality_from_data_qualitative(combined_dataframe):\n comparative_dataframe, exp = essential.prepare_qualitative_comparison(\n combined_dataframe\n )\n assert len(comparative_dataframe[comparative_dataframe[\"true_positives\"] == 1]) == 3", "def not_the_same_gene(min_indexes_df, level):\n if level == 'image':\n\n total_count = len(min_indexes_df)\n print (\"total number of images: \", total_count)\n info_csv_path = os.path.join(DATA_DIR, STUDY, \"human_ISH_info.csv\")\n info_csv = pd.read_csv(info_csv_path, index_col=None)\n\n gene_donor_mapping = info_csv[['gene_symbol', 'donor_id', 'image_id']]\n gene_donor_mapping['image_id']=gene_donor_mapping['image_id'].astype(str)\n min_indexes_df = pd.merge(min_indexes_df, gene_donor_mapping, left_on='id1', right_on='image_id')\n min_indexes_df = pd.merge(min_indexes_df, gene_donor_mapping, left_on='id2', right_on='image_id')\n\n not_the_same_image = min_indexes_df.query('image_id_x != image_id_y')\n not_the_same_gene = not_the_same_image.query('gene_symbol_x != gene_symbol_y')\n print(not_the_same_gene)\n\n match_count = len(not_the_same_gene)\n print(\"number of matches with not the same gene is: \", match_count)\n proportion = (match_count / total_count) * 100.0\n \n print (\"proportion is: \", proportion)\n return proportion", "def feature_selection_gbt(df, threshold, cols_to_filter, label_col = 'label', pcg = 1.0):\n print(\"[Info] feature selection by Gradient Boosting may take a long time\")\n\n df = df.select(cols_to_filter + [label_col]).sample(withReplacement=False, fraction=pcg)\n\n df = only_numeric_columns(df, label_col=label_col)\n\n df.cache()\n\n print \"[Info] Number of rows in the DF: \" + str(df.count())\n\n input_cols = list(set(df.columns) - set([label_col]))\n\n assembler = VectorAssembler(inputCols=input_cols, outputCol='features')\n\n numTrees, maxDepth, minInstancesPerNode, maxBins, subsamplingRate, maxIter = param_selection(df)\n\n gb_model = GBTClassifier(maxDepth=maxDepth, minInstancesPerNode=minInstancesPerNode, maxBins=maxBins,\n subsamplingRate=subsamplingRate, maxIter=maxIter, stepSize=0.1,\n minInfoGain=0.0, lossType='logistic', labelCol = label_col)\\\n\n pipeline = Pipeline(stages=[assembler, gb_model])\n\n pipeline_model = pipeline.fit(df)\n\n from churn_nrt.src.projects_utils.models.modeler import getOrderedRelevantFeats\n\n feat_imp_nrt = getOrderedRelevantFeats(pipeline_model, input_cols, \"f\")\n\n n = threshold if (threshold >= 1) else round(threshold * len(feat_imp_nrt))\n\n num_cols = [f[0] for f in feat_imp_nrt][0:n]\n\n return num_cols", "def classify(genes, G):\n\n import networkx as nx\n\n # Calculate distance\n gendist = gen_dist(genes)\n N = gendist.shape[0]\n\n # Make the distance with comparison \"upper triangular\" by\n # setting lower diagonal of matrix extremely large\n gendist[np.arange(N)[:,None] >= np.arange(N)] = 999999\n\n # Get a list of pairs of indices (i,j) satisfying distance(i,j) < G\n indices = np.where(gendist <= G)\n indices = list(zip(indices[0], indices[1]))\n\n # Now is the tricky part. I want to combine all the (i,j) indices\n # that share at least either i or j. This non-trivial problem can be\n # modeled as a graph problem (stackoverflow link). 
The solution is\n G = nx.Graph()\n G.add_edges_from(indices)\n return list(nx.connected_components(G))", "def genetic(initial, survival_rate, offspring, generations, mutation):\n\n\t# creates initial population\n\tprint(\"\\nCreating initial population...\")\n\tgenesis = initial_population(initial)\n\n\t# selects fittest individuals\n\tprint(\"Selecting fittest individuals...\")\n\tfittest = selection(genesis, survival_rate)\n\n\t# apply crossover to the fittest schedules\n\tprint(\"Something about the birds and the bees...\\n\")\n\tchildren = cross_over(fittest, offspring, 0, mutation)\n\n\n\t# for amount of generations\n\tfor i in range(generations):\n\n\t\t# select fittest children (that survived)\n\t\tfittest = selection(children, survival_rate)\n\n\t\t# perform cross over, add mutation\n\t\tchildren = cross_over(fittest, offspring, i + 1, mutation)\n\n\t# select fittest children\n\tfittest = selection(children, survival_rate)\n\n\t# extracting varibles best schedule\n\tallcourses = fittest[0][0][0]\n\tchambers = fittest[0][0][2]\n\tstudent_list = fittest[0][0][1]\n\tschedule = fittest[0][1]\n\n\t# calculate score\n\tfittest_score = calc_score(allcourses, student_list, chambers)\n\tprint(\"fittest: \", fittest_score)\n\n\n\tprint(gen_scores)\n\n\n\ttext_file = open(\"scores1.txt\", \"w\")\n\n\ttext_file.write(str(gen_scores))\n\n\ttext_file.close()\n\n\treturn schedule, allcourses, student_list, chambers", "def frac_gc(df, fasta_records, mapped_only=True, return_input=True):\n if not set(df[\"chrom\"].values).issubset(set(fasta_records.keys())):\n raise ValueError(\n \"chrom from intervals not in fasta_records: double-check genome agreement\"\n )\n if not isinstance(fasta_records, dict):\n raise ValueError(\n \"fasta records must be provided as an OrderedDict, can be created \"\n \"by bioframe.load_fasta\"\n )\n\n def _each(chrom_group):\n chrom = chrom_group.name\n seq = fasta_records[chrom]\n seq = str(seq[:])\n gc = []\n for _, bin in chrom_group.iterrows():\n s = seq[bin.start : bin.end]\n gc.append(seq_gc(s, mapped_only=mapped_only))\n return gc\n\n out = df.groupby(\"chrom\", sort=False).apply(_each)\n\n if return_input:\n return pd.concat(\n [df, pd.Series(data=np.concatenate(out), index=df.index).rename(\"GC\")],\n axis=\"columns\",\n )\n else:\n return pd.Series(data=np.concatenate(out), index=df.index).rename(\"GC\")", "def compute_gc(seq): # seq should be a string\n num_GC = list(seq).count('g')+list(seq).count('c')+list(seq).count('G')+list(seq).count('C')\n amount_GC = num_GC/len(seq)\n return amount_GC", "def test_check_cds_20(self):\n self.cds1.gene = \"11\"\n import_genome.check_cds(self.cds1, self.eval_flags)\n count = count_status(self.cds1, \"error\", \"warning\")\n self.assertEqual(count, 1)", "def getGC(self):\n numGC = self.sequence.upper().count(\"G\") + self.sequence.upper().count(\"C\")\n self.gc = float(numGC)/len(self.sequence)\n return self.gc", "def stats_orgs(df, new_data=False):\n rows = []\n\n if new_data:\n df = df[df.index.isin(in_taxa_dict.keys())]\n else:\n df = df[df.index.isin(db_taxa_dict.keys())]\n\n df2 = df.copy()\n df2[df2 >= 1] = 1\n\n df = df.sum(axis=1).to_frame()\n\n if new_data:\n df[f\"Genes out of {len(matrix.columns)}\"] = df2.sum(axis=1).to_frame()\n df = df.rename(columns={0: f\"Sequences Collected\"})\n\n else:\n df = df.rename(columns={0: f\"Genes out of {len(matrix.columns)}\"})\n\n # Fill in taxonomic information\n if new_data:\n list_of_dicts = [{key: value[i] for key, value in in_taxa_dict.items()} for i in range(3)]\n else:\n 
list_of_dicts = [{key: value[i] for key, value in db_taxa_dict.items()} for i in range(3)]\n df['Long Name'] = df.index.map(list_of_dicts[2])\n df['Higher Taxonomy'] = df.index.map(list_of_dicts[0])\n df['Lower Taxonomy'] = df.index.map(list_of_dicts[1])\n\n # Rearrange Columns to Put Genes after taxa stats\n cols = df.columns.tolist()\n cols = cols[2:] + cols[:2]\n df = df[cols]\n\n if new_data:\n routes_dict = get_routes()\n list_of_routes_dicts = [{key: value[i] for key, value in routes_dict.items()} for i in range(3)]\n df[\"#SBH\"] = df.index.map(list_of_routes_dicts[0])\n df[\"#BBH\"] = df.index.map(list_of_routes_dicts[1])\n df[\"#HMM\"] = df.index.map(list_of_routes_dicts[2])\n out_filename = 'new_taxa_stats.tsv'\n else:\n out_filename = 'db_taxa_stats.tsv'\n\n # Fill in columns for including in SGT construction. By default all are yes\n has_paralogs = check_paralogs()\n if new_data:\n sgt_dict = {org: 'yes' for org in in_taxa_dict.keys()}\n else:\n sgt_dict = {org: 'yes' for org in db_taxa_dict.keys()}\n df['SGT'] = df.index.map(sgt_dict)\n\n # Fill in column for paralogs. If no paralogs entry is 'none'.\n # If there are paralogs entry is 'yes'. If there are paralogs, but --ortholog_only is given entry is 'no'.\n if new_data:\n pass\n else:\n paralogs_dict = {org: ('yes' if org in has_paralogs and not args.orthologs_only\n else 'no' if org in has_paralogs and args.orthologs_only else 'none')\n for org in db_taxa_dict}\n df['Paralogs'] = df.index.map(paralogs_dict)\n\n df = df.rename_axis('Unique ID')\n df.to_csv(f'{output_fold}/{out_filename}', sep='\\t')", "def precomp_threshold(dof, len_sim=1e5, beta=.75):\n for d in dof:\n cdt = GaussianCusum(arl=None, beta=beta)\n cdt.fit(x=np.zeros((1, d)), estimate_threshold=True, len_simulation=len_sim,\n verbose=True, precompute_thresholds=True)", "def precomp_threshold(dof, len_sim=1e5, beta=.75):\n for d in dof:\n cdt = GaussianCusum(arl=None, beta=beta)\n cdt.fit(x=np.zeros((1, d)), estimate_threshold=True, len_simulation=len_sim,\n verbose=True, precompute_thresholds=True)", "def simulate_generations(gene_pool, environment, gen=DEFAULT_GENERATIONS):\n seq_to_fitness = multiprocessing.Manager().dict()\n chromosomes = []\n fittest_chromosome = []\n\n for i in range(DEFAULT_POPULATION_SIZE):\n chromosomes.append(generate_random_gene_sequence(gene_pool))\n\n for i in range(gen):\n chromosomes, fittest_chromosome = simulate_generation(chromosomes,\n gene_pool,\n environment,\n seq_to_fitness)\n\n if i < gen - 1:\n chromosomes = delete_duplicates(chromosomes, gene_pool)\n\n return fittest_chromosome", "def find_gene_by_pos_gff3(insertion_pos, chromosome_GFF_record, detailed_features=False, nearest_genes_for_intergenic=False, \n quiet=False):\n from BCBio import GFF # importing here instead of toplevel because sometimes it's missing and I want to be able to use this file\n # MAYBE-TODO add gene name as well as ID? But gff parsing doesn't seem to grab those at all - leave it for a separate function.\n # (see experiments/arrayed_library/1311_small-lib_mutant-distribution-file/notes.txt)\n # MAYBE-TODO add gene lengths? 
Right now they can be inferred from the distances to gene start/end for mutants in genes,\n # which seems sufficient.\n\n # get the needed information from either input format\n strand, ins_start, ins_end = _get_insertion_info(insertion_pos, None)\n\n ### Go over all the genes in the chromosome record, \n # and calculate the distance from the insertion (or 0 if insertion overlaps the gene)\n gene_distances = []\n for gene in chromosome_GFF_record.features:\n # for GFF positions, always add 1 to the gene/feature start, because BCBio uses 0-based and I use 1-based, \n # but don't add 1 to the end, because BCBio uses end-exclusive and I use end-inclusive.\n gene_start, gene_end = gene.location.start.position+1, gene.location.end.position\n if position_test_overlap(gene_start, gene_end, ins_start, ins_end): gene_distances.append((gene, 0))\n elif nearest_genes_for_intergenic:\n if gene_end < ins_start: gene_distances.append((gene, gene_end-ins_start))\n elif gene_start > ins_end: gene_distances.append((gene, gene_start-ins_end))\n else: raise MutantError(\"Gene position confusion!\")\n\n ### Pick genes to return - either all the genes that overlap the insertion, OR the closest gene on each side if none overlap\n if not gene_distances:\n nearest_genes = []\n elif min(abs(dist) for (gene,dist) in gene_distances) == 0:\n nearest_genes = [(gene,dist) for (gene,dist) in gene_distances if dist==0]\n elif nearest_genes_for_intergenic:\n # note that sometimes there ARE no genes on one/both sides!\n nearest_genes = []\n genes_upstream = [(gene,dist) for (gene,dist) in gene_distances if dist < 0]\n genes_downstream = [(gene,dist) for (gene,dist) in gene_distances if dist > 0]\n if genes_upstream: nearest_genes.append(max(genes_upstream, key = lambda gene_and_dist: gene_and_dist[1]))\n if genes_downstream: nearest_genes.append(min(genes_downstream, key = lambda gene_and_dist: gene_and_dist[1]))\n else:\n nearest_genes = []\n\n ### Get all the data for each gene\n # (see notes_on_GFF_parsing.txt for what a GFF3 record (chromosome_GFF_record) will be like)\n gene_data_list = []\n for (gene,dist) in nearest_genes:\n gene_start, gene_end = gene.location.start.position+1, gene.location.end.position\n gene_ID = gene.id\n # for mutants in genes, calculate orientation of insertion_pos vs gene\n if dist == 0:\n if strand=='both': orientation = 'both'\n elif gene.strand==1: orientation = 'sense' if strand=='+' else 'antisense'\n elif gene.strand==-1: orientation = 'sense' if strand=='-' else 'antisense'\n else: orientation = '?'\n # for intergenic mutants, check whether the mutant is upstream or downstream of the gene, instead of orientation\n else:\n if dist * gene.strand < 0: orientation = 'downstream'\n else: orientation = 'upstream'\n # calculate distances:\n # - if insertion is inside gene, give distance to 5' end and 3' end, separated by a comma\n # - if it's intergenic, just give the distance to the closest side of the gene (already calculated)\n if dist == 0:\n dist1 = ins_end - gene_start\n dist2 = gene_end - ins_start\n if gene.strand==1: distances = '%s,%s'%(dist1,dist2)\n else: distances = '%s,%s'%(dist2,dist1)\n else: distances = str(abs(dist))\n # basic features: intergenic, gene, gene edge\n if dist == 0:\n # if it overlaps an edge, note that in features_basic by adding 'gene_edge'\n # (MAYBE-TODO if I look at things like inner/outer flanking regions, this is where that would go as well)\n if position_test_contains(gene_start, gene_end, ins_start,ins_end): features_basic = []\n else: features_basic 
= ['gene_edge']\n else: features_basic = ['intergenic']\n # figure out which feature of the gene the insertion is in, IF we're looking for detailed ones (it's a lot of code)\n if dist != 0: inner_feature = ''\n elif not detailed_features: inner_feature = '?'\n else:\n if len(gene.sub_features)==0: inner_features = ['no_mRNA']\n else: inner_features = []\n for mRNA in gene.sub_features:\n if gene.sub_features[0].type != 'mRNA':\n if not quiet:\n print(\"Warning: gene %s in gff file has unexpected non-mRNA sub-features! \"%gene_ID\n +\"Returning '??' feature.\")\n inner_features.append('??')\n else:\n mRNA_start, mRNA_end = mRNA.location.start.position+1,mRNA.location.end.position\n # if insertion_pos is outside the mRNA, use 'outside_mRNA' as inner_feature\n if not position_test_overlap(mRNA_start, mRNA_end, ins_start, ins_end):\n inner_features.append('outside_mRNA')\n # if insertion_pos is inside the mRNA and mRNA has no subfeatures, use 'mRNA_no_exons' as inner_feature\n elif len(mRNA.sub_features)==0: \n if position_test_contains(mRNA_start, mRNA_end, ins_start, ins_end): inner_features.append('mRNA_no_exons')\n else: inner_features.append('mRNA_edge')\n else: \n # otherwise go over all subfeatures, see which ones contain/overlap insertion_pos\n # (check for overlap only if the contains test failed)\n features_inside = []\n if position_test_contains(mRNA_start, mRNA_end, ins_start, ins_end): features_edge = []\n else: features_edge = ['mRNA_edge']\n for feature in mRNA.sub_features:\n feature_start, feature_end = feature.location.start.position+1, feature.location.end.position\n try: feature_type = GENE_FEATURE_NAMES[feature.type]\n except KeyError: feature_type = feature.type\n if position_test_contains(feature_start, feature_end, ins_start, ins_end):\n features_inside.append(feature_type)\n elif position_test_overlap(feature_start, feature_end, ins_start, ins_end):\n features_edge.append(feature_type)\n # MAYBE-TODO may want to treat exons before 5'UTR or after 3'UTR specially? \n # Not worth it, none in current file.\n # if insertion_pos is inside a single mRNA subfeature, use the type of the subfeature as inner_feature\n if len(features_inside)==1 and len(features_edge)==0:\n inner_features.append(features_inside[0])\n # if insertion_pos is on the edge of two mRNA subfeatures, use 'subfeature1/subfeature2'\n elif len(features_inside)==0 and len(features_edge)==2:\n inner_features.append('/'.join(features_edge))\n # MAYBE-TODO treat insertions CLOSE to an edge specially too? 
How large is a splice junction?\n # if insertion_pos is on the edge of ONE mRNA subfeature, or not touching any subfeatures at all, \n # the implied subfeature is either an intron (if between features) or mRNA_before/after_exons, \n # (which shouldn't happen in normal files).\n elif len(features_inside)==0 and len(features_edge)<=1:\n # figure out what the implied feature is - outside intron in CDS (normal) or UTR, or outside all exons\n # note that before/after and 5'/3' are swapped if gene is on minus strand!\n CDS_features = [feature for feature in mRNA.sub_features if feature.type=='CDS']\n if ins_start < min([feature.location.start.position+1 for feature in mRNA.sub_features]):\n if gene.strand==1: implied_feature = 'mRNA_before_exons'\n elif gene.strand==-1: implied_feature = 'mRNA_after_exons'\n elif ins_end > max([feature.location.end.position for feature in mRNA.sub_features]):\n if gene.strand==1: implied_feature = 'mRNA_after_exons'\n elif gene.strand==-1: implied_feature = 'mRNA_before_exons'\n elif ins_start < min([feature.location.start.position+1 for feature in CDS_features]):\n if gene.strand==1: implied_feature = \"5'UTR_intron\"\n elif gene.strand==-1: implied_feature = \"3'UTR_intron\"\n elif ins_end > max([feature.location.end.position for feature in CDS_features]):\n if gene.strand==1: implied_feature = \"3'UTR_intron\"\n elif gene.strand==-1: implied_feature = \"5'UTR_intron\"\n else:\n implied_feature = 'intron'\n # set inner_feature based on whether insertion_pos is on a real/implied feature edge \n # or completely inside an implied feature\n if len(features_edge)==1:\n inner_features.append(features_edge[0] + '/' + implied_feature)\n elif len(features_edge)==0:\n inner_features.append(implied_feature)\n # if insertion_pos is inside two features, or inside one and on the edge of another, \n # print a warning, and use all the feature names, with a ?? 
at the end to mark strangeness\n else:\n inner_features.append('/'.join(features_inside+features_edge) + '??')\n if not quiet:\n print((\"Warning: Location (%s,%s) matched multiple features (%s) \"\n +\"in gene %s!\")%(ins_start, ins_end, inner_features[-1], gene_ID)) \n inner_feature = MULTIPLE_mRNA_JOIN.join(sorted(set(inner_features)))\n \n # prepend whatever gene-level features (edge etc, or []) were found at the start to the full value\n full_feature = '/'.join(features_basic + ([inner_feature] if inner_feature else []))\n gene_data_list.append([gene_ID, orientation, full_feature, distances])\n\n ### Return appropriate value\n # if no gene matching insertion_pos was found, return special value\n if not gene_data_list:\n return [SPECIAL_GENE_CODES.not_found, '-', '-', '-']\n # if single gene found, return its info\n elif len(gene_data_list) == 1:\n return gene_data_list[0]\n # if multiple genes found, return data like this: \"gene1 & gene2\", \"sense & antisense\", \"intron & CDS/4'UTR\",\n # except that the feature for intergenic insertions should be \"intergenic\" instead of \"intergenic & intergenic\".\n else: \n full_data = [MULTIPLE_GENE_JOIN.join(multiple_vals) for multiple_vals in zip(*gene_data_list)]\n if full_data[2] == MULTIPLE_GENE_JOIN.join(['intergenic']*2): full_data[2] = 'intergenic'\n return full_data", "def find_shared_dna(\n self,\n individuals=(),\n cM_threshold=0.75,\n snp_threshold=1100,\n shared_genes=False,\n save_output=True,\n genetic_map=\"HapMap2\",\n ):\n # initialize all objects to be returned to be empty to start\n one_chrom_shared_dna = pd.DataFrame()\n two_chrom_shared_dna = pd.DataFrame()\n one_chrom_shared_genes = pd.DataFrame()\n two_chrom_shared_genes = pd.DataFrame()\n one_chrom_discrepant_snps = pd.Index([])\n two_chrom_discrepant_snps = pd.Index([])\n\n # ensure that all individuals have SNPs that are mapped relative to Build 37\n self._remap_snps_to_GRCh37(individuals)\n\n # return if there aren't enough individuals to compare\n if len(individuals) < 2:\n logger.warning(\"find_shared_dna requires two or more individuals...\")\n return self._find_shared_dna_return_helper(\n one_chrom_shared_dna,\n two_chrom_shared_dna,\n one_chrom_shared_genes,\n two_chrom_shared_genes,\n one_chrom_discrepant_snps,\n two_chrom_discrepant_snps,\n )\n\n # load the specified genetic map (one genetic map for each chromosome)\n genetic_map_dfs = self._resources.get_genetic_map(genetic_map)\n\n if len(genetic_map_dfs) == 0:\n return self._find_shared_dna_return_helper(\n one_chrom_shared_dna,\n two_chrom_shared_dna,\n one_chrom_shared_genes,\n two_chrom_shared_genes,\n one_chrom_discrepant_snps,\n two_chrom_discrepant_snps,\n )\n\n # generate a list of dynamically named columns for each individual's genotype\n # (e.g., genotype0, genotype1, etc).\n cols = [f\"genotype{str(i)}\" for i in range(len(individuals))]\n\n # set the reference SNPs to compare to be that of the first individual\n df = individuals[0].snps\n df = df.rename(columns={\"genotype\": cols[0]})\n\n # build-up a dataframe of SNPs that are common to all individuals\n for i, ind in enumerate(individuals[1:]):\n # join SNPs for all individuals\n df = df.join(ind.snps[\"genotype\"], how=\"inner\")\n df = df.rename(columns={\"genotype\": cols[i + 1]})\n\n # set a flag for if one individual is male (i.e., only one chromosome match on the X\n # chromosome is possible in the non-PAR region)\n one_x_chrom = self._is_one_individual_male(individuals)\n\n # create tasks to compute the genetic distances (cMs) between 
each SNP on each chromosome\n tasks = []\n chroms_to_drop = []\n for chrom in df[\"chrom\"].unique():\n if chrom not in genetic_map_dfs.keys():\n chroms_to_drop.append(chrom)\n continue\n\n # each task requires the genetic map for the chromosome and the positions of all SNPs\n # in common on that chromosome\n tasks.append(\n {\n \"genetic_map\": genetic_map_dfs[chrom],\n # get positions for the current chromosome\n \"snps\": pd.DataFrame(df.loc[(df[\"chrom\"] == chrom)][\"pos\"]),\n }\n )\n\n # drop chromosomes without genetic distance data (e.g., chroms MT, PAR, etc.)\n for chrom in chroms_to_drop:\n df = df.drop(df.loc[df[\"chrom\"] == chrom].index)\n\n # determine the genetic distance between each SNP using the specified genetic map\n snp_distances = map(self._compute_snp_distances, tasks)\n snp_distances = pd.concat(snp_distances)\n\n # extract the column \"cM_from_prev_snp\" from the result and add that to the dataframe\n # of SNPs common to all individuals; now we have the genetic distance between each SNP\n df[\"cM_from_prev_snp\"] = snp_distances[\"cM_from_prev_snp\"]\n\n # now we apply a mask for whether all individuals match on one or two chromosomes...\n # first, set all rows for these columns to True\n df[\"one_chrom_match\"] = True\n df[\"two_chrom_match\"] = True\n # determine where individuals share an allele on one chromosome (i.e., set to False when\n # at least one allele doesn't match for all individuals)\n for genotype1, genotype2 in combinations(cols, 2):\n df.loc[\n ~df[genotype1].isnull()\n & ~df[genotype2].isnull()\n & (df[genotype1].str[0] != df[genotype2].str[0])\n & (df[genotype1].str[0] != df[genotype2].str[1])\n & (df[genotype1].str[1] != df[genotype2].str[0])\n & (df[genotype1].str[1] != df[genotype2].str[1]),\n \"one_chrom_match\",\n ] = False\n\n # determine where individuals share alleles on two chromosomes (i.e., set to False when\n # two alleles don't match for all individuals)\n for genotype1, genotype2 in combinations(cols, 2):\n df.loc[\n ~df[genotype1].isnull()\n & ~df[genotype2].isnull()\n & (df[genotype1] != df[genotype2])\n & ~(\n (df[genotype1].str[0] == df[genotype2].str[1])\n & (df[genotype1].str[1] == df[genotype2].str[0])\n ),\n \"two_chrom_match\",\n ] = False\n\n # genotype columns are no longer required for calculation\n df = df.drop(cols, axis=1)\n\n # find shared DNA on one chrom\n one_chrom_shared_dna, one_chrom_discrepant_snps = self._find_shared_dna_helper(\n df[[\"chrom\", \"pos\", \"cM_from_prev_snp\", \"one_chrom_match\"]],\n cM_threshold,\n snp_threshold,\n one_x_chrom,\n )\n # find shared DNA on two chroms\n two_chrom_shared_dna, two_chrom_discrepant_snps = self._find_shared_dna_helper(\n df[[\"chrom\", \"pos\", \"cM_from_prev_snp\", \"two_chrom_match\"]],\n cM_threshold,\n snp_threshold,\n one_x_chrom,\n )\n\n if shared_genes:\n one_chrom_shared_genes = self._compute_shared_genes(one_chrom_shared_dna)\n two_chrom_shared_genes = self._compute_shared_genes(two_chrom_shared_dna)\n\n if save_output:\n self._find_shared_dna_output_helper(\n individuals,\n cM_threshold,\n snp_threshold,\n one_chrom_shared_dna,\n two_chrom_shared_dna,\n one_chrom_shared_genes,\n two_chrom_shared_genes,\n genetic_map,\n )\n\n return self._find_shared_dna_return_helper(\n one_chrom_shared_dna,\n two_chrom_shared_dna,\n one_chrom_shared_genes,\n two_chrom_shared_genes,\n one_chrom_discrepant_snps,\n two_chrom_discrepant_snps,\n )", "def tag_GC_simulations():\n # Diagnostics to use?\n d = get_tags_for_NOx_HONO()\n tags = d.values()\n for key in d.keys():\n 
print('{} : {};'.format(key, d[key]))\n # Also print out just using \"P\" as the prefix.\n for key in d.keys():\n print('P{} : {};'.format(d[key], d[key]))\n # prepare other output for GEOS-Chem input files\n extr_str = 'ARNA_Standard'\n AC.print_out_lines_for_gckpp_file(tags=tags, extr_str=extr_str)\n AC.prt_lines4species_database_yml(tags=tags, extr_str=extr_str)\n\n ptr_str = '{:<11}= IGNORE; {}'\n d = dict(zip(diags, tags))\n for key in d.keys():\n print(ptr_str.format(d[key], '{'+key+'}'))\n\n ptr_str = 'P{:<10}= IGNORE; {}'\n d = dict(zip(diags, tags))\n for key in d.keys():\n print(ptr_str.format(d[key], '{'+key+'}'))", "def gc_frequency(self):\n result = str(self.seq).count(\"G\") + str(self.seq).count(\"C\")\n return result", "def gen_fitness_curves(pop,conc=None):\n\n if conc is None:\n conc = np.logspace(-3,5,num=1000)\n \n n_genotype = pop.n_genotype\n\n fc = {}\n for g in range(n_genotype):\n f = np.zeros(len(conc))\n i = 0\n for c in conc:\n f[i] = gen_fitness(pop,g,c) - pop.death_rate\n i+=1\n fc[g] = f\n\n return fc", "def __mutate(self, chromosomes, mutation_probability):\n\n for chromosome in chromosomes:\n for i in range(self.chromosome_size):\n if random.randint(1, 100) <= mutation_probability:\n logging.getLogger().debug(\n \"---> Mutation in Chromosome \" + str(\n chromosome.chromosome_id) + \"in gene \" + str(i)\n + \" <---\")\n chromosome.genes[i] = random.choice(self.gene_pool)", "def _iRep_gc_content(seq, window = 5000, slide = 100):\n # convert GC\n replacements = {'G':1, 'C':1, 'A':0, 'T':0, 'N':0}\n GC = [] # G - C\n for base in seq:\n try:\n GC.append(replacements[base.upper()])\n except:\n GC.append(0)\n # calculate gc content over sliding windows\n i = 0\n weights = np.ones(window)\n table = defaultdict(list)\n for gc in scipy.signal.fftconvolve(GC, weights, 'valid').tolist()[0::slide]:\n table['index'].append(i)\n table['GC_content'].append(gc/window)\n i += slide\n return pd.DataFrame(table)", "def locus2gene(scaflist, gbeglist, gendlist, gdatalist=False, gff=dbpaths['gff'], comprehensive=True ):\n cuffgenes = {}\n\n for result in range(len(scaflist)):\n if result % 1000 == 0:\n print \"%d genes matched of %d\" % (result, len(scaflist))\n cur_scaf = scaflist[result]\n cur_gbeg = gbeglist[result]\n cur_gend = gendlist[result]\n if gdatalist:\n cur_gdata = gdatalist[result]\n else:\n cur_gdata = 0\n fobj = open(gff, 'rb')\n for line in fobj:\n col = line.split()\n if col[2] == \"mRNA\":\n if col[0] == cur_scaf:\n if float(col[3]) <= cur_gend and float(col[4]) >= cur_gbeg:\n try:\n if (cur_scaf, cur_gbeg) in cuffgenes:\n cuffgenes[(cur_scaf, cur_gbeg, 2)] = (re.search('ID=([^;]*);', col[8]).groups()[0], cur_scaf, cur_gbeg, cur_gend, cur_gdata)\n else:\n cuffgenes[(cur_scaf, cur_gbeg)] = (re.search('ID=([^;]*);', col[8]).groups()[0], cur_scaf, cur_gbeg, cur_gend, cur_gdata)\n if not comprehensive:\n break\n except AttributeError:\n print col[8]\n fobj.close()\n\n return cuffgenes", "def test_genes_to_bedtool_hgnc_ids(database, gene_objects_build37, build=\"GRCh37\"):\n # Given a populated gene collection\n database[\"gene\"].insert_many(gene_objects_build37)\n # When hgnc ids are provided to the genes_to_bedtool function\n hgnc_ids = []\n for gene in gene_objects_build37:\n hgnc_ids.append(gene[\"hgnc_id\"])\n bt = genes_to_bedtool(database[\"gene\"], hgnc_ids=hgnc_ids)\n # THEN the function should return a BedTool object\n assert isinstance(bt, pybedtools.bedtool.BedTool)\n # With 3 gene intervals\n assert len(bt) == 3", "def random_strings(sequence, 
GC_array):\r\n\r\n AT = 0\r\n GC = 0\r\n\r\n for nt in sequence:\r\n if nt == \"A\" or nt == \"T\":\r\n AT += 1\r\n elif nt == \"G\" or nt == \"C\":\r\n GC += 1\r\n\r\n probabilities = []\r\n\r\n #Calculate probability of G = probability of C = %GC / 2\r\n #Calculate probability of A = probability of T = (1 - %GC) / 2\r\n\r\n #For each consecutive base in provided sequence:\r\n #1. Convert total probability to logarithm using math.log(probability, base=10)\r\n #2. Total probability to be multiplied by probability of specifically that base\r\n\r\n for i in range(len(GC_array)):\r\n prob = (AT * math.log10((1 - GC_array[i])/2)) + (GC * math.log10(GC_array[i]/2))\r\n\r\n probabilities.append('%0.3f' % prob)\r\n\r\n print(*probabilities, sep= \" \")", "def calc_fitness_by_gen(self):\r\n f_sum = 0\r\n # first loop gives us the sum of the fitness\r\n for c, _ in self.temp_hist_by_gen.items():\r\n f_sum += c.fitness()\r\n # now we calc the chances by fitness of each one\r\n for c, _ in self.temp_hist_by_gen.items():\r\n self.temp_hist_by_gen[c] = c.fitness() / f_sum", "def find_common_interactor():\n # 1. filter the unique fusion gene pairs.\n # fusionGenePair = pd.read_csv(\"./fusionGenePair.csv\", header=0, sep=' ')\n # unique_fusionGenePair = fusionGenePair.drop_duplicates()\n # unique_fusionGenePair.to_csv(\"./uniqueFusion.csv\", sep=' ', index=False)\n unique_fusionGenePair = pd.read_csv(\"./uniqueFusion.csv\", sep=' ', header=0)\n\n # 2. for each gene pairs, get all the interactors each partner has.\n\n # Store the 5' partner gene and 3' partner gene in two lists.\n FivePartnerGenelist = []\n ThreePartnerGenelist = []\n for index, row in unique_fusionGenePair.iterrows():\n FivePartnerGenelist.append(row['5_PARTNER_GENE'])\n ThreePartnerGenelist.append(row['3_PARTNER_GENE'])\n # Get the unique gene in each pair\n uniqueFPGL = list(OrderedDict.fromkeys(FivePartnerGenelist))\n uniqueTPGL = list(OrderedDict.fromkeys(ThreePartnerGenelist))\n uniqueGene = list(OrderedDict.fromkeys(uniqueTPGL + uniqueFPGL))\n\n # Find each gene's interactor in the PPI datasets\n PPIS = pd.read_csv(\"./IID results/PPIs_final.tsv\", sep='\\t', header=0)\n\n # Put each gene interactor into a dictionary.\n geneIntDic = {}\n for item in uniqueGene:\n for index, row in PPIS.iterrows():\n if row['Query Symbol'] == item:\n if item in geneIntDic:\n geneIntDic[item].append(row['Partner Symbol'])\n else:\n key = item\n geneIntDic.setdefault(key, [])\n geneIntDic[item].append(row['Partner Symbol'])\n if row['Partner Symbol'] == item:\n if item in geneIntDic:\n geneIntDic[item].append(row['Query Symbol'])\n else:\n key = item\n geneIntDic.setdefault(key, [])\n geneIntDic[item].append(row['Query Symbol'])\n w = csv.writer(open(\"./geneIntDic.csv\", \"w\"))\n for key, val in geneIntDic.items():\n w.writerow([key, val])", "def gc_content(dna):\n seqlength = len(dna)\n\n # Count A and T nucleotides, including the W ambiguity base representing\n # either A or T\n atcount = dna.count('A') + dna.count('a') + \\\n dna.count('T') + dna.count('t') + \\\n dna.count('W') + dna.count('w')\n\n # Count C and G nucleotides, including the S ambiguity base representing\n # either C or G\n gccount = dna.count('C') + dna.count('c') + \\\n dna.count('G') + dna.count('g') + \\\n dna.count('S') + dna.count('s')\n\n # Count all other ambiguous nucleotides; most will be Ns, but occasionally\n # there will be other IUPAC ambiguity symbols\n ncount = seqlength - atcount - gccount\n\n if atcount + gccount == 0:\n assert ncount == seqlength\n gccontent = 
0.0\n else:\n gccontent = float(gccount) / float(gccount + atcount)\n return gccontent", "def get_gc_count(dataset):\n\n gc_count_dict = {}\n\n for sequence in SeqIO.parse(dataset, 'fasta'):\n c_count = sequence.seq.count('C')\n g_count = sequence.seq.count('G')\n gc_count = ((c_count + g_count)/len(sequence))*100\n gc_count_dict[sequence.id] = gc_count\n\n\n return gc_count_dict", "def integrate_copy_number(y, cancer_genes_df, genes, loss_df, gain_df,\n include_mutation=True):\n\n # Find if the input genes are in this master list\n genes_sub = cancer_genes_df[cancer_genes_df['Gene Symbol'].isin(genes)]\n\n # Add status to the Y matrix depending on if the gene is a tumor suppressor\n # or an oncogene. An oncogene can be activated with copy number gains, but\n # a tumor suppressor is inactivated with copy number loss\n tumor_suppressor = genes_sub[genes_sub['Classification*'] == 'TSG']\n oncogene = genes_sub[genes_sub['Classification*'] == 'Oncogene']\n\n copy_loss_sub = loss_df[tumor_suppressor['Gene Symbol']]\n copy_gain_sub = gain_df[oncogene['Gene Symbol']]\n\n # Append to column names for visualization\n copy_loss_sub.columns = [col + '_loss' for col in copy_loss_sub.columns]\n copy_gain_sub.columns = [col + '_gain' for col in copy_gain_sub.columns]\n\n # Add columns to y matrix\n y = y.join(copy_loss_sub)\n y = y.join(copy_gain_sub)\n\n # Fill missing data with zero (measured mutation but not copy number)\n y = y.fillna(0)\n y = y.astype(int)\n\n if not include_mutation:\n y = y.drop(genes, axis=1)\n return y", "def rnd_genes(genes=[], n=1, gene_data=None):\n if gene_data is None:\n return np.array([])\n gene_tetra, gene_ct, gene_ids, gene_names = gene_data\n # how many genes are there total?\n if genes == []:\n sel_genes = np.ones(gene_ids.shape, dtype=bool)\n else:\n sel_genes = np.zeros(gene_ids.shape, dtype=bool)\n for gene in genes:\n sel_genes = np.logical_or(sel_genes, \\\n gene_ids == gene_names[gene])\n # randomly pick genes from the collection\n rand_picks = np.random.randint(sum(sel_genes), size=(n,))\n tetra = gene_tetra[sel_genes][rand_picks]\n return tetra", "def add_gtcnt(vcf, out, n_header=None):\n if n_header is None:\n n_header = edit_header(vcf)\n for entry in vcf:\n cnt = [0, 0, 0, 0]\n #cnt = {\"UNK\": 0, \"REF\": 0, \"HET\": 0, \"HOM\": 0}\n for sample in entry.samples:\n gt = entry.samples[sample][\"GT\"]\n if None in gt or len(gt) != 2:\n cnt[0] += 1\n elif gt[0] == gt[1] and gt[0] == 0:\n cnt[1] += 1\n elif gt[0] == gt[1]:\n cnt[3] += 1\n elif gt[0] != gt[1]:\n cnt[2] += 1\n else:\n cnt[0] += 1\n try:\n nentry = truvari.copy_entry(entry, n_header)\n except TypeError:\n yield entry\n continue\n nentry.info[\"GTCNT\"] = cnt \n yield nentry", "def evaluate ( self , genome ) :\n\n\t\tassert isinstance( genome , Genome ), 'genome supplied must be of type cc3dtools.Genome!'\n\t\tloci = genome.get_mutated_loci()\n\t\tmatched_phenotypes = []\n\t\tphenotypes = self.phenotypes.items()\n\n\t\tfor locus in loci:\n\t\t\tfor phenotype, region in phenotypes:\n\t\t\t\t# check if the locus is in the region\n\t\t\t\t# 'locus.locus' to get the float value of that mutation rather \n\t\t\t\t# than an object!\n\t\t\t\tif locus.locus > region[0] and locus.locus < region[1]:\n\t\t\t\t\tmatched_phenotypes.append( phenotype )\n\t\treturn Counter( matched_phenotypes )", "def gc(sequence):\n sequence = sequence.upper()\n return (sequence.count('G') + sequence.count('C')) / float(len(sequence))", "def main():\n\tdb, cursor = connect()\n\t#chroms = ['1','22']\n\t#chroms = 
['2','21']\n\t#chroms = ['3','20']\n\t#chroms = ['4','19']\n\t#chroms = ['5','18']\n\t#chroms = ['6','17']\n\t#chroms = ['7','16']\n\t#chroms = ['8','15']\n\t#chroms = ['9','14']\n\t#chroms = ['10','13']\n\tchroms = ['11','12']\n\t#chroms = [str(i) for i in range(10,23)]\n\t#chroms = ['X','Y']\n\tchroms.reverse()\n\tfor chrom in chroms:\n\t\tt0 = time()\n\t\ttable = \"gnomad_freqs_chr_\" + chrom\n\t\tprint\n\t\tprint \"*\"*20\n\t\tprint table\n\t\tprint \"number of variants:\", search_db(cursor, \"select count(1) from %s\" % table)[0][0]\n\t\tqry = \"select count(1) from %s \" % table\n\t\tqry += \"where char_length(reference)=1 and char_length(variant)=1\"\n\t\tprint \"simple SNPs\", search_db(cursor, qry)[0][0]\n\n\t\tcandidates, long_vars_ct = find_complex_variants(cursor, table)\n\t\tprint\n\t\tprint \"Complex variants with reference<30:\", len(candidates),\n\t\tprint \" long variants: \", long_vars_ct\n\n\t\tclusters = find_clusters_of_candidates(candidates)\n\t\tprint\n\t\tprint \"Done clustering. Max pos:\", max([cluster[0][0] for cluster in clusters])\n\t\tprint \"Number of hotspot regions:\", len(clusters)\n\n\n\t\tnumber_of_vars_in_clusters = 0\n\t\tnumber_of_clusters_with_periodic_motifs = 0\n\t\tfor cluster in clusters:\n\t\t\t# no varaints: cluster is just the number of positions here, not the number of\n\t\t\t# vars repoted for each\n\t\t\t[start,end, number_of_variants] = characterize_region(cluster)\n\t\t\tif number_of_variants<2: continue\n\t\t\tnumber_of_vars_in_clusters += number_of_variants\n\t\t\tfixed_fields = {'chrom':chrom, 'start':start, 'end':end}\n\t\t\tstore_without_checking(cursor, 'gnomad_hotspots', fixed_fields)\n\t\tprint\n\t\tprint \"Number of variants with clusters:\", number_of_vars_in_clusters\n\t\tprint \"Number of clusters with periodic motifs:\", number_of_clusters_with_periodic_motifs\n\t\tprint\n\t\tprint \"time taken %.2f min\" % ((time() - t0) / 60.0)\n\t\tprint\n\tcursor.close()\n\tdb.close()\n\n\treturn", "def test_check_cds_19(self):\n self.cds1.gene = \"11\"\n import_genome.check_cds(self.cds1, self.eval_flags)\n count = count_status(self.cds1, \"error\", \"warning\")\n self.assertEqual(count, 1)", "def test_counts(contrib_data):\n ## Find the probands - they should always contribute their own alleles\n P = ped.Pedigree(args.pedfile)\n\n ## TODO: Test individuals with multiple offspring +p2 id:112\n ## Case when no regions are specified - only all probands\n regions = contrib_data.keys()\n if len(regions) == 1:\n all_contribs = contrib_data[regions[0]]\n for prob in P.probands:\n ## Probands should always contribute 1 allele\n assert all_contribs[prob, 1] == args.iterations\n\n ## Test specific individuals\n counts = Counter(all_contribs[11])\n binomial_sd = np.sqrt(args.iterations * 0.25)\n mean = np.mean(counts.values())\n conf95 = (mean - 2 * binomial_sd, mean + 2 * binomial_sd)\n\n for count in counts.values():\n assert conf95[0] < count < conf95[1]", "def gc(args):\n p = OptionParser(gc.__doc__)\n p.add_option(\"--binsize\", default=500, type=\"int\", help=\"Bin size to use\")\n opts, args = p.parse_args(args)\n\n if len(args) != 1:\n sys.exit(not p.print_help())\n\n (fastafile,) = args\n binsize = opts.binsize\n allbins = []\n for name, seq in parse_fasta(fastafile):\n for i in range(len(seq) / binsize):\n atcnt = gccnt = 0\n for c in seq[i * binsize : (i + 1) * binsize].upper():\n if c in \"AT\":\n atcnt += 1\n elif c in \"GC\":\n gccnt += 1\n totalcnt = atcnt + gccnt\n if totalcnt == 0:\n continue\n gcpct = gccnt * 100 / totalcnt\n 
allbins.append(gcpct)\n\n from jcvi.graphics.base import asciiplot\n from collections import Counter\n\n title = \"Total number of bins={}\".format(len(allbins))\n c = Counter(allbins)\n x, y = zip(*sorted(c.items()))\n asciiplot(x, y, title=title)", "def genomic_insertion(erbb2_context):\n params = {\n \"id\": \"normalize.variation:NC_000017.10%3Ag.37880993_37880994insGCTTACGTGATG\", # noqa: E501\n \"type\": \"VariationDescriptor\",\n \"variation_id\": \"ga4gh:VA.nHB0_mpsq2t90S-znr81oCi2cY5CMdUe\",\n \"variation\": {\n \"_id\": \"ga4gh:VA.nHB0_mpsq2t90S-znr81oCi2cY5CMdUe\",\n \"location\": {\n \"_id\": \"ga4gh:VSL.E0o4HCXjy1EUthF1m32oj_Bc45g5YmEm\",\n \"interval\": {\n \"end\": {\"value\": 2500, \"type\": \"Number\"},\n \"start\": {\"value\": 2488, \"type\": \"Number\"},\n \"type\": \"SequenceInterval\"\n },\n \"sequence_id\": \"ga4gh:SQ.y9b4LVMiCXpZxOg9Xt1NwRtssA03MwWM\",\n \"type\": \"SequenceLocation\"\n },\n \"state\": {\n \"sequence\": \"TACGTGATGGCTTACGTGATGGCT\",\n \"type\": \"LiteralSequenceExpression\"\n },\n \"type\": \"Allele\"\n },\n \"molecule_context\": \"transcript\",\n \"structural_type\": \"SO:0000667\",\n \"vrs_ref_allele_seq\": \"TACGTGATGGCT\",\n \"gene_context\": erbb2_context\n }\n return VariationDescriptor(**params)", "def genes_exons(species,aceVersion,log=0):\n \n t1=time.time()\n os.chdir(os.environ['PYDATA']+'/%s/log'%species)\n logFile=open('%s_ace_exons.txt'%species,'w')\n #open database for relation between chromosome and Ensembl region\n path=os.environ['PYDATA']+'/'+species+'/ensembl/'+species+'_region_by_chromosome.bkdb'\n chrDB=bsddb.btopen(path,'r')\n \n #open ace genes by- ensembl region\n path=os.environ['PYDATA']+'/'+species+'/aceview/'+species+'_genes_by_ensembl_region.bkdb'\n if os.path.exists(path):\n os.remove(path)\n regionDB=bsddb.btopen(path,'w')\n regionNb=0\n \n \n #open ace exon_by_gene\n path=os.environ['PYDATA']+'/'+species+'/aceview/'+species+'_exons_by_gene.bkdb'\n if os.path.exists(path):\n os.remove(path)\n exonsByGeneDB=bsddb.btopen(path,'w')\n exonGeneNb=0\n \n #open ace transcript_sequence for updating\n path=os.environ['PYDATA']+\"/\"+species+'/aceview/'+species+'_transcript_sequence.bkdb'\n transcriptDB=bsddb.btopen(path,'w')\n transcriptNb=0\n \n #create ace transcript by gene\n path=os.environ['PYDATA']+\"/\"+species+'/aceview/'+species+'_transcripts_by_gene.bkdb'\n if os.path.exists(path):\n os.remove(path)\n transcriptsByGeneDB=bsddb.btopen(path,'w')\n transcriptGeneNb=0\n \n #create ace transcript by exon\n path=os.environ['PYDATA']+\"/\"+species+'/aceview/'+species+'_transcripts_by_exon.bkdb'\n if os.path.exists(path):\n os.remove(path)\n transcriptsByExonDB=bsddb.btopen(path,'w')\n transcriptExonNb=0\n \n \n chromosomes=chrDB.keys() \n for chromosome in chromosomes:\n try: \n region=chrDB[chromosome]\n print 'processing chromosome %s (Ensembl region %s)'%(chromosome,region)\n try:\n fileName = 'x1.genes_gff.%s.gff' %(chromosome)\n os.chdir('%s/%s_%s'%(os.environ['ACEDATA'],species,aceVersion.lower()))\n chrFile = open(fileName)\n chrFlag=1\n except:\n fileName = 'AceView.ncbi_37.genes_gff.gff'\n os.chdir('%s/%s_%s'%(os.environ['ACEDATA'],species,aceVersion.lower()))\n chrFile = open(fileName)\n chrFlag=0\n \n #complete list of AceView genes located in chromosomes common with Ensembl data \n geneIDList=[]\n geneStartList=[]\n geneEndList=[]\n geneStrandList=[]\n \n #initiated to empty to mark the start of the process\n geneID=''\n #information recovered on each gene\n exonIDList=[] \n exonStartList=[]\n exonEndList=[]\n 
exonStrandList=[]\n transcriptLetterList=[]\n read=1 \n lineRank=-1\n while read: \n line = chrFile.readline() \n lineRank+=1\n if not line:\n read=0 \n proceed=1 \n if read: \n items=line.split('\\t')\n if items[0]!=chromosome:\n if chrFlag: \n print 'abnormal line %u for chromosome %s'%(lineRank,chromosome) \n print line\n logFile.write('abnormal line %u for chromosome %s\\n'%(lineRank,chromosome))\n logFile.write('%s\\n'%line)\n proceed=0\n else: \n geneInfo = items[8].split(';') \n if proceed==1: \n #test if the current line belong to the current gene, or if the end of file is reached \n if geneInfo[0].split(' ')[1]!=geneID or read==0:\n #test if it is the first line (start of the process) \n if geneID=='': \n geneID=geneInfo[0].split(' ')[1]\n else: \n #a gene has been processed and information must be stored\n #update list of all AceView genes\n geneIDList.append(geneID) \n geneStartList.append(min(exonStartList))\n geneEndList.append(max(exonEndList)) \n geneStrandList.append(exonStrandList[0])\n \n # process exon information \n [exonIDList,exonStarts, exonEnds, exonStrands, transcriptsByExon, transcriptIDs, transcriptStarts, transcriptEnds,\\\n exonIndexes, exonGroups, intronStarts, intronEnds]=\\\n bintools.process_exons(exonStarts=array(exonStartList),exonEnds=array(exonEndList),\\\n exonStrands=array(exonStrandList),transcriptIDs=array(transcriptLetterList))\n '''SAVE LIST OF TRANSCRIPTS in transcriptsByGeneDB''' \n transcriptsByGeneDB[geneID]=cPickle.dumps(list(transcriptIDs),protocol=-1)\n transcriptGeneNb=transcriptGeneNb+1\n \n \n '''SAVE LIST OF TRANSCRIPTS in transcriptsByExonDB'''\n for i in range(len(exonStarts)): \n transcriptsByExonDB[geneID+'.exon'+str(exonIndexes[i])]=cPickle.dumps(list(transcriptsByExon[i]),protocol=-1) \n transcriptExonNb=transcriptExonNb+1\n \n '''SAVE LIST OF EXONS in exonByGeneDB''' \n exonsByGeneDB[geneID]=cPickle.dumps(ps_class.ExonList(exonIndexes=exonIndexes,groupList=exonGroups,exonStartArray=exonStarts,exonEndArray=exonEnds,\\\n strandArray=exonStrands,intronStartArray=intronStarts,intronEndArray=intronEnds,\\\n transcriptsByExon=transcriptsByExon,transcriptIDs=transcriptIDs,\\\n transcriptStarts=transcriptStarts, transcriptEnds=transcriptEnds),protocol=-1)\n exonGeneNb=exonGeneNb+1 \n '''UPDATE transcriptDB''' \n for i in range(len(transcriptIDs)):\n transcript=None\n transcriptID=geneID+'.'+transcriptIDs[i]+aceVersion \n try: \n transcript=cPickle.loads(transcriptDB[transcriptID])\n except:\n try:\n transcriptID=transcriptID+'-unspliced'\n transcript=cPickle.loads(transcriptDB[transcriptID])\n except:\n logFile.write('transcript %s (chromosome %s) not in %s_transcripts_sequence.bkdb'%(transcriptID,chromosome,species)+'\\n') \n if transcript is not None: \n transcript.start=transcriptStarts[i]\n transcript.end=transcriptEnds[i]\n transcript.strand=exonStrands[0]\n transcriptDB[transcriptID]=cPickle.dumps(transcript,protocol=-1)\n transcriptNb=transcriptNb+1 \n \n #geneID of the current gene\n geneID=geneInfo[0].split(' ')[1]\n \n exonStartList=[]\n exonEndList=[]\n exonStrandList=[]\n transcriptLetterList=[] \n #test if the current line contain information on exons \n if read==1 and items[2]=='exon': \n exonStartList.append(int(items[3]))\n exonEndList.append(int(items[4]))\n if items[6]=='+':\n exonStrandList.append(1)\n else:\n exonStrandList.append(-1) \n transcriptName = geneInfo[2].split(' ')[2].split('.') \n transcriptLetter=transcriptName[len(transcriptName)-1].split(aceVersion) \n transcriptLetterList.append(transcriptLetter[0]) 
\n '''SAVE LIST OF GENES in regionDB'''\n regionDB[region]=cPickle.dumps(ps_class.GeneList(idList=geneIDList,startArray=array(geneStartList),endArray=array(geneEndList),strandArray=array(geneStrandList)),protocol=-1)\n regionNb=regionNb+1\n except: \n logFile.write('no AceView file %s\\n'%fileName)\n pass \n chrDB.close()\n regionDB.close()\n exonsByGeneDB.close()\n transcriptDB.close()\n transcriptsByGeneDB.close()\n transcriptsByExonDB.close() \n logFile.close()\n t2=time.time()\n log.write('%s\\t%s\\t\\t16\\t%s_genes_by_ensembl_region.bkdb\\tace_%s\\t%u\\t%.2f\\n'%(date.today(),species,species,aceVersion,regionNb,t2-t1))\n log.write('%s\\t%s\\t\\t16\\t%s_exons_by_gene.bkdb\\tace_%s\\t%u\\t%.2f\\n'%(date.today(),species,species,aceVersion,exonGeneNb,t2-t1))\n log.write('%s\\t%s\\t\\t16\\t%s_transcripts_by_gene.bkdb\\tace_%s\\t%u\\t%.2f\\n'%(date.today(),species,species,aceVersion,transcriptGeneNb,t2-t1))\n log.write('%s\\t%s\\t\\t16\\t%s_transcripts_by_exon.bkdb\\tace_%s\\t%u\\t%.2f\\n'%(date.today(),species,species,aceVersion,transcriptExonNb,t2-t1))\n log.write('%s\\t%s\\tupdate\\t16\\t%s_transcript_sequence.bkdb\\tace_%s\\t%u\\t%.2f\\n'%(date.today(),species,species,aceVersion,transcriptNb,t2-t1))", "def gdcs_reporter(metadata, analysistype, reportpath):\n # Initialise list to store all the GDCS genes, and genera in the analysis\n gdcs = list()\n genera = list()\n for sample in metadata:\n sample[analysistype].faidict = dict()\n if sample.general.bestassemblyfile != 'NA':\n if os.path.isdir(sample[analysistype].targetpath):\n # Update the fai dict with all the genes in the analysis, rather than just those with baited hits\n Reports.gdcs_fai(sample=sample,\n analysistype=analysistype)\n sample[analysistype].createreport = True\n # Determine which genera are present in the analysis\n if sample.general.closestrefseqgenus not in genera:\n genera.append(sample.general.closestrefseqgenus)\n try:\n # Add all the GDCS genes to the list\n for gene in sorted(sample[analysistype].faidict):\n if gene not in gdcs:\n gdcs.append(gene)\n except AttributeError:\n sample[analysistype].createreport = False\n else:\n sample[analysistype].createreport = False\n else:\n sample[analysistype].createreport = False\n sample.general.incomplete = True\n header = 'Strain,Genus,Matches,Pass/Fail,{},\\n'.format(','.join(sorted(gdcs)))\n data = str()\n with open(os.path.join(reportpath, '{}.csv'.format(analysistype)), 'w') as report:\n # Sort the samples in the report based on the closest refseq genus e.g. 
all samples with the same genus\n # will be grouped together in the report\n for genus in genera:\n for sample in metadata:\n if sample.general.closestrefseqgenus == genus:\n if sample[analysistype].createreport:\n sample[analysistype].totaldepth = list()\n # Add the sample to the report if it matches the current genus\n data += '{},{},'.format(sample.name, genus)\n # Initialise a variable to store the number of GDCS genes were matched\n count = 0\n # As I want the count to be in the report before all the gene results, this string will\n # store the specific sample information, and will be added to data once count is known\n specific = str()\n for gene in sorted(gdcs):\n # As there are different genes present in the GDCS databases for each organism of\n # interest, genes that did not match because they're absent in the specific database are\n # indicated using an X\n if gene not in [result for result in sample[analysistype].faidict]:\n specific += 'X,'\n else:\n try:\n specific += '{p_id},'.format(p_id=sample[analysistype].blastresults[gene])\n # Report the necessary information for each gene result\n count += 1\n # If the gene was missing from the results attribute, add a - to the cell\n except (KeyError, AttributeError):\n specific += '-,'\n # Determine whether the sample pass the necessary quality criteria:\n # Pass, all GDCS, mean coverage greater than 20X coverage;\n # ?: Indeterminate value;\n # -: Fail value\n # Allow one missing GDCS to still be considered a pass\n if count >= len(sample[analysistype].faidict) - 1:\n quality = '+'\n else:\n quality = '-'\n # Add the count, mean depth with standard deviation, the pass/fail determination,\n # and the total number of GDCS genes as well as the results\n data += '{hits}/{total},{fail},{gdcs}\\n'\\\n .format(hits=str(count),\n total=len(sample[analysistype].faidict),\n fail=quality,\n gdcs=specific)\n # Any samples with a best assembly of 'NA' are considered incomplete.\n else:\n data += '{},{},,,-\\n'.format(sample.name, sample.general.closestrefseqgenus)\n elif sample.general.closestrefseqgenus == 'NA':\n data += '{}\\n'.format(sample.name)\n # Write the header and data to file\n report.write(header)\n report.write(data)\n # Return the updated metadata object\n return metadata", "def __simulate_generation(self):\n global seq_to_fitness\n\n # 1. calculate fitness value of each chromosome.\n threads = []\n for chromosome in self.chromosomes:\n t = threading.Thread(target=chromosome.calculate_fitness_value())\n t.start()\n threads.append(t)\n\n for thread in threads:\n thread.join()\n\n for chromosome in self.chromosomes:\n key = ''.join(chromosome.genes)\n if key not in seq_to_fitness:\n seq_to_fitness[key] = chromosome.fitness_value\n\n # 2. sort the chromosomes by its fitness value and reverse the list,\n # because the chromosome with the lowest fitness value is the best.\n self.chromosomes.sort(key=lambda c: c.fitness_value)\n self.chromosomes = self.chromosomes[::-1]\n\n # 3. divide the chromosome into two halves and delete the weakest\n # chromosome.\n index_half = len(self.chromosomes) // 2\n lower_half = self.chromosomes[:index_half]\n upper_half = self.chromosomes[index_half:]\n\n # 4. delete four more weak chromosomes.\n del lower_half[0]\n random.shuffle(lower_half)\n\n for i in range(0, 3):\n lower_half.pop()\n\n # 5. crossover: fill the four vacancies in the population with new\n # chromosomes. 
The genes of the new chromosomes are mixtures of the\n # genes of two randomly chosen strong chromosomes.\n c1 = random.choice(upper_half)\n c2 = random.choice(upper_half)\n new_chromosomes = [\n Chromosome(c1.genes[:self.chromosome_size // 2]\n + c2.genes[self.chromosome_size // 2:],\n self.environment),\n Chromosome(c1.genes[self.chromosome_size // 2:]\n + c2.genes[:self.chromosome_size // 2],\n self.environment),\n Chromosome(c2.genes[:self.chromosome_size // 2]\n + c1.genes[self.chromosome_size // 2:],\n self.environment),\n Chromosome(c2.genes[self.chromosome_size // 2:]\n + c1.genes[:self.chromosome_size // 2],\n self.environment)]\n\n # 6. Get the fittest chromosome of this generation and perform\n # mutations on the remaining chromosomes.\n # The mutation probability for the upper half is 5 percent and\n # the mutation probability for the lower half is 10 percent.\n self.fittest_chromosome = upper_half.pop()\n self.__mutate(lower_half, 10)\n self.__mutate(upper_half, 5)\n\n # 7. Rejoin all chromosomes.\n upper_half.append(self.fittest_chromosome)\n self.chromosomes = lower_half + upper_half + new_chromosomes\n self.generation += 1", "def _cond_gccovgc(signals, x_idx, y_idx, ind_tx, conditional=True):\n d_x, d_y = signals[x_idx, :], signals[y_idx, :]\n n_lags, n_dt = ind_tx.shape\n gc = np.empty(3, dtype=d_x.dtype, order='C')\n # define z past\n z_indices = np.array([k for k in range(signals.shape[0]) if k not in [x_idx, y_idx]])\n d_z = signals[z_indices, :] # other roi selection\n rsh = int(len(z_indices) * (n_lags - 1)) # roi_range = 150-2 = 148; n_lags-1 = 5 --> rsh = 740\n\n x = d_x[ind_tx]\n y = d_y[ind_tx]\n # temporal selection\n x_pres, x_past = x[0], x[1:]\n y_pres, y_past = y[0], y[1:]\n xy_past = np.concatenate((x[1:], y[1:]), axis=0)\n # conditional granger causality case\n if conditional:\n # condition by the past of every other possible sources\n z_past = d_z[..., ind_tx[1:, :]] # (lag_past, dt) selection\n z_past = z_past.reshape(rsh, n_dt)\n # cat with past\n yz_past = np.concatenate((y_past, z_past), axis=0)\n xz_past = np.concatenate((x_past, z_past), axis=0)\n xyz_past = np.concatenate((xy_past, z_past), axis=0)\n else:\n yz_past, xz_past, xyz_past = y_past, x_past, xy_past\n\n # copnorm over the last axis (avoid copnorming several times)\n x_pres = copnorm_nd(x_pres, axis=-1)\n x_pres = np.expand_dims(x_pres, 0)\n x_past = copnorm_nd(x_past, axis=-1)\n y_pres = copnorm_nd(y_pres, axis=-1)\n y_pres = np.expand_dims(y_pres, 0)\n y_past = copnorm_nd(y_past, axis=-1)\n yz_past = copnorm_nd(yz_past, axis=-1)\n xz_past = copnorm_nd(xz_past, axis=-1)\n xyz_past = copnorm_nd(xyz_past, axis=-1)\n\n # -----------------------------------------------------------------\n # Granger Causality measures\n # -----------------------------------------------------------------\n gc[0] = cmi_nd_ggg(y_pres, x_past, yz_past)\n # gc(pairs(:,2) -> pairs(:,1))\n gc[1] = cmi_nd_ggg(x_pres, y_past, xz_past)\n # gc(pairs(:,2) . 
pairs(:,1))\n gc[2] = cmi_nd_ggg(x_pres, y_pres, xyz_past)\n return gc", "def civic_cancer_genes():\n\n civic_genes_location = os.path.join(data_location, 'gene_catalog', 'civic_gene_summaries.tsv')\n civic_genes_data = pd.read_csv(civic_genes_location, skipinitialspace=True, usecols=['name'], delimiter='\\t')\n civic_genes = list(civic_genes_data['name'])\n\n return civic_genes", "def can_mutate(self, ga, chromosome):\n return len(chromosome.genes) < len(ga._gene_bank)", "def test_tdg_gate_nondeterministic_minimal_basis_gates(self):\n shots = 2000\n circuits = ref_non_clifford.tdg_gate_circuits_nondeterministic(final_measure=True)\n targets = ref_non_clifford.tdg_gate_counts_nondeterministic(shots)\n job = execute(circuits, QasmSimulator(), shots=shots, basis_gates='U,CX')\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0.05 * shots)", "def get_gc_content(regions, fasta):\r\n\tnuc_count = {\"T\":0, \"t\":0, \"A\":0, \"a\":0, \"G\":1, \"g\":1, \"C\":1, \"c\":1}\r\n\r\n\tgc = 0\r\n\ttotal = 0\r\n\tfasta_obj = pysam.FastaFile(fasta)\r\n\tfor region in regions:\r\n\t\tseq = fasta_obj.fetch(region.chrom, region.start, region.end)\r\n\t\tgc += sum([nuc_count.get(nuc, 0.5) for nuc in seq])\r\n\t\ttotal += region.end - region.start\r\n\tfasta_obj.close()\r\n\tgc_content = gc / float(total)\r\n\r\n\treturn(gc_content)", "def test_check_cds_3(self):\n self.eval_flags[\"check_gene\"] = False\n import_genome.check_cds(self.cds1, self.eval_flags)\n self.assertEqual(len(self.cds1.evaluations), 11)", "def get_coverage_df(df_tbot_raw, df_coverage_nodes, conf_threshold):\n # Convert confidence to numeric type\n # df_tbot_raw['response.top_intent_confidence'] = pd.to_numeric(df_Tbot_raw['response.top_intent_confidence'])\n\n # Create a 'covered' flag and 'Not covered cause' in dataframe\n df_tbot_raw['Covered'] = True\n df_tbot_raw['Not Covered cause'] = None\n\n # Filter all the valid dialog node ids for non-coverage\n df_coverage_valid = df_coverage_nodes[df_coverage_nodes['Valid']] # ['dialog_node'].tolist()\n\n # (1) Mark all messages that hit any non-coverage node including but not limited to 'anything_else' as 'Not covered'\n # and update the 'Not Covered cause' column\n for node in df_coverage_valid['Node ID'].tolist():\n cause = \"'{}' node\".format(df_coverage_valid.loc[df_coverage_valid['Node ID'] == node, 'Condition'].values[0])\n df_tbot_raw.loc[\n (df_tbot_raw['response.output.nodes_visited_s'].apply(lambda x: bool(intersection(x, node)))), [\n 'Covered', 'Not Covered cause']] = [False, cause]\n\n # (2) Mark all messages that did not meet confidence threshold set as 'Not covered' and update the 'Not Covered\n # cause' column\n df_tbot_raw.loc[df_tbot_raw['response.top_intent_confidence'] < conf_threshold, ['Covered']] = False\n df_tbot_raw.loc[df_tbot_raw['response.top_intent_confidence'] < conf_threshold, [\n 'Not Covered cause']] = 'Classified below confidence threshold'\n return df_tbot_raw", "def pseudo_seurat(adata, arg_minpct, arg_mindiffpct, arg_logfcdiff):\n # define cells\n cluster_cells_ind = which_ind(adata.obs[\"idents\"] == \"1\")\n other_cells_ind = which_ind(adata.obs[\"idents\"] == \"0\")\n\n # compute perecentage expressed\n # from normnalised but not scaled data\n # remember cells are rows and genes are columns\n\n # note: I don't know why norm_counts[cluster_cell_ind:, col_ind] deosn\"t work, but it doesn't\n cluster_pct = (adata.X[cluster_cells_ind, :] > 0).sum(axis=0) / len(cluster_cells_ind)\n other_pct = 
(adata.X[other_cells_ind, :] > 0).sum(axis=0) / len(other_cells_ind)\n\n pcts = pd.DataFrame(np.vstack((cluster_pct, other_pct)).transpose())\n max_pct = pcts.max(axis=1)\n min_pct = pcts.min(axis=1)\n diff_pct = max_pct - min_pct\n take_diff_pct = diff_pct > arg_mindiffpct\n\n # remove genes that are not expressed higher than 0.1 in one of the groups\n take_min_pct = max_pct > arg_minpct\n\n\n # KEEP IN CASE NP.ARRAY METHOD USES TOO MUCH MEMORY\n # import time\n # this has the potential to be very slow. Transposeing it speeds it up a bit.\n # I need to understand sparse matrices better to make it work\n # start = time.time()\n # nct = adata.X.T[:,cluster_cells_ind]\n # cluster_mean0 = [exp_mean_sparse(nct[x,:]) for x in range(0,nct.shape[0])]\n # end = time.time()\n # print(end - start)\n #\n # start = time.time()\n # nct = adata.X.T[:, other_cells_ind]\n # other_mean0 = [exp_mean_sparse(nct[x,:]) for x in range(0, nct.shape[0])]\n # end = time.time()\n # print(end - start)\n\n # extract the counts for cluster cells and calculate exp means on each row\n nct = adata.X.T[:, cluster_cells_ind]\n cluster_mean = np.apply_along_axis(exp_mean_dense, 1, nct.todense())\n\n # likewise for non-cluster cells\n nct = adata.X.T[:, other_cells_ind]\n other_mean = np.apply_along_axis(exp_mean_dense, 1, nct.todense())\n diff_mean = abs(cluster_mean - other_mean)\n\n # remove genes with less than threshold difference\n take_thresh = diff_mean > arg_logfcdiff\n # take = if a cell passes all the tests then it is to be kept.\n take = [a and b and c for a, b, c in zip(take_thresh, take_min_pct, take_diff_pct)]\n print(\"saving universe for fisher test\")\n stats_df = pd.DataFrame(np.vstack((adata.var_names, cluster_mean, other_mean, diff_mean,\n cluster_pct, other_pct, max_pct, min_pct, diff_pct, take)).transpose(),\n columns=[\"gene\", \"cluster_mean\", \"other_mean\", \"diff_mean\",\n \"cluster_pct\", \"other_pct\",\n \"max_pct\", \"min_pct\", \"diff_pct\", \"background\"])\n return stats_df", "def insert_gene_expression( self, db, row2id, col2id, ratios, ratios_standardized ):\n\t\texp_data = []\n\t\tcounter = 0\n\t\tfor i in ratios.index.values:\n\t\t\tif counter%200 == 0:\n\t\t\t\tprint \"%s percent done\" % round( ( float(counter)/ratios.shape[0] )*100,1 )\n\t\t\tfor j in ratios.columns.values:\n\t\t\t\texp_data.append(\n\t\t\t\t\t{\n\t\t\t\t \t\"row_id\": row2id.loc[i].row_id,\n\t\t\t\t \t\"col_id\": col2id.loc[j].col_id,\n\t\t\t \t\t\"raw_expression\": ratios.loc[i,j],\n\t\t\t \t\t\"standardized_expression\": ratios_standardized.loc[i,j]\n\t\t\t\t \t} )\n\t\t\tcounter = counter + 1\n\n\t\t# write to mongoDB collection \n\t\tgene_expression_collection = db.gene_expression\n\t\t\n\t\t# Check whether documents are already present in the collection before insertion\n\t\tif gene_expression_collection.count() > 0:\n\t\t\td_f = filter( None, [ self.check4existence( gene_expression_collection, i ) for i in exp_data ] )\n\t\telse:\n\t\t\td_f = exp_data \n\n\t\tprint \"%s new records to write\" % len( d_f )\n\n\t\tif len(d_f) > 0:\n\t\t\tgene_expression_collection.insert( d_f )\n\n\t\treturn gene_expression_collection", "def _need_genes(config):\n need_genes = []\n for t in ['gene', 'gene1', 'gene2']:\n if (t in config.keys()) and config[t]:\n need_genes.append(config[t])\n if ('adj_gene' in config.keys()) and config['adj_gene']:\n if config['adj_gene'] == 'CTL':\n need_genes.extend(['CD8A', 'CD8B', 'PRF1', 'GZMA', 'GZMB'])\n else:\n need_genes.append(config['adj_gene'])\n if ('protein_gene' in config.keys()) 
and config['protein_gene']:\n need_genes.extend(config['protein_gene'])\n return(need_genes)", "def gc_var(sequence, as_overlap=False, k=20):\n # calculates the percent of gc content\n gc = get_gc_content(sequence) * 100\n # get the gc content in the window space as an array\n gc_i = np.array(gc_content_sequence_window(sequence, as_overlap, k=k))\n # get the len of the gc content in the window space\n len_gc_i = np.shape(gc_i)[0]\n # check the difference of each point \n dif = gc_i - gc\n return np.log((1 / len_gc_i) * sum(abs(dif)))", "def calc_GC(filepath):\n liste=['small.exon.piRNA_2.fa', 'small.exon.piRNA_1.fa', 'small.exon.piRNA_3.fa']\n \n length=list(range(0,34))\n d={}\n for i in length:\n d[i]={'A':0, 'G':0, 'T':0, 'C':0}\n for i in liste:\n with open(filepath+'/'+i, 'r') as f:\n for line in f:\n #fasta header starts with >\n if line.startswith('>'):\n pass\n else:\n line_l=list(line)\n for el in range(len(line_l)):\n if line_l[el]=='A':\n d[el]['A']+=1\n elif line_l[el]=='T':\n d[el]['T']+=1\n elif line_l[el]== 'G':\n d[el]['G']+=1\n elif line_l[el]== 'C':\n d[el]['C']+=1\n\n df=pd.DataFrame.from_dict(d)\n df=df.transpose()\n df.index = np.arange(1, len(df) + 1)\n \n\n df['A [%]']=df['A']/(df['A'].sum()+df['G'].sum()+df['C'].sum()+df['T'].sum())*100\n df['G [%]']=df['G']/(df['A'].sum()+df['G'].sum()+df['C'].sum()+df['T'].sum())*100\n df['T [%]']=df['T']/(df['A'].sum()+df['G'].sum()+df['C'].sum()+df['T'].sum())*100\n df['C [%]']=df['C']/(df['A'].sum()+df['G'].sum()+df['C'].sum()+df['T'].sum())*100", "def evaluate(genome):\n # base fitness\n fit = 1.0\n # promote 1001 starting motif\n matches = 0\n if genome.sequence_A[0] == 1:\n matches += 1\n if genome.sequence_A[1] == 0:\n matches += 1\n if genome.sequence_A[2] == 0:\n matches += 1\n if genome.sequence_A[3] == 1:\n matches += 1\n fit += matches * 0.1\n # finish\n return fit", "def detect_events(G_, G_cache_, theta_, avg_, std_, cc_lengths):\n small_ccs = []\n event_exists = 0\n threshold = theta_ * std_ + avg_\n\n H = nx.compose(G_, G_cache_)\n ccs = nx.connected_component_subgraphs(H)\n for cc in ccs:\n cc_nodes = cc.nodes()\n cc_len = len(cc_nodes)\n if cc_len < avg_:\n small_ccs += cc_nodes\n elif cc_len >= threshold:\n event_exists = 1\n\n H.remove_nodes_from(small_ccs)\n\n return (np.mean(cc_lengths), np.std(cc_lengths), event_exists, H)", "def detect_doublets(adata,marker_genes=[\"GCG\",\"INS\",\"SST\",\"PPY\",\"COL3A1\",\"CFTR\",\"PRSS2\",\"GHRL\"],inplace=True):\n counts=np.zeros((1,adata.shape[0]))\n for gene in marker_genes:\n gm = mixture.GaussianMixture(n_components=2, covariance_type='full',reg_covar=0.3)\n expressions = (adata[:,gene].X).reshape(-1,1)\n gm.fit(expressions)\n predictions = gm.predict(expressions)\n if gm.predict([[0]]):\n predictions = 1 - predictions\n counts= counts + predictions\n if inplace:\n adata._inplace_subset_obs((counts <=1)[0])\n else: \n #In that case, the doublets won't be removed, but the \"doublet score\" will be added to the anndata. 
This is useful for testing that this filter correctly identifies the doublets.\n adata.obs[\"doublets\"] = counts[0]", "def test_check_cds_17(self):\n self.cds1.gene = \"\"\n import_genome.check_cds(self.cds1, self.eval_flags)\n count = count_status(self.cds1, \"error\", \"warning\")\n self.assertEqual(count, 3)", "def geneExonicRegions(self, df):\n scaffold = df.iloc[0].scaffold\n strand = df.iloc[0].strand\n gene_type = df.iloc[0].gene_type\n gene_id = df.iloc[0].gene_id\n gene_name = df.iloc[0].gene_name\n start = df.start.min()\n end = df.end.max()\n bp = [False] * (end - start + 1)\n for i in range(df.shape[0]):\n s = df.iloc[i]['start'] - start\n e = df.iloc[i]['end'] - start + 1\n bp[s:e] = [True] * (e - s)\n regions = list(range(start, end + 1))\n groups = []\n\n for i, j in groupby(bp):\n groups.append((i, len(list(j))))\n e_start = 0\n\n for i in groups:\n e_end = e_start + i[1]\n if i[0]:\n record = Record(scaffold=scaffold, start=regions[e_start],\n end=regions[e_end - 1], gene_type=gene_type, gene_id=gene_id,\n gene_name=gene_name, strand=strand)\n yield record\n e_start += i[1]", "def TranscriptionFind(genome, gene_start_stop_dict,\n gene_first_exon_dict, gene_scaff_dict,\n gene_direction, gene_set, gene_gff_line,\n bam, stand_dev_threshold, walk, min_value,\n interation_value, out_file, logger, TITLE,\n keep_gene_depth,\n default_cutoff,\n test_mode):\n logger.info(\"RESULTS in outfile: %s\", out_file)\n genome_index = index_genome_file(genome, logger)\n depth_set = set([])\n # open outfile:\n out_file_gff = out_file.split(\".\")[0] + \"based_on_min_value.gff\"\n out_file_gff2 = out_file.split(\".\")[0] + \"based_on_SD_threshold.gff\"\n logger.info(\"gff info will be in %s\", out_file_gff)\n gff_out = open(out_file_gff, \"w\")\n gff_sd_out = open(out_file_gff2, \"w\")\n gene_failed_count = 0\n gene_results_printed_count = 0\n fall_off_contig_count = 0\n default_cutoff = float(default_cutoff)\n logger.info(\"any problem and default cutoff is used. 
Which is %.1f\",\n default_cutoff)\n\n with open(out_file, 'w') as file_out:\n file_out.write(TITLE)\n for gene in gene_set:\n gene = gene.rstrip()\n start_stop = gene_start_stop_dict[gene]\n start, stop = start_stop.split(\"\\t\")\n start =int(start)\n stop = int(stop)\n scaffold = gene_scaff_dict[gene]\n scaffold = scaffold.rstrip()\n direction = gene_direction[gene]\n if gene in gene_first_exon_dict:\n exon_start_exon_stop = gene_first_exon_dict[gene]\n exon_start, exon_stop = exon_start_exon_stop.split(\"\\t\")\n exon_start =int(exon_start)\n exon_stop = int(exon_stop)\n else:\n exon_start =int(start)\n exon_stop = int(stop)\n # call samtools to get the depth per posititon for\n # the transcript of interest\n depth_filename = os.path.join(\"temp_reads_per_base\",\n gene + \"_depth.tmp\")\n #exon_depth_file = os.path.join(\"temp_reads_per_base\",\n #gene + \"_exon_depth.tmp\")\n scaffold_depth_file = os.path.join(\"temp_reads_per_base\",\n scaffold + \"_depth.tmp\")\n scaffold_start_stop = \"%s:%s-%s\" %(scaffold, start, stop)\n # call the func to run\n if scaffold_depth_file not in depth_set:\n depth_set.add(scaffold_depth_file)\n # print(\"not seen %s\" % scaffold)\n pipe = run_samtools_depth(scaffold, bam_file,\n scaffold_depth_file, logger)\n # call the depth for the gene specifically\n pipe = run_samtools_depth(scaffold_start_stop, bam_file,\n depth_filename, logger)\n if \"Y\" not in keep_gene_depth.upper():\n # can keep the gene depth file, or not\n os.remove(depth_filename)\n\n # assign zeros to all positions of the transcript,\n # as samtool does no report zeros\n seq_record = genome_index[scaffold]\n if \"Y\" in test_mode.upper():\n outstr = \" \".join([\"scaff = %s\" % scaffold,\n \"len scaffold = %d \" % (len(seq_record.seq)),\n \"gene = %s \" % gene,\n \"depth scaff out = \",\n scaffold_depth_file])\n logger.info(outstr)\n\n all_coverage = [0] * len(seq_record.seq)\n if \"Y\" in test_mode.upper():\n outstr = \"length all_cov = %d\" % len(all_coverage)\n logger.info(outstr)\n all_coverage = fill_in_zero_cov(all_coverage,\n scaffold_depth_file)\n # print(\"seq = \", len(seq_record.seq))\n # print(exon_all_coverage)\n # get the mean and std reads per base for exon 1\n exon_mean, exon_stdDev = avg_std_dev(all_coverage\n [exon_start:exon_stop])\n # get the mean and std reads per base for exon 1\n gene_mean, gene_stdDev = avg_std_dev(all_coverage\n [start:stop])\n if exon_mean == 0:\n warn = \"No RNAseq expression for gene exon 1 %s\" % gene\n logger.warning(\"%s: gene failed\", warn)\n gene_failed_count = gene_failed_count + 1\n continue\n out_str = \"\\t\".join([gene + \":\",\n \"Cov min: %i\" % min(all_coverage),\n \"max: %i\" % max(all_coverage),\n \"gene mean %0.2f:\" % gene_mean,\n \"gene std %0.2f:\" % gene_stdDev,\n \"Sliced section:\",\n \"exon mean %0.2f\" % exon_mean,\n \"exon std: %0.2f\" % exon_stdDev,\n direction])\n # logger.info(out_str)\n cut_off = exon_mean - (int(stand_dev_threshold) * exon_stdDev)\n position_mean_cov = mean(all_coverage[exon_start:exon_stop])\n # walk in 3 bases to find the position where coverage sig drops\n current_end = stop\n current_start = start\n position_mean_cov = 10000000000000\n if cut_off < default_cutoff:\n logger.warning(\"%s gene cut off set to %.1f\", gene, default_cutoff)\n cut_off = default_cutoff\n write = \"yes\"\n while position_mean_cov >= cut_off:\n current_start, current_end = walk_away_from_end(current_start,\n current_end,\n direction, walk)\n current_start, current_end = add_one_direct_aware(current_start,\n 
current_end,\n interation_value,\n direction)\n if current_start < 1:\n logger.warning(\"%s has fallen off start scaffold %s\",\n gene,\n scaffold)\n position_mean_cov = 0\n write = \"no\"\n fall_off_contig_count = fall_off_contig_count + 1\n break\n if current_end >= len(seq_record.seq):\n logger.warning(\"%s has fallen off end scaffold %s\",\n gene,\n scaffold)\n position_mean_cov = 0\n write = \"no\"\n fall_off_contig_count = fall_off_contig_count + 1\n break\n position_mean_cov = mean(all_coverage\n [current_start:current_end])\n if position_mean_cov == False:\n position_mean_cov = 0\n #print(\"setting position_mean_cov to: \", position_mean_cov)\n # run the while loop again to find the position where the expression\n # is less than the option min value\n current_end1 = stop\n current_start1 = start\n position_mean_cov = 10000000000\n write_min_value = \"ok\"\n while position_mean_cov >= int(min_value):\n current_start1, current_end1 = walk_away_from_end(current_start1,\n current_end1,\n direction, walk)\n current_start1, current_end1 = add_one_direct_aware(current_start1,\n current_end1,\n interation_value,\n direction)\n if current_start < 1:\n logger.warning(\"%s has fallen off start scaffold %s\", gene, scaffold)\n position_mean_cov = 0\n write_min_value = \"not_ok\"\n break\n if current_end >= len(seq_record.seq):\n logger.warning(\"%s has fallen off end scaffold %s\", gene, scaffold)\n position_mean_cov = 0\n write_min_value = \"not_ok\"\n break\n # print(\"bases = \", all_coverage[current_start1:current_end1], \"\\n\")\n position_mean_cov = mean(all_coverage[current_start1:current_end1])\n if position_mean_cov == False:\n position_mean_cov = 0\n break\n\n out_str = \"\\t\".join([gene,\n str(current_start),\n str(current_end),\n str(seq_record.seq[current_start:current_end]),\n str(current_start1),\n str(current_end1),\n str(seq_record.seq[current_start1:current_end1]),\n \"%s\" % start,\n \"%s\" % stop,\n \"%0.2f\" % gene_mean,\n \"%0.2f\" % gene_stdDev,\n \"%0.2f\" % exon_mean,\n \"%0.2f\" % exon_stdDev,\n direction,\n \"\\n\"])\n if current_start1 > 0 and current_end1 > 0 and current_start > 0 and current_end > 0:\n if write == \"yes\" and write_min_value == \"ok\":\n # print(\"writing: \", out_str)\n file_out.write(out_str)\n GENE_gff = gene_gff_line[gene]\n # for the min value approach\n\n new_gff_line1, UTR_start, UTR_stop = create_gff_line(GENE_gff, gene,\n current_start1,\n current_end1)\n Min_val_Hits_geneic_or_not = iterate_coordinate_dict(gene_gff_line,\n gene,\n scaffold,\n UTR_start,\n UTR_stop,\n logger)\n if Min_val_Hits_geneic_or_not == \"HITS genic region\":\n gene_failed_count = gene_failed_count + 1\n continue\n if Min_val_Hits_geneic_or_not == \"OK\":\n gff_out.write(new_gff_line1)\n # for the standard dev approach\n new2_gff_line, UTR_start, UTR_stop = create_gff_line(GENE_gff, gene,\n current_start,\n current_end)\n # Check to see if this hits a gene or not\n sd_geneic_or_not = iterate_coordinate_dict(gene_gff_line,\n gene,\n scaffold,\n UTR_start,\n UTR_stop,\n logger)\n if sd_geneic_or_not == \"HITS genic region\":\n gene_failed_count = gene_failed_count + 1\n continue\n if sd_geneic_or_not == \"OK\":\n gff_sd_out.write(new2_gff_line)\n gene_results_printed_count = gene_results_printed_count + 1\n else:\n gene_failed_count = gene_failed_count + 1\n \n logger.info(\"deleting scaffold depth files\")\n for depthfile in depth_set:\n os.remove(depthfile) \n logger.info(\"main function finished. 
%d gene failed\", gene_failed_count)\n logger.info(\"Results generated for . %d gene\", gene_results_printed_count)\n logger.info(\"fall_off_contig_count = %d\", fall_off_contig_count)\n gff_out.close()", "def copy_test(df):\n # :TODO change to groups\n dfg = ((df['var_type'] == 'copy number gain') |\\\n ( df['var_type'] == 'copy number loss') |\\\n ( df['var_type'] == 'copy number variation'))\n # append together\n cnv = df.ix[dfg, :]\n return(cnv)", "def build_gene_indexes(df):\n\tgeneDict = OrderedDict()\n\n\tgeneCount = 0\n\tpreviousGeneIndex = 0\n\n\tcurrent_id=\"\"\n\tcurrent_gene=\"\"\n\n\tfor i in range(len(df)):\n\n\t\tif df.loc[i,'feature'] == 'gene':\n\t\t\ttrdict = parse_entry(df.loc[i,'transcript_id'])\n\n\t\t\tcurGeneID = trdict['gene_id'][0]\n\t\t\n\t\t\tif geneCount != 0:\n\t\t\t\tnewGeneIndex = i\n\t\t\t\tgeneDict[current_id] = [previousGeneIndex,newGeneIndex]\n\t\t\t\tpreviousGeneIndex = i\n\t\t\t\tcurrent_id = trdict['gene_id'][0]\n\t\t\t\tgeneCount += 1\n\n\t\t\telse:\n\t\t\t\tnewgeneIndex = 0\n\t\t\t\tgeneCount +=1\n\t\t\t\tcurrent_id = trdict['gene_id'][0]\n\t\tif i == (len(df)-1):\n\t\t\tnewGeneIndex = i+1\n\t\t\tcurrent_id = trdict['gene_id'][0]\n\t\t\tgeneDict[current_id] = [previousGeneIndex,newGeneIndex]\n\treturn geneDict", "def gen_dist(genes):\n\n # First generate an NxNxB matrix that has False where\n # i and j individuals have the same kth gene and True\n # otherwise (XOR operation). Then sum along\n # the genome axis to get distance\n return np.sum(genes[:,None,:] ^ genes, axis=-1)", "def findgene(fname, dbpaths=dbpaths):\n scaf = []\n gbeg = []\n gend = []\n gfor = []\n gsta = []\n gdif = []\n cuffgenes = {}\n\n fobj = open(fname)\n for line in fobj:\n col = line.split()\n scaf.append( re.search('[sCcafold]*[0-9]+', col[3]).group() )\n gbeg.append( int(re.search(':(.*)-', col[3]).groups()[0]) )\n gend.append( int(re.search('-(.*)', col[3]).groups()[0]) )\n gfor.append(float(col[7]))\n gsta.append(float(col[8]))\n gdif.append(float(col[9]))\n\n fobj.close()\n print \"Significant transcripts read\"\n\n\n for result in range(len(scaf)):\n cur_scaf = scaf[result]\n cur_gbeg = gbeg[result]\n cur_gend = gend[result]\n cur_gfor = gfor[result]\n cur_gsta = gsta[result]\n cur_gdif = gdif[result]\n fobj = open(dbpaths['gff'])\n for line in fobj:\n col = line.split()\n if col[2] == \"mRNA\":\n if col[0] == cur_scaf:\n if float(col[3]) <= cur_gend and float(col[4]) >= cur_gbeg:\n try:\n cuffgenes[(cur_scaf, cur_gbeg)] = (re.search('ID=([^;]*);', col[8]).groups()[0], cur_scaf, cur_gbeg, cur_gend, cur_gfor, cur_gsta, cur_gdif)\n except AttributeError:\n print col[8]\n fobj.close()\n\n return cuffgenes", "def get_entropy(self, documents):\n entropy = 0\n for c in self.classes:\n count = 0\n for document in documents:\n if document.c == c:\n count += 1\n if count != 0 and len(documents) != 0:\n ratio = count / len(documents)\n entropy -= ratio * log(ratio, 2)\n return entropy", "def createGTUnique(classesDict, length, gtList):\n\n y_GT = np.empty([length])\n y_GT.fill(-1) #-1 corresponds to no label given\n\n classesNotTrained = []\n for i in range(len(gtList)):\n \"\"\" Fill array from start to end of each ground truth label with the correct label: \"\"\"\n if gtList[i][2] == \"start\":\n tmpContext = gtList[i][1]\n start = getIndex(float(gtList[i][0]))\n\n # Find the end time of this context:\n for j in range(i,len(gtList)):\n if ((gtList[j][1] == tmpContext) and (gtList[j][2] == \"end\")):\n\n end = getIndex(float(gtList[j][0]))\n if end >= y_GT.shape[0]:\n end = 
y_GT.shape[0] - 1\n\n \"\"\" Fill ground truth array, and check if our classifier was \n trained with all labels of the test file, if not give warning: \"\"\"\n\n if gtList[i][1] not in classesDict.keys():\n classesNotTrained.append(gtList[i][1])\n y_GT[start:end+1].fill(-1)\n \n else:\n y_GT[start:end+1].fill(classesDict[tmpContext])\n \n break\n \n if classesNotTrained:\n for el in set(classesNotTrained):\n print(\"The classifier wasn't trained with class '\" + \n el + \"'. It will not be considered for testing.\")\n return y_GT" ]
[ "0.62168896", "0.5950708", "0.5604085", "0.5591386", "0.5474958", "0.54715043", "0.5465627", "0.5456833", "0.535208", "0.5323605", "0.5284358", "0.52755475", "0.5262531", "0.52539337", "0.52331626", "0.520046", "0.5148381", "0.507968", "0.5076866", "0.5065854", "0.5053424", "0.5049818", "0.5047182", "0.50386256", "0.50204575", "0.50156444", "0.50059795", "0.49790072", "0.49786723", "0.49635655", "0.49555042", "0.49510098", "0.49488205", "0.49312502", "0.49081844", "0.4900791", "0.4900714", "0.4898717", "0.48948404", "0.48810816", "0.48742488", "0.48719248", "0.48377094", "0.48254988", "0.48232555", "0.4815878", "0.48080426", "0.48080426", "0.4801781", "0.47976318", "0.47960123", "0.4783609", "0.47770247", "0.47767162", "0.47729695", "0.4771632", "0.47526038", "0.47474316", "0.47329482", "0.4730522", "0.47136164", "0.47095665", "0.4709302", "0.47026724", "0.46941644", "0.4691554", "0.46913156", "0.46893132", "0.4686117", "0.46785998", "0.46779355", "0.46762183", "0.46729577", "0.46720266", "0.46700078", "0.46681562", "0.4655078", "0.46540034", "0.4653928", "0.46446028", "0.46381828", "0.46244135", "0.4622292", "0.46218112", "0.46214887", "0.46160185", "0.46126375", "0.46093655", "0.46068305", "0.46063733", "0.46048236", "0.46037102", "0.460172", "0.4601401", "0.45993316", "0.45983243", "0.45958322", "0.45884153", "0.45872262", "0.45861" ]
0.6928965
0
Subsets insertions for given gene windows.
def _subset_to_windows(
        insertions,   # type: List[Insertion]
        gene_windows  # type: Dict[str, Tuple[str, int, int]]
):  # type: (...) -> List[Insertion]
    # Create lookup trees.
    trees = {
        chrom: IntervalTree.from_tuples((i[1:]) for i in chrom_int)
        for chrom, chrom_int in itertools.groupby(
            sorted(gene_windows.values()), operator.itemgetter(0))
    }

    # Determine which insertions overlap tree intervals and
    # correspond to genes with known gene window.
    def _in_windows(ins, trees):
        try:
            return trees[ins.seqname].overlaps(ins.position)
        except KeyError:
            return False

    return [
        ins for ins in insertions
        if ins.metadata['gene_id'] in gene_windows and _in_windows(ins, trees)
    ]
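A minimal usage sketch of the function above (not part of the original record): the `Insertion` namedtuple is a hypothetical stand-in for the project's insertion class, and the `intervaltree` package is assumed to provide the `IntervalTree` referenced in the document; both are illustrative assumptions, not the original project's definitions.

import itertools
import operator
from collections import namedtuple

from intervaltree import IntervalTree

# Hypothetical stand-in for the Insertion type used above.
Insertion = namedtuple('Insertion', ['seqname', 'position', 'metadata'])

# Gene windows keyed by gene id: (chromosome, start, end).
gene_windows = {'GeneA': ('1', 1000, 2000)}

insertions = [
    Insertion('1', 1500, {'gene_id': 'GeneA'}),  # inside window -> kept
    Insertion('1', 5000, {'gene_id': 'GeneA'}),  # outside window -> dropped
    Insertion('2', 1500, {'gene_id': 'GeneB'}),  # no window for this gene -> dropped
]

# Expect only the first insertion to survive the subsetting.
print(_subset_to_windows(insertions, gene_windows))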
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def filter_windows(sliding_windows_file, genes_file, output_file):\n\n\t# Read sliding windows file and create a list in the form\n\t# genes = [('gene1', 1000, 2000), ('gene2', 4000, 45000)]\n\tgenes = []\t\t# this could be a dictionary but I prefer not\n\tfor line in genes_file:\n\t\tline = line.strip()\n\n\t\tif line and not line.startswith('#'):\t\t# if line is not empty and not a comment\n#\t\tif line and re.match('\\d+', line):\n\t\t\tlogging.debug((\"line: %s\" %line))\n\t\t\tfields = line.split()\t\t# it is better to use the default splitting algorithm here.\n\t\t\t\t\t\t\t\t\t\t# read help(''.split)\t\n\n\t\t\tgene_name = fields[0]\n\t\t\tlogging.debug((\"fields: %s\" %fields))\n\t\t\tstart = int(fields[2])\n\t\t\tend = int(fields[3].strip())\t\t# remove \\n\\r, like chomp\n\t\t\tgenes.append((gene_name, start, end))\n\t\t\t\n#\tlogging.debug((\"genes :\", genes))\t\t# print the contents of genes, if level=loggin.DEBUG\n\n\t# read sliding windows file, and select windows that fall in genes\n\toutput = '#gene_name, gene_start, gene_end, window_start, window_middle, window_end, population, number, score\\n'\n\toutputlineskeleton = \"%s\\t%d\\t%d\\t%d\\t%d\\t%d\\t%s\\t%s\\t%s\\n\"\t# %(gene_name, gene_start, gene_end, window_start, window_middle, window_end, population, number, score)\n\n\tfor line in sliding_windows_file:\n\t\tline = line.strip()\t\t# remove trailing characters (like chomp)\n\t\tif line and not line.startswith('#'):\n\t\t\twindow_fields = line.split()\n\n#\t\t\tlogging.debug(window_fields)\n\t\t\twindow_start = int(window_fields[0])\n\t\t\twindow_middle = int(window_fields[2])\n\t\t\twindow_end = int(window_fields[1])\n#\t\t\tgene = window_fields[3]\n\t\t\tpopulation = window_fields[4]\n\t\t\tnumber = window_fields[5]\n\t\t\tscore = window_fields[6]\n\n\t\t\tfor gene in genes:\n\t\t\t\tgene_start = int(gene[1])\n\t\t\t\tgene_end = int(gene[2])\n\t\t\t\tgene_name = gene[0]\n\t\t\t\t# if window_start is comprised between gene_end and gene_start\n\t\t\t\tif gene_end > window_start >= gene_start:\n\t\t\t\t\tlogging.debug(\"This window starts inside gene %s (%s, %s)\" %(gene[0], gene_start, gene_end))\n\t\t\t\t\tlogging.debug(line)\n\t\t\t\t\toutput += outputlineskeleton % (gene_name, gene_start, gene_end, window_start, window_middle, window_end, population, number, score)\n\t\t\t\telif gene_end >= window_end > gene_start:\n\t\t\t\t\tlogging.debug(\"This window ends inside gene %s (%s, %s)\" %(gene[0], gene_start, gene_end))\n\t\t\t\t\tlogging.debug(line)\n\t\t\t\t\toutput += outputlineskeleton % (gene_name, gene_start, gene_end, window_start, window_middle, window_end, population, number, score)\n\t\n\tlogging.debug(output)\n\toutput_file.write(output)\n\toutput_file.seek(0)\n\treturn output_file", "def select_windows(start, stop, num_windows,\n window_width=1, window_units=\"D\",\n sampling=1, sampling_units=\"T\",\n no_overlaps=True, verbose=True):\n\n # Create all sample candidates\n dt_range = pd.date_range(start, stop-pd.Timedelta(window_width),\n freq=\"%i%s\" % (sampling, sampling_units))\n\n # Sample candidate windows\n selected_windows = np.random.choice(dt_range, num_windows, replace=False)\n selected_windows = pd.DataFrame(selected_windows, columns=[\"start\"])\n\n # Calculate window end\n end_delta = (pd.Timedelta(window_width, unit=window_units)\n - pd.Timedelta(sampling,\n unit=\"m\" if sampling_units==\"T\" else sampling_units))\n selected_windows[\"end\"] = (selected_windows[\"start\"] + end_delta)\n\n # Filter overlaps\n if not no_overlaps:\n return 
selected_windows\n else:\n selected_windows = filter_overlaps(selected_windows,\n pd.Timedelta(window_width,\n unit=window_units))\n\n while selected_windows.shape[0]<num_windows:\n if verbose:\n print(\"Got %i windows...\" % selected_windows.shape[0])\n\n selected_windows = pd.concat([selected_windows,\n select_windows(start, stop, num_windows,\n window_width, window_units,\n sampling, sampling_units,\n no_overlaps=False)],\n ignore_index=True)\n selected_windows = filter_overlaps(selected_windows,\n pd.Timedelta(window_width,\n unit=window_units))\n return selected_windows.iloc[:num_windows]", "def create_subsets(self, start_ids):\n subsets = list()\n df = self.all_df.copy()\n for sid in start_ids:\n df2 = df.loc[sid:, :]\n subsets.append(df.drop(df2.index, axis=0))\n df = df2.copy()\n subsets.append(df)\n return subsets", "def main():\n\tsliding_windows_file_path = ''\n\tgenes_file_path = ''\n\toutput_file_path = ''\n\n\t# Read arguments and parameters\n\ttry:\n\t\topts, args = getopt.getopt(sys.argv[1:], \"g:w:o:ht\", [\"genes=\", \"window=\", \"output=\", \"help\", \"test\"])\n\n\texcept getopt.GetoptError, err:\n\t\tusage()\n\t\tsys.exit(2)\n\t\n\tif opts == []:\n\t\tusage()\n\t\tsys.exit()\n\n\tfor opt, arg in opts:\n\t\tif opt in ('-h', '--help'):\n\t\t\tusage()\n\t\t\tsys.exit()\n\t\telif opt in ('--genes', '-g'):\n\t\t\tgenes_file_path = arg\n\t\telif opt in ('--window', '-w'):\n\t\t\tsliding_windows_file_path = arg\n\t\telif opt in ('--output', '-o'):\n\t\t\toutput_file_path = arg\n\t\telif opt in ('--test', '-t'):\n\t\t\t_test()\n\t\t\tsys.exit()\n\n\t# default values\n\tif not sliding_windows_file_path:\n\t\tprint \"using default parameters windows file!\"\n\t\tsliding_windows_file_path = '../data/Resultats_lower_daf.txt'\n\telif not genes_file_path:\n\t\tprint \"using default genes file!\"\n\t\tgenes_file_path = '../data/Genes.txt'\n\telif not output_file_path:\n\t\tprint \"using default output file!\"\n\t\toutput_file_path = '../results/filtered_windows.txt'\n\n\tsliding_windows_file = file(sliding_windows_file_path, 'r')\n\tgenes_file = file(genes_file_path, 'r')\n\toutput_file = file(output_file_path, 'w')\n\n\tfilter_windows(sliding_windows_file, genes_file, output_file)", "def genChunkTestSets(data, nSets, ws, gapSize, dirName=\"test_data/\", ofCut=0.9):\n # Start times for windows with at least ofCut of data observed\n tOFCut = np.where(windowObsFrac(data, ws) > ofCut)[0]\n\n # Choose times for test intervals\n np.random.seed(np.random.randint(0, 100))\n sampleTs = np.random.choice(tOFCut, size=nSets, replace=False)\n\n for ti in sampleTs:\n # Randomly select a sensor\n sensor = np.random.randint(0, data.shape[1])\n # Remove some data to use for testing\n _, removedTimes = removeChunk(data, ti, ws, sensor, gapSize)\n\n # Save data in csvs\n np.savetxt(dirName + \"/ti=%i_tf=%i_sensor=%i.csv\"%(ti, ti+ws, sensor), removedTimes, \\\n delimiter=\" \", fmt=\"%i\")", "def create_subset_list(self):\n\n row = 0\n for time_step in self.min_increments:\n subset = SubsetClass(time_step=time_step, query_df=self.query_df, model_df=self.model_df, row=row)\n self.subset_list.append(subset)\n row += 1", "def subsets(self):\n \n # note subsets have an unusual encoding\n query = \"\"\"\n prefix oboInOwl: <http://www.geneontology.org/formats/oboInOwl#>\n SELECT DISTINCT ?s WHERE {{\n GRAPH <{g}> {{\n ?c oboInOwl:inSubset ?s \n }}\n }}\n \"\"\".format(g=self.graph_name)\n bindings = run_sparql(query)\n return [r['s']['value'] for r in bindings]", "def test_ctgs(\n insertions, # type: 
List[Insertion]\n reference, # type: Reference\n gene_ids=None, # type: Set[str]\n chromosomes=None, # type: Set[str]\n pattern=None, # type: str\n per_sample=True, # type: bool\n window=None #type: Tuple[int, int]\n):\n\n # Default to shared chromosome sequences (typically drops some\n # of the more esoteric extra scaffold/patch sequences).\n if chromosomes is None:\n reference_seq = pyfaidx.Fasta(str(reference.fasta_path))\n reference_gtf = GtfIterator(reference.indexed_gtf_path)\n\n chromosomes = list(\n set(reference_seq.keys()) & set(reference_gtf.contigs))\n\n if len(chromosomes) == 0:\n ValueError('No chromosomes are shared between the reference '\n 'sequence and reference gtf files')\n\n if len(chromosomes) == 0:\n raise ValueError('At least one chromosome must be given')\n\n # Determine gene windows using GTF.\n logging.info('Generating gene windows')\n gene_windows = _build_gene_windows(\n reference.indexed_gtf_path, window=window, chromosomes=chromosomes)\n\n # Subset insertions to gene intervals.\n insertions = _subset_to_windows(insertions, gene_windows)\n\n if gene_ids is None:\n gene_ids = set(ins.metadata['gene_id'] for ins in insertions)\n\n # Collapse insertions per gene/sample (recommended).\n # Corrects for hopping/multiple detection issues.\n if per_sample:\n logging.info('Collapsing insertions')\n insertions = list(_collapse_per_sample(insertions))\n\n # Calculate total number of pattern occurrences within intervals.\n logging.info('Counting pattern occurrences')\n reference_seq = pyfaidx.Fasta(str(reference.fasta_path))\n\n total = count_total(\n reference_seq, pattern=pattern, intervals=gene_windows.values())\n\n # Calculate p-values for each gene.\n logging.info('Calculating significance for genes')\n insertion_trees = GenomicIntervalTree.from_objects_position(\n insertions, chrom_attr='seqname')\n\n p_values = {\n gene_id: test_region(\n insertions=insertions,\n reference_seq=reference_seq,\n region=gene_windows[gene_id],\n total=total,\n pattern=pattern,\n filters=[lambda ins, gid=gene_id: ins.metadata['gene_id'] == gid],\n insertion_trees=insertion_trees)\n for gene_id in gene_ids\n }\n\n # Build result frame.\n result = pd.DataFrame.from_records(\n iter(p_values.items()), columns=['gene_id', 'p_value'])\n\n # Calculate corrected p-value using bonferroni correction.\n result['q_value'] = (result['p_value'] * len(result)).clip_upper(1.0)\n\n # Sort by q-value and p-value.\n result.sort_values(by=['q_value', 'p_value'], inplace=True)\n\n if len(insertions) > 0:\n # Annotate with gene_name if possible.\n if 'gene_name' in insertions[0].metadata:\n name_map = {\n ins.metadata['gene_id']: ins.metadata['gene_name']\n for ins in insertions\n }\n result.insert(1, 'gene_name', result['gene_id'].map(name_map))\n else:\n result['gene_name'] = np.nan\n\n # Annotate with frequency.\n frequency = (Insertion.to_frame(insertions)\n .groupby('gene_id')['sample'].nunique()\n .reset_index(name='n_samples'))\n result = pd.merge(result, frequency, on='gene_id', how='left')\n else:\n result['gene_name'] = np.nan\n result['n_samples'] = np.nan\n\n return result", "def build_subsets(self):\n self.all = nrn.SectionList()\n self.all.wholetree(sec=self.soma)", "def split():\n flag = 0\n for chromosome in region:\n for inf in region[chromosome]:\n if flag == 0:\n if chromosome not in test_set:\n test_set[chromosome] = [inf]\n else:\n test_set[chromosome].append(inf)\n else:\n if chromosome not in train_set:\n train_set[chromosome] = [inf]\n else:\n train_set[chromosome].append(inf)\n\n 
flag += 1\n flag %= 10", "def newGeneration(self):\n for i in range(0, len(self.population)):\n [ind1, ind2] = self.randomSelection()\n child = self.crossover(ind1, ind2)\n self.population[i].setGene(child)\n self.mutation(self.population[i])", "def create_subsets(subset_name, num_bg_ratings):\n bgplus_df = bgplus_table.get_full_df()\n details_df = details_table.get_full_df()\n users_df = users_table.get_full_df()\n ratings_df = ratings_table.get_full_df()\n ratings_df = ratings_df[ratings_df[\"bg_id\"].isin(details_df.iloc[:num_bg_ratings].index)]\n\n dump_dfs_to_trepos(subset_name, [(bgplus_df, bgplus_table), (details_df, details_table), (users_df, users_table), (ratings_df, ratings_table)])", "def split(self, X: tp.ArrayLike, n: tp.Optional[int] = None, min_len: int = 1, **kwargs) -> RangesT:\n X = to_any_array(X)\n if isinstance(X, (pd.Series, pd.DataFrame)):\n index = X.index\n else:\n index = pd.Index(np.arange(X.shape[0]))\n\n # Resolve start_idxs and end_idxs\n start_idxs = np.full(len(index), 0)\n end_idxs = np.arange(len(index))\n\n # Filter out short ranges\n window_lens = end_idxs - start_idxs + 1\n min_len_mask = window_lens >= min_len\n if not np.any(min_len_mask):\n raise ValueError(f\"There are no ranges that meet window_len>={min_len}\")\n start_idxs = start_idxs[min_len_mask]\n end_idxs = end_idxs[min_len_mask]\n\n # Evenly select n ranges\n if n is not None:\n if n > len(start_idxs):\n raise ValueError(f\"n cannot be bigger than the maximum number of windows {len(start_idxs)}\")\n idxs = np.round(np.linspace(0, len(start_idxs) - 1, n)).astype(int)\n start_idxs = start_idxs[idxs]\n end_idxs = end_idxs[idxs]\n\n return split_ranges_into_sets(start_idxs, end_idxs, **kwargs)", "def __create_windows(self, dat_in, dat_out, sequential):\n print(\"Creating windows...\")\n num_pkts = dat_in.shape[0]\n num_wins = math.ceil(num_pkts / self.win)\n fets = [(name, typ) for name, typ in dat_in.dtype.descr if name != \"\"]\n # Select random intervals from this simulation to create the\n # new input data. Do not pick indices between 0 and self.win\n # to make sure that all windows ending on the chosen index fit\n # within the simulation.\n pkt_idxs = random.choices(range(self.win, num_pkts), k=num_wins)\n # The new data format consists of self.win copies of the\n # existing input features. All copies of a particular feature\n # share the same scaling group.\n scl_grps, dtype = zip(\n *[(scl_grp, (f\"{name}_{idx}\", typ))\n for idx in range(self.win)\n for scl_grp, (name, typ) in enumerate(fets)])\n scl_grps = np.array(scl_grps)\n dat_in_new = np.zeros((num_wins,), dtype=list(dtype))\n\n for win_idx, end_idx in enumerate(pkt_idxs):\n # This could be done on a single line with a range select\n # and a generator, but this version is preferable because\n # it removes intermediate data copies and guarantees that\n # the resulting row is properly ordered.\n for fet_idx, pkt_idx in enumerate(\n range(end_idx - self.win + 1, end_idx + 1)):\n for name, _ in fets:\n dat_in_new[f\"{name}_{fet_idx}\"][win_idx] = (\n dat_in[pkt_idx][name])\n\n # Verify that we selected at least as many windows as we intended to.\n num_selected_wins = len(dat_in_new)\n assert num_selected_wins >= num_wins, \\\n f\"Insufficient windows: {num_selected_wins} < {num_wins}\"\n\n # As an output feature, select only the final ground truth\n # value. 
I.e., the final ground truth value for this window\n # becomes the ground truth for the entire window.\n return dat_in_new, np.take(dat_out, pkt_idxs), scl_grps", "def split(self, X: tp.ArrayLike, n: tp.Optional[int] = None, window_len: tp.Optional[float] = None,\n min_len: int = 1, **kwargs) -> RangesT:\n X = to_any_array(X)\n if isinstance(X, (pd.Series, pd.DataFrame)):\n index = X.index\n else:\n index = pd.Index(np.arange(X.shape[0]))\n\n # Resolve start_idxs and end_idxs\n if window_len is None and n is None:\n raise ValueError(\"At least n or window_len must be set\")\n if window_len is None:\n window_len = len(index) // n\n if 0 < window_len < 1:\n window_len = math.floor(window_len * len(index))\n start_idxs = np.arange(len(index) - window_len + 1)\n end_idxs = np.arange(window_len - 1, len(index))\n\n # Filter out short ranges\n window_lens = end_idxs - start_idxs + 1\n min_len_mask = window_lens >= min_len\n if not np.any(min_len_mask):\n raise ValueError(f\"There are no ranges that meet window_len>={min_len}\")\n start_idxs = start_idxs[min_len_mask]\n end_idxs = end_idxs[min_len_mask]\n\n # Evenly select n ranges\n if n is not None:\n if n > len(start_idxs):\n raise ValueError(f\"n cannot be bigger than the maximum number of windows {len(start_idxs)}\")\n idxs = np.round(np.linspace(0, len(start_idxs) - 1, n)).astype(int)\n start_idxs = start_idxs[idxs]\n end_idxs = end_idxs[idxs]\n\n return split_ranges_into_sets(start_idxs, end_idxs, **kwargs)", "def create_sets():\n global train_x, train_y, val_x, val_y\n\n print('Creating sets')\n\n dataframe = pd.read_csv('LoggerBot.log', names=NAMES).sample(frac=1)\n inputs = dataframe.values[:,:-1].astype(np.float32)\n outputs = dataframe.values[:,-1].astype(np.int32)\n\n train_set_size = int(len(dataframe) * 0.7)\n train_x, train_y = inputs[:train_set_size], outputs[:train_set_size]\n val_x, val_y = inputs[train_set_size:], outputs[train_set_size:]", "def region_sets(self,listA,listB):\n self.setA = GenomicRegionSet('for Unit Test')\n for i in range(len(listA)):\n self.setA.add(GenomicRegion(chrom=listA[i][0], initial=listA[i][1], final=listA[i][2]))\n \n self.setB = GenomicRegionSet('for Unit Test')\n for i in range(len(listB)):\n self.setB.add(GenomicRegion(chrom=listB[i][0], initial=listB[i][1], final=listB[i][2]))", "def generate_superset(self, number):\n \n superset = []\n for i in range(0, 2**(self.dim)):\n if (number & i)==number:\n superset.append(i)\n return superset", "def geneExonicRegions(self, df):\n scaffold = df.iloc[0].scaffold\n strand = df.iloc[0].strand\n gene_type = df.iloc[0].gene_type\n gene_id = df.iloc[0].gene_id\n gene_name = df.iloc[0].gene_name\n start = df.start.min()\n end = df.end.max()\n bp = [False] * (end - start + 1)\n for i in range(df.shape[0]):\n s = df.iloc[i]['start'] - start\n e = df.iloc[i]['end'] - start + 1\n bp[s:e] = [True] * (e - s)\n regions = list(range(start, end + 1))\n groups = []\n\n for i, j in groupby(bp):\n groups.append((i, len(list(j))))\n e_start = 0\n\n for i in groups:\n e_end = e_start + i[1]\n if i[0]:\n record = Record(scaffold=scaffold, start=regions[e_start],\n end=regions[e_end - 1], gene_type=gene_type, gene_id=gene_id,\n gene_name=gene_name, strand=strand)\n yield record\n e_start += i[1]", "def _chunk_windows(windows, num_chunks):\n if num_chunks <= 0 or int(num_chunks) != num_chunks:\n raise ValueError(\"Number of chunks must be an integer > 0\")\n num_chunks = min(len(windows) - 1, num_chunks)\n splits = np.array_split(windows[:-1], num_chunks)\n chunks = []\n for j 
in range(num_chunks - 1):\n chunk = np.append(splits[j], splits[j + 1][0])\n chunks.append(chunk)\n chunk = np.append(splits[-1], windows[-1])\n chunks.append(chunk)\n return chunks", "def samples_set(self):\n self.get_samples_set(self.samples_db)\n self.choose_samples(self.chosen_samples_db, self.chosen_hashes)", "def windows(self,windowSize):\n for i in range(0,len(self)-windowSize):\n yield (i,i+windowSize)", "def add_genesets(snp_dict,gene_file):\n inf = open(gene_file,\"r\")\n for i in snp_dict.keys():\n snp_dict[i]['genes']=np.empty(len(snp_dict[i]['bps']), dtype=set)\n for line in inf:\n if re.match(\"\\#\",line):\n continue\n line.rstrip()\n fields=line.split()\n if len(fields) < 3:\n continue\n bps=int(fields[1])\n if fields[0] in snp_dict.keys():\n idx = snp_dict[fields[0]]['bps'].searchsorted(bps)\n if (idx < len(snp_dict[fields[0]]['bps'])) and snp_dict[fields[0]]['bps'][idx] == bps:\n snp_dict[fields[0]]['genes'][idx]=set([ x for x in fields[2:] ])\n return True", "def subsets(self):\n return set(self.subset_map.values())", "def split_ranges_into_sets(start_idxs: tp.ArrayLike, end_idxs: tp.ArrayLike,\n set_lens: tp.MaybeSequence[tp.Sequence[float]] = (),\n left_to_right: tp.MaybeSequence[bool] = True) -> RangesT:\n start_idxs = np.asarray(start_idxs)\n end_idxs = np.asarray(end_idxs)\n checks.assert_len_equal(start_idxs, end_idxs)\n\n for i in range(len(start_idxs)):\n start_idx = start_idxs[i]\n end_idx = end_idxs[i]\n\n range_len = end_idx - start_idx + 1\n new_set_lens = []\n if len(set_lens) == 0:\n yield (np.arange(start_idx, end_idx + 1),)\n else:\n if checks.is_sequence(set_lens[0]):\n _set_lens = set_lens[i]\n else:\n _set_lens = set_lens\n if checks.is_sequence(left_to_right):\n _left_to_right = left_to_right[i]\n else:\n _left_to_right = left_to_right\n for j, set_len in enumerate(_set_lens):\n if 0 < set_len < 1:\n set_len = math.floor(set_len * range_len)\n if set_len == 0:\n raise ValueError(f\"Set {j} in the range {i} is empty\")\n new_set_lens.append(set_len)\n if sum(new_set_lens) < range_len:\n if _left_to_right:\n new_set_lens = new_set_lens + [range_len - sum(new_set_lens)]\n else:\n new_set_lens = [range_len - sum(new_set_lens)] + new_set_lens\n else:\n raise ValueError(f\"Range of length {range_len} too short to split into {len(_set_lens) + 1} sets\")\n\n # Split each range into sets\n idx_offset = 0\n set_ranges = []\n for set_len in new_set_lens:\n new_idx_offset = idx_offset + set_len\n set_ranges.append(np.arange(start_idx + idx_offset, start_idx + new_idx_offset))\n idx_offset = new_idx_offset\n\n yield tuple(set_ranges)", "def windows(self, windows):\n\n self._windows = windows", "def addSubsetProteins(self, proteinIds):\n self._addProteins(proteinIds, ['subset', 'proteins'])", "def test_build_genomic_regions(self):\n\n CDS = pybedtools.BedTool(\"\"\"chr1\\t7700\\t7900\\tfoo\\t0\\t+\\n\n chr1\\t7999\\t8500\\tfoo\\t0\\t+\\n\"\"\", from_string = True)\n UTR5 = pybedtools.BedTool(\"\"\"chr1\\t7499\\t7700\\tfoo\\t0\\t+\\n\"\"\", from_string = True)\n UTR3 = pybedtools.BedTool(\"\"\"chr1\\t8500\\t9000\\tfoo\\t0\\t+\\n\"\"\", from_string = True)\n proxintron = pybedtools.BedTool(\"\"\"chr1\\t100\\t300\\tfoo\\t0\\t+\\n\n chr1\\t798\\t998\\tfoo\\t0\\t+\\n\n chr1\\t2000\\t2200\\tfoo\\t0\\t+\\n\n chr1\\t2798\\t2998\\tfoo\\t0\\t+\\n\n chr1\\t6000\\t6200\\tfoo\\t0\\t+\\n\n chr1\\t6798\\t6998\\tfoo\\t0\\t+\\n\n chr1\\t7900\\t7998\\tfoo\\t0\\t+\\n\"\"\", from_string = True\n )\n distintron = pybedtools.BedTool(\"\"\"chr1\\t301\\t797\\tfoo\\t0\\t+\\n\n 
chr1\\t2201\\t2797\\tfoo\\t0\\t+\\n\n chr1\\t6201\\t6797\\tfoo\\t0\\t+\\n\"\"\", from_string = True)\n \n regions = build_genomic_regions(pybedtools.BedTool(clipper.test_file(\"test.gtf\")), prox_distance=200) \n \n #print UTR3\n\n #print regions['UTR3']\n print proxintron\n print regions['proxintron']\n #print regions['distintron']\n \n self.assertEqual(len(CDS.intersect(regions['CDS'], f= 1.0, r = True)), 2)\n self.assertEqual(len(UTR5.intersect(regions['UTR5'], f= 1.0, r = True)), 1)\n self.assertEqual(len(UTR3.intersect(regions['UTR3'], f= 1.0, r = True)), 1)\n self.assertEqual(len(proxintron.intersect(regions['proxintron'], f= 1.0, r = True)), 7)\n self.assertEqual(len(distintron.intersect(regions['distintron'], f= 1.0, r = True)), 3)", "def _construct_windows(self, Nw, ti, i0=0, i1=None):\n if i1 is None:\n i1 = Nw\n\n # get data for windowing period\n df = self.data.get_data(ti-self.dtw, ti+(Nw-1)*self.dto)[self.data_streams]\n\n # create windows\n dfs = []\n for i in range(i0, i1):\n dfi = df[:].iloc[i*(self.iw-self.io):i*(self.iw-self.io)+self.iw]\n try:\n dfi['id'] = pd.Series(np.ones(self.iw, dtype=int)*i, index=dfi.index)\n except ValueError:\n print('hi')\n dfs.append(dfi)\n df = pd.concat(dfs)\n window_dates = [ti + i*self.dto for i in range(Nw)]\n return df, window_dates[i0:i1]", "def bind(self, exp_to_use,window):\n for gene_name in self.genes_name_list:\n gene_data = self.data[gene_name]\n max_position = gene_data[exp_to_use].idxmax()\n motif = ''.join(list(gene_data[max_position-window:max_position+window]['nucleotide']))\n if len(motif) >= 8:\n print '>'+gene_name\n print motif + '\\n'\n # print gene_data[max_position-window:max_position+window]", "def create_window(features, buffer_left, buffer_right, bam_has_chr_prefix):\n for f in features:\n if f.type == \"transcript\":\n chr = str(f.iv.chrom)\n if bam_has_chr_prefix and chr[:3] != 'chr':\n chr = 'chr'+str(chr)\n window = HTSeq.GenomicInterval(chr, f.iv.start-buffer_left, f.iv.end+buffer_right, '.')\n return window\n print \"Could not locate a GenomicFeature of type 'transcript', so could not create a GenomicInterval.\"\n sys.exit(1)", "def new_image_sample_grid(dset, window_dims, window_overlap=0.0,\n classes_of_interest=None, ignore_coverage_thresh=0.6,\n negative_classes={'ignore', 'background'},\n use_annots=True, legacy=True, verbose=1):\n # import netharn as nh\n import kwarray\n from ndsampler import isect_indexer\n keepbound = True\n\n # Create a sliding window object for each specific image (because they may\n # have different sizes, technically we could memoize this)\n gid_to_slider = {}\n for img in dset.imgs.values():\n full_dims = [img['height'], img['width']]\n window_dims_ = full_dims if window_dims == 'full' else window_dims\n slider = kwarray.SlidingWindow(full_dims, window_dims_,\n overlap=window_overlap, keepbound=keepbound,\n allow_overshoot=True)\n gid_to_slider[img['id']] = slider\n\n if use_annots:\n _isect_index = isect_indexer.FrameIntersectionIndex.from_coco(dset)\n\n positives = []\n negatives = []\n targets = []\n positive_idxs = []\n negative_idxs = []\n for gid, slider in gid_to_slider.items():\n\n # For each image, create a box for each spatial region in the slider\n boxes = []\n regions = list(slider)\n for region in regions:\n y_sl, x_sl = region\n boxes.append([x_sl.start, y_sl.start, x_sl.stop, y_sl.stop])\n boxes = kwimage.Boxes(np.array(boxes), 'ltrb')\n\n for region, box in zip(regions, boxes):\n\n if use_annots:\n # Check to see what annotations this window-box overlaps with\n 
aids = _isect_index.overlapping_aids(gid, box)\n\n # Look at the categories within this region\n catnames = [\n dset.cats[dset.anns[aid]['category_id']]['name'].lower()\n for aid in aids\n ]\n\n if ignore_coverage_thresh:\n ignore_flags = [catname == 'ignore' for catname in catnames]\n if any(ignore_flags):\n # If the almost the entire window is marked as ignored then\n # just skip this window.\n ignore_aids = list(ub.compress(aids, ignore_flags))\n ignore_boxes = dset.annots(ignore_aids).boxes\n\n # Get an upper bound on coverage to short circuit extra\n # computation in simple cases.\n box_area = box.area.sum()\n coverage_ub = ignore_boxes.area.sum() / box_area\n if coverage_ub > ignore_coverage_thresh:\n max_coverage = ignore_boxes.iooas(box).max()\n if max_coverage > ignore_coverage_thresh:\n continue\n elif len(ignore_boxes) > 1:\n # We have to test the complex case\n try:\n from shapely.ops import cascaded_union\n ignore_shape = cascaded_union(ignore_boxes.to_shapley())\n region_shape = box[None, :].to_shapley()[0]\n coverage_shape = ignore_shape.intersection(region_shape)\n real_coverage = coverage_shape.area / box_area\n if real_coverage > ignore_coverage_thresh:\n continue\n except Exception as ex:\n import warnings\n warnings.warn(\n 'ignore region select had non-critical '\n 'issue ex = {!r}'.format(ex))\n\n if classes_of_interest:\n # If there are CoIs then only count a region as positive if one\n # of those is in this region\n interest_flags = np.array([\n catname in classes_of_interest for catname in catnames])\n pos_aids = list(ub.compress(aids, interest_flags))\n elif negative_classes:\n # Don't count negative classes as positives\n nonnegative_flags = np.array([\n catname not in negative_classes for catname in catnames])\n pos_aids = list(ub.compress(aids, nonnegative_flags))\n else:\n pos_aids = aids\n else:\n aids = None\n pos_aids = None\n\n # aids = sampler.regions.overlapping_aids(gid, box, visible_thresh=0.001)\n tr = {\n 'gid': gid,\n 'slices': region,\n 'aids': aids,\n }\n targets.append(tr)\n if pos_aids:\n positive_idxs.append(len(targets))\n if legacy:\n positives.append(tr)\n else:\n negative_idxs.append(len(targets))\n if legacy:\n negatives.append(tr)\n\n if verbose:\n print('Found {} targets'.format(len(targets)))\n if use_annots:\n print('Found {} positives'.format(len(positive_idxs)))\n print('Found {} negatives'.format(len(negative_idxs)))\n\n sample_grid = {\n 'targets': targets,\n 'positives_indexes': positive_idxs,\n 'negatives_indexes': negative_idxs,\n }\n if legacy:\n sample_grid.update({\n # Deprecated:\n 'positives': positives,\n 'negatives': negatives,\n })\n return sample_grid", "def segregating_sites(\n self, sample_sets=None, windows=None, mode=\"site\", span_normalise=True\n ):\n return self.__one_way_sample_set_stat(\n self._ll_tree_sequence.segregating_sites,\n sample_sets,\n windows=windows,\n mode=mode,\n span_normalise=span_normalise,\n )", "def test_subset_reconstruction_iterable(self, wires):\n circuit = hadamard_circuit(wires)\n bits, recipes = circuit()\n shadow = ClassicalShadow(bits, recipes)\n\n # choose 1000 random indices\n snapshots = np.random.choice(np.arange(10000, dtype=np.int64), size=1000, replace=False)\n state = shadow.global_snapshots(snapshots=snapshots)\n assert state.shape == (len(snapshots), 2**wires, 2**wires)\n\n # check the results against obtaining the full global snapshots\n expected = shadow.global_snapshots()\n for i, t in enumerate(snapshots):\n assert np.allclose(expected[t], state[i])", "def 
_complete_windows(it: Iterator[_T], window_size: int) -> Iterator[Tuple[_T, ...]]:\n win = deque(islice(it, window_size), window_size)\n if len(win) < window_size:\n return\n # cache method access for slight speed boost\n append = win.append\n yield tuple(win)\n for e in it:\n append(e)\n yield tuple(win)", "def extend_all(element, powerset):\n\n new_elements = set()\n\n for subset in powerset:\n extended_element = subset | frozenset([element])\n set.add(new_elements, extended_element)\n \n return new_elements", "def subsampleData(self, count):\n size = 0\n for block in self.blocks: size += len(block[1])\n subset = numpy.random.permutation(size)[:count]\n subset.sort()\n\n pos = 0\n index = 0\n ret = Dataset()\n for block in self.blocks:\n while subset[index]<(pos+len(block[1])):\n loc = subset[index] - pos\n ret.add(block[0][loc,:], block[1][loc])\n index += 1\n if index==subset.shape[0]: return ret\n pos += len(block[1])\n \n return ret", "def _get_run_onsets(\n runs, length_fr, pad_fr, running_threshold_cms, offset_fr):\n out = []\n for run in runs:\n t2p = run.trace2p()\n tr = t2p.trace('deconvolved')\n\n # Add all onsets of \"other\" frames\n others = t2p.nocs(length_fr, pad_fr,\n running_threshold_cms)\n for ot in others:\n start = ot + offset_fr\n out.append(tr[:, start:start + length_fr])\n\n return out", "def subWindowList(self, QMdiArea_WindowOrder_order=None): # real signature unknown; restored from __doc__\r\n pass", "def subsetFromGeneIds(self, geneIds):\n\t\tgs = copy.copy(self)\n\t\tgs._dataframe = gs._dataframe.loc[[item for item in geneIds if item in gs._dataframe.index]]\n\t\treturn gs", "def get_train_windows(self, scene):\n pass", "def powerset3(orig, newset=[]):\n if orig == []:\n yield newset\n else:\n for s in powerset3(orig[1:], newset+[orig[0]]):\n yield s\n for s in powerset3(orig[1:], newset):\n yield s", "def buildcutlineset():\r\n cutlineset=[[[-3.2697,-3.2697],[-4.3304,-4.3304]],[[-3.2697,-4.3304],[-4.3304,-3.2697]]]\r\n cutlineset.extend([[[-3.2697,176.0104],[-4.3304,174.9497]],[[-3.2697,174.9497],[-4.3304,176.0104]]])\r\n cutlineset.extend([[[176.0104,176.0104],[174.9497,174.9497]],[[176.0104,174.9497],[174.9497,176.0104]]])\r\n cutlineset.extend([[[175.4800,-3.05],[175.4800,-4.55]],[[174.7300,-3.8],[176.2300,-3.8]]])\r\n \r\n for cutline in cutlineset:\r\n for pos in cutline:\r\n pos[0]=pos[0]+globalconfig.CUTLINE_X_OFFSET\r\n pos[1]=pos[1]+globalconfig.CUTLINE_Y_OFFSET\r\n \r\n for row in range(0,globalconfig.X_ARRAY_NUM):\r\n cutlineset.append([[globalconfig.X_BLANK+row*(globalconfig.X_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_X_OFFSET,0.0+globalconfig.CUTLINE_Y_OFFSET],[globalconfig.X_BLANK+row*(globalconfig.X_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_X_OFFSET,-3.0+globalconfig.CUTLINE_Y_OFFSET]])\r\n cutlineset.append([[globalconfig.X_BLANK+row*(globalconfig.X_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_X_OFFSET,171.68+globalconfig.CUTLINE_Y_OFFSET],[globalconfig.X_BLANK+row*(globalconfig.X_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_X_OFFSET,174.68+globalconfig.CUTLINE_Y_OFFSET]])\r\n for line in range(0,globalconfig.Y_ARRAY_NUM):\r\n cutlineset.append([[0.0+globalconfig.CUTLINE_X_OFFSET,globalconfig.Y_BLANK+line*(globalconfig.Y_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_Y_OFFSET],[-3.0+globalconfig.CUTLINE_X_OFFSET,globalconfig.Y_BLANK+line*(globalconfig.Y_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_Y_OFFSET]])\r\n 
cutlineset.append([[171.68+globalconfig.CUTLINE_X_OFFSET,globalconfig.Y_BLANK+line*(globalconfig.Y_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_Y_OFFSET],[174.68+globalconfig.CUTLINE_X_OFFSET,globalconfig.Y_BLANK+line*(globalconfig.Y_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_Y_OFFSET]])\r\n return cutlineset", "def add_sets(list_of_sets):\n global true_introns\n for item in list_of_sets:\n true_introns.update(item)", "def visualize(self, chrom, start, end,\n fig_width=12,\n row_heights=1,\n ax_spacing=0.05,\n num_xticks=10,\n seaborn_style=seaborn.axes_style(style='ticks',\n rc={'axes.edgecolor': 'w', 'axes.facecolor': '#EAEAF2'})):\n # ToDo: Add gene (or other feature) lookup instead of specifying coordinates.\n start, end = int(start), int(end)\n\n assert end > start, 'Window end must be greater than window start! Got: {}, {}'.format(start, end)\n\n # if we receive a scalar here, use it as the height for all rows\n try:\n if len(row_heights) == 1:\n row_heights = row_heights * len(self.subplot_objects) # treat as a uniform row height\n except TypeError:\n row_heights = [row_heights] * len(self.subplot_objects) # treat as a uniform row height\n\n assert len(row_heights) == len(self.subplot_objects)\n\n span = end - start\n xtick_increment = span / num_xticks\n rounding_increment = 5 * 10 ** numpy.round(numpy.log10(xtick_increment) - 1)\n xtick_increment = utilities.roundto(xtick_increment, rounding_increment)\n num_ticks = int(span / xtick_increment) + 1\n round_start = utilities.roundto(start, rounding_increment)\n\n seaborn.set_style(seaborn_style)\n\n fig = plt.figure(len(self.subplot_objects),\n figsize=(fig_width, numpy.sum(row_heights) * (1 + ax_spacing * len(self.subplot_objects))))\n bottoms, heights = compute_ax_row_positions(row_heights=row_heights, ax_spacing=ax_spacing)\n\n for ax_idx in range(len(self.subplot_objects)):\n this_ax = fig.add_axes([0, bottoms[ax_idx], 1, heights[ax_idx]])\n\n if ax_idx == len(self.subplot_objects) - 1:\n this_ax.set_xticks(numpy.arange(num_ticks) * xtick_increment + round_start)\n this_ax.ticklabel_format(axis='x', style='sci', scilimits=(-2, 2))\n this_ax.set_xlabel('{} position'.format(chrom))\n \n else: # clear out xticks but plot objects can override this later\n this_ax.set_xlabel('')\n this_ax.set_xticks([])\n\n plot_object_subset = self.subplot_objects[ax_idx]\n\n # Set default plot limits (can be changed by client objects)\n this_ax.set_ylim(0, 1)\n this_ax.set_xlim(start, end)\n\n for plot_object in plot_object_subset:\n plot_object.plot(this_ax, chrom=chrom, ws=start, we=end, fig_width=fig_width,\n row_height=row_heights[ax_idx])\n\n # ToDo: Refactor legend code to get colors and names from objects not from axes handles.\n # if len(this_ax.get_legend_handles_labels()[1]):\n # this_ax.legend(loc=self.VECTOR_LEGEND_LOC)\n\n return fig", "def subsample(self, se):\n\t\tdf = ReadDF('noname', self.readdict.refmap)\n\t\tfor i in random.sample(xrange(1, self.n+1), min(se, self.n)):\n\t\t\tpos, read = self.partial_sampling_func(i)\n\t\t\tdf.add_read_to_vec(read,copy=1) # important to remember to use just this ONE copy!!!\n\t\treturn df", "def get_subsets_from_viewer(self, viewer_reference, data_label=None):\n viewer = self.get_viewer(viewer_reference)\n data = self.get_data_from_viewer(viewer_reference,\n data_label,\n cls=None)\n regions = {}\n\n for key, value in data.items():\n if isinstance(value, Subset):\n # Range selection on a profile is currently not supported in\n # the glue translation machinery for astropy 
regions, so we\n # have to do it manually. Only data that is 2d is supported,\n # therefore, if the data is already 2d, simply use as is.\n if value.data.ndim == 2:\n region = value.data.get_selection_definition(\n format='astropy-regions')\n regions[key] = region\n continue\n # There is a special case for 1d data (which is also not\n # supported currently). We now eschew the use of the\n # translation machinery entirely and construct the astropy\n # region ourselves.\n elif value.data.ndim == 1:\n # Grab the data units from the glue-astronomy spectral axis\n # TODO: this needs to be much simpler; i.e. data units in\n # the glue component objects\n unit = value.data.coords.spectral_axis.unit\n hi, lo = value.subset_state.hi, value.subset_state.lo\n xcen = 0.5 * (lo + hi)\n width = hi - lo\n region = RectanglePixelRegion(\n PixCoord(xcen, 0), width, 0,\n meta={'spectral_axis_unit': unit})\n regions[key] = region\n continue\n\n # Get the pixel coordinate [z] of the 3D data, repeating the\n # wavelength axis. This doesn't seem strictly necessary as it\n # returns the same data if the pixel axis is [y] or [x]\n xid = value.data.pixel_component_ids[0]\n\n # Construct a new data object collapsing over one of the\n # spatial dimensions. This is necessary because the astropy\n # region translation machinery in glue-astronomy does not\n # support non-2D data, even for range objects.\n stat_func = 'median'\n\n if hasattr(viewer.state, 'function'):\n stat_func = viewer.state.function\n\n # Compute reduced data based on the current viewer's statistic\n # function. This doesn't seem particularly useful, but better\n # to be consistent.\n reduced_data = Data(x=value.data.compute_statistic(\n stat_func, value.data.id[xid],\n subset_state=value.subset_state.copy(), axis=1))\n\n # Instantiate a new data collection since we need to compose\n # the collapsed data with the current subset state. We use a\n # new data collection so as not to inference with the one used\n # by the application.\n temp_data_collection = DataCollection()\n temp_data_collection.append(reduced_data)\n\n # Get the data id of the pixel axis that will be used in the\n # range composition. 
This is the wavelength axis for the new\n # 2d data.\n xid = reduced_data.pixel_component_ids[1]\n\n # Create a new subset group to hold our current subset state\n subset_group = temp_data_collection.new_subset_group(\n label=value.label, subset_state=value.subset_state.copy())\n\n # Set the subset state axis to the wavelength pixel coordinate\n subset_group.subsets[0].subset_state.att = xid\n\n # Use the associated collapsed subset data to get an astropy\n # regions object dependent on the extends of the subset.\n # **Note** that the y values in this region object are not\n # useful, only the x values are.\n region = subset_group.subsets[0].data.get_selection_definition(\n format='astropy-regions')\n regions[key] = region\n\n return regions", "def getSets():", "def find_proper_subsets(powerset, cardinality_difference = 1, debug = False):\n subset_idx = [] # Which will be set A of the powerset\n superset_idx = [] # Which will be set B of the powerset\n\n for A, a_idx in zip(powerset, list(range(0, len(powerset)))):\n # A_is_proper_subset_of_B = True\n for B, b_idx in zip(powerset[a_idx:], list(range(a_idx, len(powerset)))):\n if len(A) is not len(B)-cardinality_difference:\n continue\n else: # Check every element\n for a in A:\n A_is_proper_subset_of_B = True\n found_a_in_b = False\n for b in B:\n if a is b:\n found_a_in_b = True\n break\n if found_a_in_b is False:\n A_is_proper_subset_of_B = False\n if A_is_proper_subset_of_B:\n if debug:\n print(\"A:\", A, \" is proper subset of B:\", B)\n subset_idx.append(a_idx)\n superset_idx.append(b_idx)\n # return the indecees for the corresponding sets\n #print(\"subset_idx: \", subset_idx)\n #print(\"superset_idx: \", superset_idx)\n return subset_idx, superset_idx", "def multiple_chromosome_set_generator(random, args):\n keys = args.get('keys')\n candidate = MultipleChromosomeGenome(keys=keys)\n for key in keys:\n key_args = {\n 'representation': args.get(\"%s_representation\" % key),\n 'max_size': args.get(\"%s_max_size\" % key),\n 'variable_size': args.get('variable_size')\n }\n candidate[key] = set_generator(random, key_args)\n\n return candidate", "def get_train_windows(self, scene: Scene) -> List[Box]:\n\n def filter_windows(windows):\n if scene.aoi_polygons:\n windows = Box.filter_by_aoi(windows, scene.aoi_polygons)\n return windows\n\n raster_source = scene.raster_source\n extent = raster_source.get_extent()\n label_store = scene.ground_truth_label_source\n chip_size = self.config.chip_size\n\n chip_options = self.config.chip_options\n\n if chip_options.window_method == 'random_sample':\n return get_random_sample_train_windows(\n label_store, chip_size, self.config.class_map, extent,\n chip_options, filter_windows)\n elif chip_options.window_method == 'sliding':\n stride = chip_options.stride\n if stride is None:\n stride = chip_size / 2\n\n return list(\n filter_windows((extent.get_windows(chip_size, stride))))", "def get_inter_cds_regions(annotations):\n # Determine locations of inter-CDS regions for each chromosome\n inter_cds_regions = {}\n\n for chr_id, chromosome in annotations.items():\n # get chromosome dimensions (the first feature represents the\n # chromosome itself)\n ch_end = int(chromosome.features[0].location.end)\n\n # filter out everything except for genes\n genes = [x for x in chromosome.features if x.type == 'gene']\n\n # order by position along chromosome\n genes.sort(key=lambda x: x.location.start)\n\n # add range before first gene\n start = 0\n\n # keep track of strand of polycistronic transcriptional unit\n strand = 
None\n\n inter_cds_regions[chr_id] = {\n -1: [],\n +1: []\n }\n\n # iterate through genes and store the ranges between them;\n # for TriTrypDB files, the gene boundaries are generally the same\n # as the CDS boundaries.\n for gene in genes:\n # Determine location for the region up to start of the gene\n end = int(gene.location.start)\n\n # Skip over snoRNAs, etc. that are nested inside of other genes\n # For example: TcCLB TcChr22-2 179,000:180,000\n if end <= start:\n continue\n\n # Add CDS to relevant list based on strand\n if strand is None:\n # Left-most gene\n inter_cds_regions[chr_id][gene.location.strand].append((start, end))\n elif strand != gene.location.strand:\n # Add ORFs in both directions at transcription switch sites (TSSs)\n inter_cds_regions[chr_id][+1].append((start, end))\n inter_cds_regions[chr_id][-1].append((start, end))\n else:\n # Within PTU; look for ORFs on same strand\n inter_cds_regions[chr_id][strand].append((start, end))\n\n # update start counter and strand\n start = int(gene.location.end)\n strand = gene.location.strand\n\n # add region after last gene\n inter_cds_regions[chr_id][strand].append((start, ch_end))\n\n return inter_cds_regions", "def construct_combined_set(filenames, sensor_names, cnt_preprocessors,\n marker_def, end_marker_def, trial_classes,\n trial_start_offset_ms, trial_stop_offset_ms,\n min_break_length_ms, max_break_length_ms,\n break_start_offset_ms, break_stop_offset_ms,\n last_set_split_trial, add_trial_breaks=True,\n filename_to_extra_args=None):\n default_args = deepcopy(locals())\n sets = []\n\n if filename_to_extra_args is not None:\n for filename_with_args in filename_to_extra_args:\n assert filename_with_args in filenames\n\n for i_file, filename in enumerate(filenames):\n this_args = copy(default_args)\n if filename_to_extra_args is not None and (\n filename in filename_to_extra_args):\n for key in filename_to_extra_args[filename]:\n assert key in this_args\n this_args[key] = filename_to_extra_args[filename][key]\n assert key != 'last_set_split_trial', \"Does not make sense :)\"\n marker_segmenter = MarkerSegmenter(segment_ival=[\n this_args['trial_start_offset_ms'], \n this_args['trial_stop_offset_ms']],\n marker_def=this_args['marker_def'],\n trial_classes=this_args['trial_classes'],\n end_marker_def=this_args['end_marker_def'])\n trial_break_adder = AddTrialBreaks(min_length_ms=this_args['min_break_length_ms'],\n max_length_ms=this_args['max_break_length_ms'], \n start_offset_ms=this_args['break_start_offset_ms'], \n stop_offset_ms=this_args['break_stop_offset_ms'],\n start_marker_def=this_args['marker_def'],\n end_marker_def=this_args['end_marker_def'])\n if (i_file < len(filenames) - 1) or (\n this_args['last_set_split_trial'] is None):\n segmenters = [marker_segmenter,]\n else:\n segmenters = [marker_segmenter,\n RestrictTrialRange(0,this_args['last_set_split_trial'])]\n if this_args['add_trial_breaks']:\n segmenters.append(trial_break_adder)\n segmenter = PipelineSegmenter(segmenters)\n cnt_set = SetWithMarkers(BBCIDataset(filename,\n load_sensor_names=this_args['sensor_names']),\n this_args['cnt_preprocessors'],\n segmenter) \n sets.append(cnt_set)\n\n # add last set last part as test set if you split apart last set\n # we use that this_args is now from last set already\n if last_set_split_trial is not None:\n segmenters = [marker_segmenter,\n RestrictTrialRange(last_set_split_trial,None),]\n if this_args['add_trial_breaks']:\n segmenters.append(trial_break_adder)\n segmenter = PipelineSegmenter(segmenters)\n cnt_set = 
SetWithMarkers(BBCIDataset(filenames[-1], # again last file needed\n load_sensor_names=this_args['sensor_names']),\n this_args['cnt_preprocessors'],\n segmenter)\n sets.append(cnt_set)\n dataset = CombinedSet(sets)\n return dataset", "def divide_and_count(L_windows, k, t):\n\n results = set()\n\n for L_mer in L_windows:\n k_windows = divide_genome(L_mer, k) # We extract in a list all the possible k-mers\n\n # Generate a set of unique elements to avoid multicounts...\n k_windows_set = set(k_windows)\n\n for k_window in k_windows_set:\n if k_windows.count(k_window) == t:\n results.add(k_window)\n\n\n print(\"\\t\".join(results))", "def copy_mean_depths_to_subgrid(self,overwrite=True,cells=True,edges=True):\n if cells:\n area = self.cells_area()\n depth = self.cells['depth_mean']\n # funny indexing to add unit dimension,\n # and zip to make these into tuples like ([area[0]],[depth[0]])\n for c in range(self.Ncells()):\n if overwrite or self.cells['subgrid'][c]==0:\n self.cells['subgrid'][c] = (area[c,None],depth[c,None])\n if edges:\n length = self.edges_length()\n depth = self.edges['depth_mean']\n # funny indexing to add unit dimension,\n # and zip to make these into tuples like ([area[0]],[depth[0]])\n for j in range(self.Nedges()):\n if overwrite or self.edges['subgrid'][j]==0:\n self.edges['subgrid'][j] = (length[j,None],depth[j,None])", "def copy_subset(wdir, fullset, metadata, outfolder):\n \n ## Read metadata from file\n metadata = pd.read_csv(wdir+metadata, delimiter=',', index_col=0)\n #print(metadata.head())\n\n ## Filter the metadata table by one or several criteria\n ## USER: For categorical criteria, set filter category (column) and list of values to be selected.\n #filter_category = \"subgenre\" # author_short, genre, subgenre, availability, decade, etc.\n #selection_list = [\"policier\"] # See metadata file for possible values\n #metadata = metadata[metadata[filter_category].isin(selection_list)]\n\n ## USER: And/or, for numeric criteria, set a filter category and upper and lower bound.\n filter_category = \"year\"\n lower_bound = \"1799\"\n upper_bound = \"1900\"\n myquery = lower_bound + \"<\" + filter_category + \"<\" + upper_bound \n metadata = metadata.query(myquery)\n \n ## Create a list of filenames corresponding to the filter criteria.\n subset = []\n for item in metadata.index:\n subset.append(item)\n #print(subset)\n \n ## Copy the right files to a new folder. \n if not os.path.exists(wdir+outfolder):\n os.makedirs(wdir+outfolder)\n source = wdir+fullset\n destination = wdir+outfolder\n counter = 0\n for file in glob.glob(source):\n basename = os.path.basename(file)\n idno = basename[0:6]\n #print(file)\n #print(idno, basename)\n #print(wdir+outfolder+basename)\n if idno in subset:\n counter +=1\n shutil.copy(file, destination)\n \n print(\"Done. 
Files selected and copied: \"+ str(len(subset)) +\",\"+ str(counter))", "def subsample(expdat,numreads=10000,inplace=False):\n\timport biom\n\n\tparams=locals()\n\n\tnewexp=hs.filterorigreads(expdat,numreads,inplace)\n\tnewexp=hs.toorigreads(newexp,inplace=True)\n\n\ttable=biom.table.Table(newexp.data,newexp.seqs,newexp.samples)\n\ttable=table.subsample(numreads,axis='observation')\n\ttids=table.ids(axis='sample')\n\tfor idx,cid in enumerate(tids):\n\t\tif not cid==newexp.samples[idx]:\n\t\t\tprint('problem with sample ids!!!!')\n\tnewpos=[]\n\tfor cseq in table.ids(axis='observation'):\n\t\tnewpos.append(newexp.seqdict[cseq])\n\tnewexp=hs.reorderbacteria(newexp,newpos,inplace=True)\n\tnewexp.data=table.matrix_data.todense().A\n\tnewexp=normalizereads(newexp,numreads=10000,inplace=True,fixorig=False)\n\tfor cidx in range(len(newexp.samples)):\n\t\tnewexp.origreads[cidx]=numreads\n\tnewexp=updateorigreads(newexp)\n\tnewexp.filters.append(\"subsample to %d\" % numreads)\n\ths.addcommand(newexp,\"subsample\",params=params,replaceparams={'expdat':expdat})\n\treturn newexp", "def extract_multi_wavelet(\n self, min_freq=0.06, max_freq=0.66, bank=8, *args, **kwargs\n ):\n out = []\n for f in np.geomspace(min_freq, max_freq, bank):\n out.append(self.extract_wavelet(f, *args, **kwargs))\n return self.__class__(\n pd.concat(out, axis=1),\n sampling_freq=self.sampling_freq,\n features=self.features,\n sessions=self.sessions,\n )", "def random_sampling(self, n_subset):\n t = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n print(\"[INFO] {} - Random sampling with replacement ...\".format(t))\n subset_list = []\n training_set = self\n subset_size = math.ceil(training_set.n_samples / n_subset)\n # create subsets\n for i in range(n_subset):\n # run a permutation to mix all samples (sampling with replacement)\n self.permutation()\n # always draw the first samples\n start_idx = 0\n stop_idx = subset_size\n subset = deepcopy(training_set)\n subset.data = subset.data[start_idx:stop_idx][:]\n subset.labels = subset.labels[start_idx:stop_idx][:]\n subset.labels_onehot = subset.labels_onehot[start_idx:stop_idx][:]\n subset.n_samples = stop_idx - start_idx\n subset.true_distribution = subset._get_true_distribution()\n subset.set_batch_size(training_set.batch_size)\n subset_list.append(subset)\n print(\"\\tSubset shape {}\".format(subset.data.shape))\n return subset_list", "def __init__(self, data, row_sentence_bounds, window=5, process_all=False):\n assert it is not None, \"NEED PACKAGE INTERVALTREE!\"\n self.window = window\n self.data = data\n base_shape = self.data.shape\n self.shape = (base_shape[0], (2 * self.window + 1) * base_shape[1])\n self.tree = it.IntervalTree(\n [it.Interval(int(e[0]), int(e[1]) + 1) for e in row_sentence_bounds]\n )\n if process_all:\n print(\"adding context to all the dataset\", end=\"- \")\n self.data = self.generate_all()\n print(\"DONE\")\n self.process_all = process_all", "def test_subset_reconstruction_integer(self, wires, snapshots):\n circuit = hadamard_circuit(wires)\n bits, recipes = circuit()\n shadow = ClassicalShadow(bits, recipes)\n\n state = shadow.global_snapshots(snapshots=snapshots)\n assert state.shape == (snapshots, 2**wires, 2**wires)", "def select_positive_regions(targets, window_dims=(300, 300), thresh=0.0,\n rng=None, verbose=0):\n unique_gids, groupxs = kwarray.group_indices(targets['gid'])\n gid_to_groupx = dict(zip(unique_gids, groupxs))\n wh, ww = window_dims\n rng = kwarray.ensure_rng(rng)\n selection = []\n\n # Get all the bounding boxes\n cxs, cys = 
ub.take(targets, ['cx', 'cy'])\n n = len(targets)\n cxs = cxs.astype(np.float32)\n cys = cys.astype(np.float32)\n wws = np.full(n, ww, dtype=np.float32)\n whs = np.full(n, wh, dtype=np.float32)\n cxywh = np.hstack([a[:, None] for a in [cxs, cys, wws, whs]])\n boxes = kwimage.Boxes(cxywh, 'cxywh').to_ltrb()\n\n iter_ = ub.ProgIter(gid_to_groupx.items(),\n enabled=verbose,\n label='select positive regions',\n total=len(gid_to_groupx), adjust=0, freq=32)\n\n for gid, groupx in iter_:\n # Select all candiate windows in this image\n cand_windows = boxes.take(groupx, axis=0)\n # Randomize which candidate windows have the highest scores so the\n # selection can vary each epoch.\n cand_scores = rng.rand(len(cand_windows))\n cand_dets = kwimage.Detections(boxes=cand_windows, scores=cand_scores)\n # Non-max supresssion is really similar to set-cover\n keep = cand_dets.non_max_supression(thresh=thresh)\n selection.extend(groupx[keep])\n\n selection = np.array(sorted(selection))\n return selection", "def subset_gen(itemSet):\n subsets = []\n for i in range(1, len(itemSet)):\n c = combinations(itemSet, r=i)\n for cc in c:\n subsets.append(set(cc))\n return subsets", "def select_regions(args):\n assert args.files, \"Need a set of fastq files\"\n assert args.out, \"Need --out\"\n region = os.path.abspath(args.region)\n workdir = 'select'\n safe_makedir(workdir)\n out_file = os.path.join(workdir, splitext_plus(args.out)[0] + \"_cpg.bed\")\n out_snp_file = os.path.join(workdir, splitext_plus(args.out)[0] + '_snp.bed')\n if not file_exists(out_file):\n with file_transaction(out_file) as tx_out:\n with open(tx_out, 'w') as out_handle:\n # print >> out_handle, \"chrom\\tstart\\tend\\tcu\\tcm\\tstrand\\tgene\\tsample\"\n for in_vcf in args.files:\n snp_file = in_vcf.replace(\"rawcpg\", \"rawsnp\")\n sample = splitext_plus(os.path.basename(in_vcf))[0].split(\"_\")[0]\n get_het(snp_file, region, sample, out_snp_file)\n res = pybedtools.BedTool(in_vcf).intersect(b=region, wo=True)\n # cmd = (\"bedtools intersect -u -a {in_vcf} -b {region} > {tx_tmp}\")\n # do.run(cmd.format(**locals()), \"selecting %s\" % in_vcf)\n\n for record in res:\n gene = record[-2]\n chrom, pos, info, header, frmt = record[0], int(record[1]), record[7], record[8], record[9]\n cs = info.split(';')[0].split('=')[1]\n frmt = dict(zip(header.split(\":\"), frmt.split(':')))\n if is_good_cpg(frmt):\n tag = \"%s-%s-%s-%s\" % (frmt['CU'], frmt['CM'], gene, sample)\n print >> out_handle, \"%s\\t%s\\t%s\\t%s\\t.\\t%s\" % (chrom, pos, pos + 1, tag, cs)", "def addSubsetToGroups(self, proteinIds, groupIds):\n for groupId in AUX.toList(groupIds):\n self.groups[groupId].addSubsetProteins(proteinIds)\n self._addProteinIdsToGroupMapping(proteinIds, groupId)", "def insere_n_nos(self, num_nos):\n for i in range(num_nos):\n index = random.randint(1, 11 ** 4)\n elem = random.randint(1, 11 ** 4)\n self.insere(index, elem)", "def bin_sizing(self):\n\n self.log.info(\"Begin Re-Binning the Genome Space.\")\n new_list = []\n seg_num = 0\n\n for chrom in natsort.natsorted(self.seg_analyzer.chrom_list):\n self.log.debug(\"Binning Chromosome {0}\".format(chrom))\n\n # Some chromosomes have no segments.\n try:\n chrom_slice = \\\n self.seg_analyzer.seg_copy_array[self.seg_analyzer.seg_copy_array[:, 1] == chrom.encode()]\n seg_count = chrom_slice.shape[0]\n coord_start = int(chrom_slice[0, 2])\n except IndexError:\n continue\n\n for i in range((seg_count-1)):\n if (i+1) < seg_count and (i+1) % int(self.args.Combine_Segments) == 0:\n coord_stop = int(chrom_slice[i, 3])\n 
new_list.append([seg_num, chrom.encode(), coord_start, coord_stop])\n\n coord_start = int(chrom_slice[i+1, 2])\n seg_num += 1\n\n self.log.info(\"Genome Space Successfully Re-Binned.\")\n\n return numpy.array(new_list, dtype='object')", "def get_subregions(xint,conn):\n\n subregions = ('SELECT DISTINCT cvt.name, fip.value, f.name '\n 'FROM interaction i, feature_interaction fi, feature_interactionprop fip, ' \n 'feature f, cvterm cvt, cvterm cvt2, feature_relationship fr, feature f2 '\n 'WHERE f.feature_id = fi.feature_id AND fi.interaction_id = i.interaction_id '\n 'AND fi.feature_interaction_id = fip.feature_interaction_id '\n 'AND fi.role_id = cvt.cvterm_id '\n 'AND fip.type_id = cvt2.cvterm_id AND '\n 'cvt2.name = \\'subpart_info\\' AND f.feature_id = fr.subject_id '\n 'AND f2.feature_id = fr.object_id AND f.is_obsolete = \\'f\\' AND '\n 'f2.uniquename = %s AND i.uniquename = %s')\n subs = connect(subregions,xint,conn)\n return(subs)", "def _generate_rows(self):\n logger.debug(\"Generating pre-genealogical coherence data for %s\", self.w1)\n if not self.rows:\n for w2 in self.all_mss:\n if self.w1 == w2:\n continue\n self._add_row(w2)\n\n self._sort()\n logger.debug(\"Generated pre-genealogical coherence data for %s\", self.w1)", "def _subset_in_relative_order(self, superset, subset):\n superset_indices = [superset.index(item) for item in subset]\n sorted_superset_indices = sorted(superset_indices)\n if superset_indices != sorted_superset_indices:\n for index, superset_index in enumerate(sorted_superset_indices):\n superset[superset_index] = subset[index]\n return superset", "def NewExtentList(*args):\n ex_list = []\n for start_block, num_blocks in args:\n ex_list.append(PayloadCheckerTest.NewExtent(start_block, num_blocks))\n return ex_list", "def evaluate_get_pos_windows(dataset):\n #for cls in dataset.classes:\n for cls in ['aeroplane','bicycle','bird','person']:\n t = time.time()\n print(cls)\n print(dataset.get_pos_windows(cls).shape)\n print(\"time: %f\"%(time.time()-t))\n t = time.time()\n print(dataset.get_neg_windows(5000,cls).shape)\n print(\"time: %f\"%(time.time()-t))", "def finalSubsets(self):\n subs = self.allSubsets()\n for s in self.graph.observed:\n subs = subs[subs[:,s] == 1,] # remove subsets where values in s are not True\n return subs", "def selection(self):\n # create an empty priority queue for the new generation\n newGeneration = PriorityQueue()\n # pick top X of the population to survive\n for c in range(0, self.generation.qsize() / SELECTION_FRACTION):\n # get a chromosome\n chromosome = self.generation.get()\n # put the chromosomes in the new generation\n newGeneration.put(chromosome)\n # keep the new generation\n self.generation = newGeneration", "def power_set_efective(seq,k_min,k_max):\n \n seq = list(seq)\n \n #Empty set or one element sets\n if len(seq) <= 1:\n yield seq\n yield []\n\n else:\n for item in power_set(seq[1:]):\n if (len([seq[0]]+item) <= k_max and len([seq[0]]+item) >= k_min):\n yield [seq[0]]+item\n if (len(item) <= k_max and len(item) >= k_min): \n yield item", "def subsets(self, sub_ids, sub_args_lst=None):\n\n indices = [[self.ids.index(id_) for id_ in ids] for ids in sub_ids]\n if sub_args_lst is not None:\n subsets = [Subset(dataset=self, indices=index, sub_attrib_args=args)\n for index, args in zip(indices, sub_args_lst)]\n else:\n subsets = [Subset(dataset=self, indices=index) for index in indices]\n return subsets", "def simulate_generations(self, generations=DEFAULT_GENERATIONS):\n for i in range(generations):\n 
logging.getLogger().debug(self)\n self.__simulate_generation()\n\n if i < generations - 1:\n self.__delete_duplicates()\n\n return self.fittest_chromosome", "def filter_rare_genes(data, *extra_data, cutoff=0, min_cells=5):\n gene_sums = measure.gene_capture_count(data, cutoff=cutoff)\n keep_genes_idx = gene_sums >= min_cells\n data = select.select_cols(data, *extra_data, idx=keep_genes_idx)\n return data", "def subsetFromGeneSymbols(self, geneSymbols):\n\t\tgs = copy.copy(self)\n\t\tgs._dataframe = gs._dataframe[gs._dataframe['GeneSymbol'].isin(geneSymbols)]\n\t\treturn gs", "def input_slice(self, inputs):\n result = []\n for i in range(int(len(inputs) / self.window_size)):\n result.append(inputs[i * self.window_size:(i + 1) * self.window_size])\n return result", "def _get_subsampled_tensors(param, start_dims, savefields, subsamplings):\n tensors = []\n\n for start_dim, savefield, subsampling in zip(\n start_dims, savefields, subsamplings\n ):\n tensor = getattr(param, savefield)\n\n if subsampling is not None:\n batch_axis = start_dim - 1\n select = torch.tensor(\n subsampling, dtype=torch.int64, device=tensor.device\n )\n tensor = tensor.index_select(batch_axis, select)\n\n tensors.append(tensor)\n\n return tensors", "def split(self, fractions=[0.8, 0.2]):\n\n if sum(fractions) > 1.0 or sum(fractions) <= 0:\n raise ValueError(\"the sum of fractions argument should be between 0 and 1\")\n\n # random indices\n idx = np.arange(self.n_samples)\n np.random.shuffle(idx)\n\n # insert zero\n fractions.insert(0, 0)\n\n # gte limits of the subsets\n limits = (np.cumsum(fractions) * self.n_samples).astype(np.int32)\n\n subsets = []\n # create output dataset\n for i in range(len(fractions) - 1):\n subsets.append(\n Dataset(self.inputs[idx[limits[i]:limits[i + 1]]], self.targets[idx[limits[i]:limits[i + 1]]]))\n\n return subsets", "def windows(self, size, overlap=0, rm_offset=False):\r\n rows = self.data.shape[0]\r\n if (0 < size <= rows) and (0 <= overlap < size):\r\n n = (rows - size) // (size - overlap) + 1\r\n\r\n for i in range(n):\r\n start = (size - overlap) * i\r\n end = start + size\r\n win = self.data.iloc[start:end, :]\r\n if rm_offset:\r\n win_offset = win - win.mean()\r\n win_offset['t'] = win['t']\r\n yield win_offset\r\n\r\n yield win\r\n\r\n else:\r\n raise IndexError(f\"{self} no possible window of size '{size}'.\")", "def accept(self):\n self.selInsts = [x.get() for x in self.selInstsVar]\n self.selParams = [x.get() for x in self.selParamsVar]\n self.updateTitle()\n self.window.grab_release()\n self.window.destroy()\n self.instBoxes = []\n self.paramBoxes = []\n self.subRows = []\n self.selInstsVar = []\n self.selParamsVar = []\n self.addRow = []\n self.instTraces = []", "def make_windows(coord_file: str,\n clust: int,\n scaf: str):\n start_list = []\n end_list = []\n step = clust\n with open(coord_file, 'r') as coords:\n for line in coords:\n s, e = line.strip().split(\"-\")\n start_list.append(s)\n end_list.append(e)\n with open(f\"{scaf}.windows.out\", 'w') as file1:\n # clust coords\n s_ix = 0\n e_ix = s_ix + step\n while e_ix < len(end_list):\n start = start_list[s_ix]\n end = end_list[e_ix - 1]\n file1.write(f\"{scaf}\\t{start}\\t{end}\\n\")\n s_ix = e_ix\n e_ix = s_ix + step\n else:\n start = start_list[s_ix]\n end = end_list[-1]\n file1.write(f\"{scaf}\\t{start}\\t{end}\\n\")\n return(None)", "def set_fake_regular_offsets(self, win_wd, win_gap=0):\n sample_onset = int((win_wd + win_gap)*self.sampling_rate)\n self.onset_samples = range(0, len(self.audio), sample_onset)\n # 
excluding windows that are too close to the beginning\n self.onset_samples = [x for x in self.onset_samples if x > self.beginning_buffer]\n self.onset_times = [x/self.sampling_rate for x in self.onset_samples]", "def enframe(samples, winlen, winshift):\n\n # check if i+winlen > len(samples):\n\n result = []\n for i in range(0,len(samples),winshift):\n if(i+winlen > len(samples)): break\n result.append(samples[i:i+winlen])\n return np.array(result)\n # return np.array([samples[i:i+winlen] for i in range(0,len(samples),winshift)])", "def subsample(cfg, poses, targets, window_width=90, overlap=0.5):\n joint_len = cfg['num_of_joints']\n poses = np.reshape(poses, (poses.shape[0], poses.shape[1], joint_len, 2))\n \n ret_pose = []\n ret_target = []\n \n # iterate poses\n for i in range(len(poses)):\n step = int(-window_width*overlap) \n for ss_stop in range(poses.shape[1], 0, step): \n if ss_stop >= window_width:\n ss = poses[i,ss_stop - window_width:ss_stop]\n ret_pose.append(ss)\n ret_target.append(targets[i]) \n \n poses = np.reshape(ret_pose, (np.array(ret_pose).shape[0], np.array(ret_pose).shape[1],\n joint_len * 2))\n\n return np.array(poses), np.array(ret_target)", "def initialize_sets(self):\n for block in self.blocks:\n # Insert phi nodes from SSA stage into the assignments of the block\n for phi in block.phis:\n block.gen.setdefault(phi, []).insert(0, phi)\n\n # Update the kill set with the variables that are assigned to in\n # the block\n block.kill = set(block.gen)\n block.output = set(block.gen)\n #for entry in block.bound:\n # block.i_kill |= self.assmts[entry].bit\n\n for assmts in self.assmts.itervalues():\n self.entry_point.i_gen |= assmts.bit\n self.entry_point.i_output = self.entry_point.i_gen", "def subset(S, i, sub):\n if i == len(s): # if the last element was reached\n print(sub)\n else:\n subset(S, i + 1, sub) # clone subset of super-subset\n subset(S, i + 1, [*sub, S[i]]) # new subset by adding S[i] to super-subset", "def subset(\n self,\n nodes,\n record_provenance=True,\n reorder_populations=True,\n remove_unreferenced=True,\n ):\n tables = self.dump_tables()\n tables.subset(\n nodes,\n record_provenance=record_provenance,\n reorder_populations=reorder_populations,\n remove_unreferenced=remove_unreferenced,\n )\n return tables.tree_sequence()", "def subset(\n self, \n include: Union[Sequence[Any], Any] = None, \n exclude: Union[Sequence[Any], Any] = None) -> Bunch:\n pass", "def define_windows(w, data):\n data_w1 = data[0:w, :]\n data_w2 = data[w:w * 2, :]\n data_w3 = data[w * 2:w * 3, :]\n data_w4 = data[w * 3:w * 4, :]\n data_w5 = data[w * 4:w * 5, :]\n data_w6 = data[w * 5:, :]\n\n return data_w1, data_w2, data_w3, data_w4, data_w5, data_w6", "def build_subsets(self):\n\t\tself.all = h.SectionList()\n\t\tself.all.wholetree(sec=self.soma)\n\n\t\t# morphological section lists\n\t\tself.axon_list = []\n\t\tself.axosomatic_list = []\n\t\tself.apicalshaftoblique_list = []\n\t\tself.apicaltree_list = []\n\t\tself.tuft_list = []\n\t\tself.soma_list = []\n\t\tself.basal_list = 
[]\n\n\t\tself.axon_list.append(hillock)\n\t\tself.axon_list.append(iseg)\n\t\tself.axon_list.append(axon)\n\n\t\tself.axosomatic_list.append(soma)\n\t\tself.axosomatic_list.append(basal)\n\t\tself.axosomatic_list.append(hillock)\n\t\tself.axosomatic_list.append(iseg)\n\t\tself.axosomatic_list.append(axon)\n\n\t\tself.apicalshaftoblique_list.append(apical)\n\n\t\tself.apicaltree_list.append(apical)\n\t\tself.apicaltree_list.append(tuft)\n\n\t\tself.tuft_list.append(tuft)\n\n\t\tself.soma_list.append(soma)\n\n\t\tself.basal_list.append(basal)\n\n\t# Create lists of cell parts that contain each ion channel type\n\t\tself.nat_list = []\n\t\tself.kslow_list = []\n\t\tself.kfast_list = []\n\t\tself.ih_list = []\n\n\t\tself.ih_list.append(basal)\n\t\tself.ih_list.append(apical)\n\t\tself.ih_list.append(tuft)\n\n\t\tself.excsyn_list.append(basal)\n\t\tself.excsyn_list.append(apical)\n\t\tself.excsyn_list.append(tuft)\n\n\t\tself.inhdendsyn_list.append(basal)\n\t\tself.inhdendsyn_list.append(apical)\n\n\t\tself.inhsomasyn_list.append(soma)\n\n\t\tself.nat_list.append(soma)\n\t\tself.nat_list.append(hillock)\n\t\tself.nat_list.append(iseg)\n\t\tself.nat_list.append(apical)\n\t\tself.nat_list.append(tuft)\n\n\t\tself.kfast_list.append(soma)\n\t\tself.kfast_list.append(apical)\n\t\tself.kfast_list.append(tuft)\n\n\t\tself.kslow_list.append(soma)\n\t\tself.kslow_list.append(apical)\n\t\tself.kslow_list.append(tuft)", "def subsets(conjunto: list, matriz_resposta: list, capacidade: int) -> list:\n\n starts = [linha for linha in range(len(conjunto)+1) if matriz_resposta[linha][capacidade]]\n\n resultados = list()\n append = resultados.append\n for linha in starts:\n coluna = capacidade\n \n subconjunto = set()\n add = subconjunto.add\n\n while coluna >= 0 and linha >= 0:\n if (coluna - conjunto[linha-1]) > 0 and coluna == capacidade:\n coluna -= conjunto[linha-1]\n linha -= 1\n add(conjunto[linha])\n elif matriz_resposta[linha][coluna] == 1:\n linha -= 1\n else:\n coluna -= conjunto[linha]\n add(conjunto[linha])\n\n if sum(subconjunto) == capacidade and subconjunto not in resultados:\n append(subconjunto)\n\n return resultados", "def get_windows(self, x_train, y_train):\n\n def roundMultiple(x, base=4):\n \"\"\"Round n up to nearest multiple of base.\"\"\"\n return int(base * round(float(x)/base))\n\n def auto_set_stride():\n self.stride = roundMultiple(\n int(self.window_size / 10), base=2)\n debug(\"Stride auto set to \", self.stride)\n\n def auto_set_window_size(sequence):\n threshold = (self.left_epsilon + self.right_epsilon) * 2\n time_arr = sequence[:, self.X_TIME_COLUMN]\n self.window_size = roundMultiple(\n np.argmax(time_arr > threshold), base=4)\n debug(\"Window size auto set to \", self.window_size)\n\n windows_x = []\n windows_y = []\n debug(\"Making windows...\")\n if self.window_size is None:\n auto_set_window_size(x_train[0])\n if self.stride is None:\n auto_set_stride()\n\n for index in tqdm(range(len(x_train))):\n sequence_extractions, sequence_extraction_labels = \\\n self.get_windows_for_sequence(\n x_train[index], y_train[index])\n windows_x.append(sequence_extractions)\n windows_y.append(sequence_extraction_labels)\n return np.array(windows_x), np.array(windows_y)", "def new_sample_grid(self, task, window_dims, window_overlap=0, **kwargs):\n dset = self.dset\n if task == 'video_detection':\n sample_grid = new_video_sample_grid(dset, window_dims,\n window_overlap, **kwargs)\n elif task == 'image_detection':\n sample_grid = new_image_sample_grid(dset, window_dims,\n window_overlap, 
**kwargs)\n else:\n raise NotImplementedError(task)\n\n return sample_grid", "def remove_super_sets(sub_set, set_of_sets):\n return [x for x in set_of_sets if not set(x).issuperset(set(sub_set))]", "def make_leftover_subrooms(self,externalrows,externalendpos):\n #~ oldrows=[r[:] for r in self.rows]\n restore={}\n for sq in self.allsqs:\n ori=self.local2global(sq)\n if externalrows[ori[1]][ori[0]] !=self.rows[sq[1]][sq[0]]:\n\n restore[sq]=self.rows[sq[1]][sq[0]]\n self.rows[sq[1]][sq[0]]=externalrows[ori[1]][ori[0]]\n endpos=(externalendpos[0]-self.xoffset,externalendpos[1]-self.yoffset)\n subrooms=self.make_subrooms_from_current_state(endpos=endpos)\n for sq, v in restore.items():\n self.rows[sq[1]][sq[0]]=v\n #restore!\n #~ if self.rows!=oldrows:\n #~ import ipdb;ipdb.set_trace();print 'ipdb!'\n #~ self.rows=oldrows\n return subrooms", "def EventSubsetDisplay( tubes, quantities, PMTFlatMapPositive, tubes_to_plot, title=\"Charge\", cutrange=[-1,-1], padding=10):\n PMTFlatMapPositive_values = [PMTFlatMapPositive[tube] for tube in tubes_to_plot]\n subset_x_values = np.array([value[0] for value in PMTFlatMapPositive_values])\n subset_y_values = np.array([value[1] for value in PMTFlatMapPositive_values])\n \n # set up dimensions for subset preimage with short tank data\n min_subplot_x_value = subset_x_values.min() - padding\n max_subplot_x_value = subset_x_values.max() + padding\n\n min_subplot_y_value = subset_y_values.min() - padding\n max_subplot_y_value = subset_y_values.max() + padding\n \n fig, ax= plt.subplots(figsize=[30,30])\n preimage = np.zeros( preimage_dimensions )\n\n subset_quantities = []\n for idx, tube in enumerate( tubes ):\n if cutrange[0] != cutrange[1]:\n if quantities[idx] < cutrange[0] or quantities[idx] > cutrange[1]:\n continue\n for dx in range(-3,4):\n for dy in range(-3,4):\n if abs(dx)==3 and abs(dy)==3:\n continue\n if tube in tubes_to_plot: \n #print( \"idx=\", idx, \" len(quantities)=\",len(quantities), \" tube=\", tube, \" len(PMTFlatMap)=\", len(PMTFlatMapPositive))\n preimage[ PMTFlatMapPositive[tube][1]+dx, PMTFlatMapPositive[tube][0]+dy ] = quantities[idx]\n subset_quantities.append(quantities[idx])\n \n subset_quantities = np.array(subset_quantities)\n\n imgmin = subset_quantities.min()\n imgmax = subset_quantities.max()\n \n if cutrange[0] != cutrange[1]:\n imgmin = cutrange[0]\n imgmax = cutrange[1]\n \n subset_image = preimage[min_subplot_y_value:max_subplot_y_value, min_subplot_x_value:max_subplot_x_value]\n \n im = ax.imshow( subset_image, extent = [min_subplot_x_value, max_subplot_x_value, min_subplot_y_value, max_subplot_y_value], vmin=imgmin, vmax=imgmax )\n\n fig.suptitle(title, fontsize=80)\n\n plt.rc('xtick', labelsize=24) \n plt.rc('ytick', labelsize=24) \n plt.xlabel('Distance CCW on perimeter from x-axis (cm)', fontsize=48)\n plt.ylabel('Y (cm)', fontsize=48)\n \n plt.set_cmap('gist_heat_r')\n\n # Create colourbar\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n cbar = plt.colorbar(im, cax=cax)\n cbar.ax.tick_params(labelsize=24)\n\n # Fix title height\n plt.tight_layout()" ]
[ "0.5770418", "0.53267133", "0.52984023", "0.51812553", "0.51691467", "0.51118696", "0.5079057", "0.50481063", "0.50467324", "0.5036095", "0.50196743", "0.50149506", "0.49723238", "0.49606135", "0.49236315", "0.49137327", "0.48717156", "0.48612767", "0.48102915", "0.48091227", "0.47925404", "0.47831523", "0.47766387", "0.47659022", "0.47584647", "0.4756608", "0.47518867", "0.47471407", "0.47261307", "0.47094387", "0.4695153", "0.4688234", "0.46748146", "0.46568984", "0.46567643", "0.46559447", "0.46555662", "0.46495056", "0.46446243", "0.4637516", "0.46369466", "0.46028027", "0.45956427", "0.45885476", "0.45790836", "0.45541212", "0.45522085", "0.45481294", "0.45477602", "0.45438856", "0.45300287", "0.45244154", "0.45006514", "0.44791663", "0.44757265", "0.44750997", "0.4468127", "0.44662023", "0.4465492", "0.4460602", "0.44581324", "0.4457038", "0.4447134", "0.4437432", "0.44370288", "0.44357014", "0.4433136", "0.4418268", "0.44132933", "0.4407441", "0.440602", "0.4403397", "0.44020668", "0.43989766", "0.4391769", "0.43886364", "0.4387821", "0.43870395", "0.4386964", "0.43842483", "0.43823454", "0.4381226", "0.4379131", "0.4376073", "0.43759328", "0.43645537", "0.4362693", "0.43616852", "0.4360149", "0.4358821", "0.43574804", "0.4355461", "0.43550143", "0.43495682", "0.4347541", "0.43431553", "0.43374693", "0.4336362", "0.43359894", "0.4334443" ]
0.7655855
0
Tests a given genomic region for enrichment in insertions.
def test_region(
        insertions,  # type: List[Insertion]
        reference_seq,  # type: pyfaidx.Fasta
        region,  # type: Tuple[str, int, int]
        pattern=None,  # type: Optional[str]
        intervals=None,  # type: Optional[Iterable[Tuple[str, int, int]]]
        total=None,  # type: Optional[int]
        filters=None,  # type: Optional[List[Callable]]
        insertion_trees=None  # type: GenomicIntervalTree
):
    # type: (...) -> float
    if total is None:
        total = count_total(
            reference_seq, pattern=pattern, intervals=intervals)

    # Count pattern in region.
    region_count = count_region(reference_seq, region=region, pattern=pattern)

    # Sub-select insertions for region.
    if insertion_trees is None:
        insertion_trees = GenomicIntervalTree.from_objects_position(
            insertions, chrom_attr='seqname')

    region_ins = set(interval[2]
                     for interval in insertion_trees.search(*region))

    # Apply additional filter functions to insertions if given
    # (such as filtering on gene name/id for example).
    if filters is not None:
        for filter_func in filters:
            region_ins = set(ins for ins in region_ins if filter_func(ins))

    # Calculate p-value.
    x = len(list(region_ins))
    mu = len(insertions) * (region_count / total)

    # Note here we use loc=1, because we are interested in
    # calculating P(X >= x), not P(X > x) (the default
    # survival function).
    p_val = poisson.sf(x, mu=mu, loc=1)  # type: float

    return p_val
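The core of this test is the Poisson tail probability P(X >= x) for the observed insertion count, with the expected count mu scaled by the region's share of pattern occurrences. A minimal standalone sketch of that step using scipy.stats — all counts and names below are illustrative assumptions, not values taken from this record:

from scipy.stats import poisson

# Illustrative (assumed) inputs, not drawn from the record above.
n_insertions_total = 500     # insertions across the whole screen
pattern_total = 200000       # pattern occurrences genome-wide
pattern_in_region = 40       # pattern occurrences inside the tested region
x_observed = 12              # insertions observed inside the region

# Expected insertions in the region under a uniform-over-pattern-sites null.
mu = n_insertions_total * (pattern_in_region / pattern_total)

# loc=1 shifts the survival function so the observed count itself is
# included in the tail, i.e. P(X >= x) rather than P(X > x).
p_value = poisson.sf(x_observed, mu=mu, loc=1)
print(p_value)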
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_build_genomic_regions(self):\n\n CDS = pybedtools.BedTool(\"\"\"chr1\\t7700\\t7900\\tfoo\\t0\\t+\\n\n chr1\\t7999\\t8500\\tfoo\\t0\\t+\\n\"\"\", from_string = True)\n UTR5 = pybedtools.BedTool(\"\"\"chr1\\t7499\\t7700\\tfoo\\t0\\t+\\n\"\"\", from_string = True)\n UTR3 = pybedtools.BedTool(\"\"\"chr1\\t8500\\t9000\\tfoo\\t0\\t+\\n\"\"\", from_string = True)\n proxintron = pybedtools.BedTool(\"\"\"chr1\\t100\\t300\\tfoo\\t0\\t+\\n\n chr1\\t798\\t998\\tfoo\\t0\\t+\\n\n chr1\\t2000\\t2200\\tfoo\\t0\\t+\\n\n chr1\\t2798\\t2998\\tfoo\\t0\\t+\\n\n chr1\\t6000\\t6200\\tfoo\\t0\\t+\\n\n chr1\\t6798\\t6998\\tfoo\\t0\\t+\\n\n chr1\\t7900\\t7998\\tfoo\\t0\\t+\\n\"\"\", from_string = True\n )\n distintron = pybedtools.BedTool(\"\"\"chr1\\t301\\t797\\tfoo\\t0\\t+\\n\n chr1\\t2201\\t2797\\tfoo\\t0\\t+\\n\n chr1\\t6201\\t6797\\tfoo\\t0\\t+\\n\"\"\", from_string = True)\n \n regions = build_genomic_regions(pybedtools.BedTool(clipper.test_file(\"test.gtf\")), prox_distance=200) \n \n #print UTR3\n\n #print regions['UTR3']\n print proxintron\n print regions['proxintron']\n #print regions['distintron']\n \n self.assertEqual(len(CDS.intersect(regions['CDS'], f= 1.0, r = True)), 2)\n self.assertEqual(len(UTR5.intersect(regions['UTR5'], f= 1.0, r = True)), 1)\n self.assertEqual(len(UTR3.intersect(regions['UTR3'], f= 1.0, r = True)), 1)\n self.assertEqual(len(proxintron.intersect(regions['proxintron'], f= 1.0, r = True)), 7)\n self.assertEqual(len(distintron.intersect(regions['distintron'], f= 1.0, r = True)), 3)", "def test_signal_regions(i07_nexus: I07Nexus, regions):\n # Note: this should probably always be a for loop with just 1 iteration.\n for i, _ in enumerate(regions):\n assert i07_nexus.signal_regions[i] == regions[i]", "async def test_genomic_insertion(test_handler, genomic_insertion,\n grch38_genomic_insertion):\n resp = await test_handler.normalize(\"NC_000017.10:g.37880993_37880994insGCTTACGTGATG\") # noqa: E501\n assertion_checks(resp.variation_descriptor, grch38_genomic_insertion,\n \"NC_000017.10:g.37880993_37880994insGCTTACGTGATG\")\n\n fixture_id = \\\n \"normalize.variation:NC_000017.10%3Ag.37880993_37880994insGCTTACGTGATG\"\n resp = await test_handler.normalize(\"17-37880993-G-GGCTTACGTGATG\")\n assert resp.variation_descriptor.id == \\\n \"normalize.variation:17-37880993-G-GGCTTACGTGATG\"\n resp.variation_descriptor.id = fixture_id\n assertion_checks(resp.variation_descriptor, grch38_genomic_insertion,\n \"17-37880993-G-GGCTTACGTGATG\")\n\n resp = await test_handler.normalize(\n \"ERBB2 g.37880993_37880994insGCTTACGTGATG\")\n assert resp.variation_descriptor.id ==\\\n \"normalize.variation:ERBB2%20g.37880993_37880994insGCTTACGTGATG\"\n resp.variation_descriptor.id = fixture_id\n assertion_checks(resp.variation_descriptor, genomic_insertion,\n \"ERBB2 g.37880993_37880994insGCTTACGTGATG\")", "def test_regions(self):\n for i, item in enumerate(self._letters_proto.item):\n for code in item.region:\n # Region codes should be alpha-2 (where possible) or alpha-3 codes as\n # defined by ISO 3166 standard.\n self.assertLess(1, len(code))\n self.assertGreater(4, len(code))\n self.assertTrue(code.isupper(), f'Letter {i}: Region code `{code}` '\n 'should be upper-case')\n if len(code) == 3:\n country = pycountry.countries.get(alpha_3=code)\n self.assertTrue(country, f'Failed to find country for code `{code}`')\n if hasattr(country, 'alpha_2'):\n self.fail(f'Letter {i}: Please use two-letter code '\n f'`{country.alpha_2}` instead of `{country.alpha_3}` '\n f'for {country.name}')\n else:\n 
country = pycountry.countries.get(alpha_2=code)\n self.assertTrue(country, f'Failed to find country for code {code}')", "def test_bad_region():\n ref_file = pkg_resources.resource_filename('m260b.test_data', 'ref_practice_W_1_chr_1.fasta')\n read_file = pkg_resources.resource_filename('m260b.test_data', 'practice_w_1.std.bad_region1.bam')\n ref_hdr, reference = read_basic_fasta(ref_file) \n read_iter = pysam.Samfile(read_file)\n chr = ref_hdr[1:].strip()\n areg = list(active_regions(read_iter, reference, chr, start_offset=0, flank=30, dfrac=1.0))\n found = False\n for region, reads in areg:\n found |= region.start <= 5769 <= region.stop\n if not found:\n raise ValueError('Window did not open around variant')", "def _process_region(self, region, writer):", "def test_avalanche_warning_by_region_simple(self):\n pass", "def test_ith_region_nxs_01(i07_nexus_object_01: I07Nexus,\n i, ith_region):\n assert i07_nexus_object_01._get_ith_region(i) == ith_region", "def test_assign_to_regions(self):\n \n tool = pybedtools.BedTool(clipper.test_file(\"FOX2Brain-05.15.09.polyATrim.adapterTrim.rmRep.sorted.rmDup.peaks.bed\"))\n \n assign_to_regions(tool=tool, \n clusters=\"test\", \n speciesFA= clipper.test_file(\"mm9.fa\"), \n regions_dir=os.path.join(clipper.test_dir(), \"regions\"), \n regions={\"exons\" : \"Exon\", \"utr3\" : \"3' UTR\", \n \"utr5\" : \"5' UTR\", \"proxintron500\" : \"Proximal Intron\", \n \"distintron500\" : \"Distal Intron\"} ,\n assigned_dir = clipper.test_dir(),\n fasta_dir = clipper.test_dir(),\n species=\"mm9\", \n nrand = 3, \n getseq=False)", "def is_in_region(location, region):\n c= count_hits_region(location, region)\n if c%2==1:\n return True\n else:\n return False", "def assertRegionsEqual(self, expected_region, actual_region, msg=None):\n if (expected_region.size() == 1) and (actual_region.size() == 1):\n expected_region = _make_region(self.view, expected_region.begin(), expected_region.end())\n actual_region = _make_region(self.view, actual_region.begin(), actual_region.end())\n self.assertEqual(expected_region, actual_region, msg)", "def test_ensure_coverage_works_on_edition(self):\n edition = self._edition()\n provider = AlwaysSuccessfulCoverageProvider(self._db)\n record = provider.ensure_coverage(edition)\n assert isinstance(record, CoverageRecord)\n assert edition.primary_identifier == record.identifier", "def check_region(deepconsensus_input: deepconsensus_pb2.DeepConsensusInput,\n species: str,\n contig_chrom: Dict[str, str]) -> Tuple[bool, bool, bool]:\n\n # Eval set contains only molecules that start and end within the bounds.\n # Train set contains only molecules that are entirely outside of the bounds.\n # Based on this logic, molecules that span the training and eval regions\n # will be thrown out entirely.\n\n if species == 'ecoli':\n assert 'ecoli' in deepconsensus_input.chrom_name\n in_train_region = between(deepconsensus_input.chrom_start, *\n dc_constants.ECOLI_REGIONS['TRAIN']) and between(\n deepconsensus_input.chrom_end, *\n dc_constants.ECOLI_REGIONS['TRAIN'])\n in_eval_region = between(deepconsensus_input.chrom_start, *\n dc_constants.ECOLI_REGIONS['EVAL']) and between(\n deepconsensus_input.chrom_end, *\n dc_constants.ECOLI_REGIONS['EVAL'])\n in_test_region = between(deepconsensus_input.chrom_start, *\n dc_constants.ECOLI_REGIONS['TEST']) and between(\n deepconsensus_input.chrom_end, *\n dc_constants.ECOLI_REGIONS['TEST'])\n\n elif species == 'human':\n assert 'ecoli' not in deepconsensus_input.chrom_name\n # Resolve the chrom name for each 
contig\n chrom_name = contig_chrom.get(deepconsensus_input.chrom_name,\n deepconsensus_input.chrom_name)\n in_train_region = chrom_name in dc_constants.HUMAN_TRAIN_REGIONS\n in_eval_region = chrom_name in dc_constants.HUMAN_EVAL_REGIONS\n in_test_region = chrom_name in dc_constants.HUMAN_TEST_REGIONS\n\n else:\n raise ValueError(\n f\"Invalid species: {species}. Must be either 'human' or 'ecoli.'\")\n\n return in_train_region, in_eval_region, in_test_region", "def region_gene_overlap(\n region_pr,\n gene_bed,\n up=100_000,\n down=100_000,\n):\n genes = pr.read_bed(gene_bed)\n # Convert to DataFrame & we don't need intron/exon information\n genes = genes.as_df().iloc[:, :6]\n\n # Get the TSS only\n genes.loc[genes[\"Strand\"] == \"+\", \"End\"] = genes.loc[\n genes[\"Strand\"] == \"+\", \"Start\"\n ]\n genes.loc[genes[\"Strand\"] == \"-\", \"Start\"] = genes.loc[\n genes[\"Strand\"] == \"-\", \"End\"\n ]\n\n # Extend up and down\n genes.loc[genes[\"Strand\"] == \"+\", \"Start\"] -= up\n genes.loc[genes[\"Strand\"] == \"+\", \"End\"] += down\n genes.loc[genes[\"Strand\"] == \"-\", \"Start\"] -= down\n genes.loc[genes[\"Strand\"] == \"-\", \"End\"] += up\n\n # Perform the overlap\n genes = pr.PyRanges(genes)\n genes = genes.join(region_pr).as_df()\n\n return genes", "def test_bkg_regions(i07_nexus: I07Nexus, regions):\n for i, _ in enumerate(regions):\n assert i07_nexus.background_regions[i] == regions[i]", "def test_job_region(self):\n inv_search = 'region:EU not region:Europe'\n spi_search = 'find region EU not continent Europe'\n self._compare_searches(inv_search, spi_search)", "def test_avalanche_warning_by_region_obs(self):\n pass", "def test_signal_regions_len(i07_nexus, regions):\n assert len(i07_nexus.signal_regions) == len(regions)", "def test_e2e(self):\n\n # Make segmentation & regions file\n seg = get_temp_file_name(extension='gtf')\n out_dir = get_temp_dir()\n iCount.genomes.segment.get_segments(self.gtf, seg, self.fai)\n iCount.genomes.segment.make_regions(seg, out_dir)\n regions = os.path.join(out_dir, iCount.genomes.segment.REGIONS_FILE)\n\n # Build STAR index:\n genome_index = get_temp_dir()\n rcode = iCount.externals.star.build_index(self.fasta, genome_index, annotation=self.gtf)\n self.assertEqual(rcode, 0)\n # Map reads:\n map_dir = get_temp_dir()\n rcode = iCount.externals.star.map_reads(\n self.reads, genome_index, out_dir=map_dir, annotation=self.gtf)\n self.assertEqual(rcode, 0)\n\n # Get bam with mapped reads:\n bam = [fname for fname in os.listdir(map_dir) if fname.startswith('Aligned')][0]\n bam = os.path.join(map_dir, bam)\n pysam.index(bam) # pylint:disable=no-member\n\n sites_single = get_temp_file_name(extension='bed.gz')\n sites_multi = get_temp_file_name(extension='bed.gz')\n skipped = get_temp_file_name(extension='bam')\n iCount.mapping.xlsites.run(bam, sites_single, sites_multi, skipped)\n\n iCount.analysis.rnamaps.run(sites_single, regions)", "def test_avalanche_warning_by_region_detail(self):\n pass", "def _sample_regions(region_rois, gt_regions, voc_sign):\n # overlaps: (rois x gt_regions)\n overlaps_gt = bbox_overlaps(\n np.ascontiguousarray(region_rois[:, 1:5], dtype=np.float),\n np.ascontiguousarray(gt_regions[:, :4], dtype=np.float))\n # gt_assignment = overlaps_gt.argmax(axis=1)\n max_overlaps_gt = overlaps_gt.max(axis=1)\n # labels = gt_regions[gt_assignment, 4:]\n fg_inds = np.where(max_overlaps_gt >= cfg.TRAIN.FG_THRESH_REGION)[0]\n bg_inds = np.where(\n (max_overlaps_gt < cfg.TRAIN.BG_THRESH_HI_REGION) & (max_overlaps_gt >= 
cfg.TRAIN.BG_THRESH_LO_REGION))[0]\n\n # ## Debug Codes\n # print('fg: {} v.s. bg:{}'.format(len(fg_inds), len(bg_inds)))\n # gt_hit_overlap = overlaps_gt.max(axis=0)\n # hit_ids = np.unique(np.where(gt_hit_overlap >= cfg.TRAIN.FG_THRESH_REGION)[0])\n # print('Recall: {} ({}/{})'.format(\n # float(len(hit_ids)) / len(gt_regions), len(hit_ids), len(gt_regions)))\n # The indices that we're selecting (both fg and bg)\n keep_inds = np.append(fg_inds, bg_inds)\n # Select sampled values from various arrays:\n labels = np.ones((len(keep_inds), gt_regions.shape[1] - 4), dtype=np.int64) * voc_sign['end']\n # Here we randomly select regions overlapped with proposed ROI more than 0.7\n gt_assignment = np.zeros(len(fg_inds), dtype=np.int64)\n for i in range(len(fg_inds)):\n gt_assignment[i] = npr.choice(np.where(overlaps_gt[fg_inds[i]] > cfg.TRAIN.FG_THRESH_REGION)[0], size=1)\n labels[i] = gt_regions[gt_assignment[i], 4:]\n\n # add start label to background and padding them with <end> sign\n labels[len(fg_inds):, 0] = voc_sign['start']\n rois = region_rois[keep_inds]\n\n targets_fg = bbox_transform(rois[:len(fg_inds), 1:5], gt_regions[gt_assignment, :4])\n bbox_inside_weights_fg = np.ones(targets_fg.shape, dtype=np.float32) * cfg.TRAIN.BBOX_INSIDE_WEIGHTS\n targets_bg = np.zeros((bg_inds.size, targets_fg.shape[1]), dtype=np.float32)\n bbox_inside_weight_bg = np.zeros(targets_bg.shape, dtype=np.float32)\n bbox_targets = np.vstack([targets_fg, targets_bg])\n bbox_inside_weight = np.vstack([bbox_inside_weights_fg, bbox_inside_weight_bg])\n\n return labels, bbox_targets, bbox_inside_weight, keep_inds", "def createSubdivRegion(*args, **kwargs)->bool:\n pass", "def establecer_region(self, region, guess, delta_ppm=(1,1)): \r\n # obtengo los indices del centro del pico.\r\n xc, yc = self.encontrar_picos(guess, delta_ppm)\r\n # obtengo las coordenadas que determinan el rectangulo donde voy a\r\n # integrar. 
\r\n x_lims, y_lims = self.establecer_limites(xc, yc)\r\n \r\n xi,xf = x_lims\r\n yi,yf = y_lims\r\n spec = self.spec[yi:yf, xi:xf]\r\n ppmGridDir = self.ppmGridDir[yi:yf, xi:xf]\r\n ppmGridInd = self.ppmGridInd[yi:yf, xi:xf]\r\n \r\n \r\n n, m = region\r\n self.regiones[n][m] = Region(ppmGridDir, ppmGridInd, spec)", "def test_ctgs(\n insertions, # type: List[Insertion]\n reference, # type: Reference\n gene_ids=None, # type: Set[str]\n chromosomes=None, # type: Set[str]\n pattern=None, # type: str\n per_sample=True, # type: bool\n window=None #type: Tuple[int, int]\n):\n\n # Default to shared chromosome sequences (typically drops some\n # of the more esoteric extra scaffold/patch sequences).\n if chromosomes is None:\n reference_seq = pyfaidx.Fasta(str(reference.fasta_path))\n reference_gtf = GtfIterator(reference.indexed_gtf_path)\n\n chromosomes = list(\n set(reference_seq.keys()) & set(reference_gtf.contigs))\n\n if len(chromosomes) == 0:\n ValueError('No chromosomes are shared between the reference '\n 'sequence and reference gtf files')\n\n if len(chromosomes) == 0:\n raise ValueError('At least one chromosome must be given')\n\n # Determine gene windows using GTF.\n logging.info('Generating gene windows')\n gene_windows = _build_gene_windows(\n reference.indexed_gtf_path, window=window, chromosomes=chromosomes)\n\n # Subset insertions to gene intervals.\n insertions = _subset_to_windows(insertions, gene_windows)\n\n if gene_ids is None:\n gene_ids = set(ins.metadata['gene_id'] for ins in insertions)\n\n # Collapse insertions per gene/sample (recommended).\n # Corrects for hopping/multiple detection issues.\n if per_sample:\n logging.info('Collapsing insertions')\n insertions = list(_collapse_per_sample(insertions))\n\n # Calculate total number of pattern occurrences within intervals.\n logging.info('Counting pattern occurrences')\n reference_seq = pyfaidx.Fasta(str(reference.fasta_path))\n\n total = count_total(\n reference_seq, pattern=pattern, intervals=gene_windows.values())\n\n # Calculate p-values for each gene.\n logging.info('Calculating significance for genes')\n insertion_trees = GenomicIntervalTree.from_objects_position(\n insertions, chrom_attr='seqname')\n\n p_values = {\n gene_id: test_region(\n insertions=insertions,\n reference_seq=reference_seq,\n region=gene_windows[gene_id],\n total=total,\n pattern=pattern,\n filters=[lambda ins, gid=gene_id: ins.metadata['gene_id'] == gid],\n insertion_trees=insertion_trees)\n for gene_id in gene_ids\n }\n\n # Build result frame.\n result = pd.DataFrame.from_records(\n iter(p_values.items()), columns=['gene_id', 'p_value'])\n\n # Calculate corrected p-value using bonferroni correction.\n result['q_value'] = (result['p_value'] * len(result)).clip_upper(1.0)\n\n # Sort by q-value and p-value.\n result.sort_values(by=['q_value', 'p_value'], inplace=True)\n\n if len(insertions) > 0:\n # Annotate with gene_name if possible.\n if 'gene_name' in insertions[0].metadata:\n name_map = {\n ins.metadata['gene_id']: ins.metadata['gene_name']\n for ins in insertions\n }\n result.insert(1, 'gene_name', result['gene_id'].map(name_map))\n else:\n result['gene_name'] = np.nan\n\n # Annotate with frequency.\n frequency = (Insertion.to_frame(insertions)\n .groupby('gene_id')['sample'].nunique()\n .reset_index(name='n_samples'))\n result = pd.merge(result, frequency, on='gene_id', how='left')\n else:\n result['gene_name'] = np.nan\n result['n_samples'] = np.nan\n\n return result", "def __test_region(self, bk):\n for arg in self.args['region']:\n ds = 
ArgoDataFetcher(backend=bk).region(arg).to_xarray()\n assert isinstance(ds, xr.Dataset) == True", "def geneExonicRegions(self, df):\n scaffold = df.iloc[0].scaffold\n strand = df.iloc[0].strand\n gene_type = df.iloc[0].gene_type\n gene_id = df.iloc[0].gene_id\n gene_name = df.iloc[0].gene_name\n start = df.start.min()\n end = df.end.max()\n bp = [False] * (end - start + 1)\n for i in range(df.shape[0]):\n s = df.iloc[i]['start'] - start\n e = df.iloc[i]['end'] - start + 1\n bp[s:e] = [True] * (e - s)\n regions = list(range(start, end + 1))\n groups = []\n\n for i, j in groupby(bp):\n groups.append((i, len(list(j))))\n e_start = 0\n\n for i in groups:\n e_end = e_start + i[1]\n if i[0]:\n record = Record(scaffold=scaffold, start=regions[e_start],\n end=regions[e_end - 1], gene_type=gene_type, gene_id=gene_id,\n gene_name=gene_name, strand=strand)\n yield record\n e_start += i[1]", "def region(self, gnuc_beg, gnuc_end):\n # check if gnuc_beg and gnuc_end are inside the genomic region\n pexon = None\n overlapping_exons = []\n for exon in self.exons:\n if (exon[0] <= gnuc_beg and exon[1] >= gnuc_end):\n _cds_beg = min(self.cds_beg, self.cds_end)\n _cds_end = max(self.cds_beg, self.cds_end)\n\n if gnuc_beg > _cds_beg and gnuc_end < _cds_end:\n return 'Coding'\n elif gnuc_beg < _cds_beg and gnuc_end < _cds_beg:\n return \"5'UTR\" if self.strand == '+' else \"3'UTR\"\n elif gnuc_beg > _cds_end and gnuc_end > _cds_end:\n return \"3'UTR\" if self.strand == '+' else \"5'UTR\"\n elif gnuc_beg < _cds_beg:\n return \"5'UTR;coding\" if self.strand == '+' else \"3'UTR;coding\"\n elif gnuc_end > _cds_end:\n return \"coding;3'UTR\" if self.strand == '+' else \"coding;5'UTR\"\n else:\n return \"Unknown\"\n if exon[0] >= gnuc_beg and exon[0] <= gnuc_end:\n overlapping_exons.append(exon)\n if pexon and gnuc_beg > pexon[1] and gnuc_end < exon[0]:\n return 'Intronic'\n pexon = exon\n\n if overlapping_exons:\n return 'Intronic;Exonic'\n else:\n return 'Unknown'", "def test_region_check(self):\n reference = {'region': 'reference'}\n target = {'region': 'target'}\n\n # Check that IOError is raised for nonmatching regions\n self.assertRaises(IOError, librad_drift.RadiometricDrift.check_fields, reference, target)\n\n # Check no error raised if regions match\n librad_drift.RadiometricDrift.check_fields(reference, reference)", "def test_center_region(self):\n before_b = \"\"\"\\\n Some 90% of all presidentially declared disasters are weather related,\n leading to around 500 deaths per year and nearly $14 billion in damage.\n StormReady, a program started in 1999 in Tulsa, OK,\n helps arm America's communities with the communication and safety\n skills needed to save lives and property– before and during the event.\n StormReady helps community leaders and emergency managers strengthen local safety programs.\n \"\"\"\n after_b = \"\"\"\\\n Some 90% of all presidentially declared disasters are weather related,\n leading to around 500 deaths per year and nearly $14 billion in damage.\n StormReady, a program started in 1999 in Tulsa, OK,\n helps arm America's communities with the communication and safety\n skills needed to save lives and property– before and during the event.\n StormReady helps community leaders and emergency managers strengthen local safety programs.\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"1.0\", \"7.0\"),\n after_sel=(\"1.0\", \"7.0\"),\n command_name=\"center-region\",\n directives=\"@pagewidth 70\",\n )", "def validate_det1_region(regfile):\n err=-1\n import 
regions\n# from regions.io.ds9.read import DS9Parser\n from regions import Regions\n assert os.path.isfile(regfile), f'{regfile} does not exist!'\n \n# with open(regfile) as fh: \n# region_string = fh.read()\n# parser = DS9Parser(region_string)\n# assert parser.coordsys == 'image', \\\n# f'Region coordinate system is {parser.coordsys}, not image!'\n\n reg = Regions.read(regfile)\n\n\n # Check and make sure this is a \"pixel\" region and not a \"sky\" region\n\n assert 'Pixel' in f'{type(reg[0])}', \\\n f'Region coordinate system is not image coordinates for {regfile}\\n'\n\n # Check to make sure tha the first region in the file is an \"include\" region\n for ri in reg:\n assert ri.meta['include'] is True, \\\n f'\\n {regfile} has an exclusion region first! \\n Put the source region first instead!'\n break", "def test_contains(self):\n r = self.RNA(\"UCA\")\n assert \"U\" in r\n assert \"CA\" in r\n assert \"X\" not in r\n assert \"G\" not in r", "def test_count_region(self):\n before_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n after_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"2.4\", \"4.8\"),\n after_sel=(\"2.4\", \"4.8\"),\n command_name=\"count-region\",\n )", "def test_four_square_regions(self):\n\n vertices_by_region = np.array([[0, 1, 2, 3], [1, 2, 7, 8], [2, 5, 6, 7], [2, 3, 4, 5]])\n centers_by_region = np.array([[1, 1], [3, 1], [3, 3], [1, 3]])\n vertices = np.array([[0, 0], [2, 0], [2, 2], [0, 2], [0, 4], [2, 4], [4, 4], [4, 2], [4, 0]])\n world = data.convert_to_world(vertices_by_region, centers_by_region, vertices)\n\n self.assertEqual({0: {0}, 1: {0, 1}, 2: {0, 1, 2, 3}, 3: {0, 3}, 4: {3}, 5: {2, 3}, 6: {2}, 7: {1, 2}, 8: {1}},\n world.regions_touching_vertex)\n self.assertEqual({0: {1, 3}, 1: {0, 2, 8}, 2: {1, 3, 5, 7}, 3: {0, 2, 4},\n 4: {3, 5}, 5: {2, 4, 6}, 6: {5, 7}, 7: {2, 6, 8}, 8: {1, 7}},\n world.vertices_touching_vertex)", "def test__init(self):\n for chromosome in ['chr1', 'chromosome_2', 'chrom3', 'a', 'adfads', '100', 'scaffold_88']:\n for strand in ['+','-']:\n for position in [1,2,5,100,10000,4323423]:\n ins_pos_5prime = Insertion_position(chromosome,strand,position_before=position)\n ins_pos_3prime = Insertion_position(chromosome,strand,position_after=position)\n # test \"normal\" mutants - check all details, including position\n mutant_5prime = Insertional_mutant(insertion_position=ins_pos_5prime)\n mutant_3prime = Insertional_mutant(insertion_position=ins_pos_3prime)\n mutant_readcount_only = Insertional_mutant_readcount_only()\n mutant_multi_dataset = Insertional_mutant_multi_dataset(insertion_position=ins_pos_5prime)\n # test position details (only for the two \"normal\" mutants)\n assert mutant_5prime.position.min_position == position\n assert mutant_3prime.position.min_position == position-1\n assert mutant_5prime.position.max_position == position+1\n assert mutant_3prime.position.max_position == position\n assert mutant_5prime.position.full_position == \"%s-?\"%(position)\n assert mutant_3prime.position.full_position == \"?-%s\"%position\n # test non-readcount-related info for all mutants except mutant_readcount_only\n for mutant in [mutant_5prime, mutant_3prime, mutant_multi_dataset]:\n assert mutant.position.chromosome == chromosome\n assert mutant.position.strand == strand\n assert mutant.gene == SPECIAL_GENE_CODES.not_determined\n assert mutant.orientation == '?'\n assert mutant.gene_feature == '?'\n 
assert mutant.gene_distances == '?'\n # test readcount-related info for all mutants except mutant_multi_dataset\n for mutant in [mutant_5prime, mutant_3prime, mutant_readcount_only]:\n assert mutant.total_read_count == 0\n assert mutant.perfect_read_count == 0\n assert mutant.sequences_counts_positions_errors == {}\n # test readcount-related info for mutant_multi_dataset\n assert all([x.total_read_count == 0 for x in mutant_multi_dataset.by_dataset.values()])\n assert all([x.perfect_read_count == 0 for x in mutant_multi_dataset.by_dataset.values()])\n assert all([x.sequences_counts_positions_errors == {} for x in mutant_multi_dataset.by_dataset.values()])", "def testEnsemblToGeneFile(self):\n\n e2g = EnsemblToGeneFile(self.enstogenefile)\n\n self.assertTrue(e2g)\n\n self.assertTrue(len(e2g.geneids) == 38803)\n self.assertTrue(len(e2g.tranids) == 94647)", "def test_countries_regions_created(self):\n country_existing = CountryFactory(\n name=iso3166.countries.get('France').name,\n numeric=iso3166.countries.get('France').numeric,\n alpha_3=iso3166.countries.get('France').alpha3,\n )\n region_existing = RegionFactory(name='Existing Region')\n\n self.call_command(filename='power_plant_import/tests/data/six_rows.csv')\n\n # Get the PowerPlants that were created during the import\n (powerplant_ouessant, powerplant_ilarionas, powerplant_tonstad) = self.get_created_plants()\n # Get the Projects that were created during the import\n (project_ouessant1, project_ouessant2, project_liaoning) = self.get_created_projects()\n # The Countries and Regions have been assigned to the correct PowerPlants and Projects\n self.assertEqual(Country.objects.count(), 4)\n self.assertEqual(Region.objects.count(), 3)\n greece = Country.objects.get(name='Greece')\n china = Country.objects.get(name='China')\n norway = Country.objects.get(name='Norway')\n mediterranean = Region.objects.get(name='Gulf and Mediterranean')\n northeast_asia = Region.objects.get(name='Northeast Asia')\n self.assertEqual(set(powerplant_ouessant.countries.all()), set([country_existing]))\n self.assertEqual(set(powerplant_ouessant.regions.all()), set([region_existing]))\n self.assertEqual(set(project_ouessant1.countries.all()), set([country_existing]))\n self.assertEqual(set(project_ouessant1.regions.all()), set([region_existing]))\n self.assertEqual(set(project_ouessant1.countries.all()), set([country_existing]))\n self.assertEqual(set(project_ouessant1.regions.all()), set([region_existing]))\n self.assertEqual(set(powerplant_ilarionas.countries.all()), set([greece]))\n self.assertEqual(set(powerplant_ilarionas.regions.all()), set([mediterranean]))\n self.assertEqual(set(project_liaoning.countries.all()), set([china]))\n self.assertEqual(set(project_liaoning.regions.all()), set([northeast_asia]))\n self.assertEqual(set(powerplant_tonstad.countries.all()), set([norway]))\n self.assertEqual(set(powerplant_tonstad.regions.all()), set([region_existing]))", "def check_has_regions(seq):\n for j in range(len(seq)):\n seq[j]['has_entries'] = 0\n if int(seq[j]['entry_count']) != 0 and int(seq[j]['is_country']) != 1:\n seq[j]['has_entries'] = 1\n\n if int(seq[j]['entry_count']) != 0 and ('regions' not in seq[j] or 'children' not in seq[j]) :\n seq[j]['has_entries'] = 1\n if 'children' in seq[j]:\n seq[j]['has_children'] = 1\n else:\n seq[j]['has_children'] = 0\n return seq", "def test_verify_insert(self):\n self._verify([self.applied_commands['insert']])", "def region_growing(imOr,reg,area,prof,conn,precision):\n\tif prof:\n\t\tif area > 0.085:\n\t\t\treg1 
= morph('erode',reg,25)\n\t\t\tif reg1.max() == 1.0:\n\t\t\t\treg = reg1\n\t\t\telse:\n\t\t\t\treg1 = morph('erode',reg,15)\n\t\t\t\tif reg1.max() == 1.0:\n\t\t\t\t\treg = reg1\n\telse:\n\t\tif area > 0.15:\n\t\t\treg1 = morph('erode',reg,15)\n\t\t\tif reg1.max() == 1.0:\n\t\t\t\treg = reg1\n\n\telementos = contar (reg,1)\n\tseguir = True\n\twhile seguir:\n\t\treg = cv2.convertScaleAbs(reg)\n\t\t_,contours, h = cv2.findContours(reg, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\t\treg[reg != 0] = 1\n\t\tmedia_region = sum(imOr[reg==1])/max(cv2.contourArea(contours[0]),0.001)\n\n\t\tfor elemento in contours[0]:\n\t\t\tif prof:\n\t\t\t\treg = expand(elemento,reg,imOr,media_region,precision/2,conn)\n\t\t\telse:\n\t\t\t\treg = expand(elemento,reg,imOr,media_region,precision,conn)\n\t\telementos_nuevo = contar (reg,1)\n\t\tif elementos == elementos_nuevo:\n\t\t\tseguir = False\n\t\telementos = elementos_nuevo\n\tse = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5))\n\treg = cv2.dilate(reg.astype(np.float32),se,iterations = 3)\n\tse = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5))\n\treg = cv2.erode(reg.astype(np.float32),se,iterations = 2)\n\n\treturn reg", "def try_insert_genome(self, genome):\n raise Exception(\"called abstract insert_genome method\")", "def test_genome():\n assert \"a\" in GENOME\n assert \"A\" in GENOME\n assert \"1\" in GENOME\n assert \" \" in GENOME\n assert type(GENOME) == list", "def contains(self, region):\n region = as_region(region)\n\n if region.chromosome != self.chromosome:\n return False\n\n if region.start >= self.start and region.end <= self.end:\n return True\n return False", "def insert_chromosome(mutated_genome):\n index = random.randint(0,len(mutated_genome))\n if color_mode == 'RGB':\n color_red = random.randint(0,255)\n color_green = random.randint(0,255)\n color_blue = random.randint(0,255)\n color = (color_red, color_blue, color_green)\n else: #color_mode == 'L':\n color = random.randint(0,255)\n opacity = random.randint(0,255)\n points = []\n mutated_genome.insert(index, [color,opacity,points])", "def test_valid_genes_file(self):\n\n # Create a valid genes file\n valid_genes_file = os.path.join(os.path.dirname(\n os.path.abspath(__file__)), \"data\", \"valid_genes_file.bed\")\n\n ref_name = \"ref1\"\n\n genes = {\"gene1\": {\"start\": 0, \"end\": 100},\n \"gene 2\": {\"start\": 101, \"end\": 200}, # Spaces are allowed in the gene name\n \"gene3\": {\"start\": 201, \"end\": 300}}\n\n with open(valid_genes_file, \"w+\") as f:\n for gene in genes:\n f.write(\"%s\\t%s\\t%s\\t%s\\n\" % (ref_name, genes[gene][\"start\"],\n genes[gene][\"end\"], gene))\n\n parsed_genes = parse_genes_file(valid_genes_file, ref_name)\n\n for gene in parsed_genes:\n assert gene in genes\n assert parsed_genes[gene][\"start\"] == genes[gene][\"start\"]\n assert parsed_genes[gene][\"end\"] == genes[gene][\"end\"]\n assert parsed_genes[gene][\"frame\"] == genes[gene][\"start\"] % 3\n\n os.remove(valid_genes_file)", "def test_genomic(self):\n self.c.execute(\"\"\"select expIds,expScores from genomic_test\"\"\")\n rows = self.c.fetchall()\n self.assertEqual(len(rows), 1) # one probe\n self.assertEqual(rows[0][0], '0,1,2,3,4') # ordered by sample id\n values = map(lambda x: float(x), rows[0][1].split(',')) # scores are in correct order\n self.assertTrue(values[0] - 0.479005065149792 < self.tolerance)\n self.assertTrue(values[1] - 25.1 < self.tolerance)\n self.assertTrue(values[2] - 5.3 < self.tolerance)\n self.assertTrue(values[3] - 3.1 < self.tolerance)\n self.assertTrue(values[4] 
- -1.23 < self.tolerance)", "def _import_insee_region(self, cr, uid, ids, data_dir, context=None):\n if context is None:\n context = {}\n filepath = os.path.abspath(os.path.join(data_dir, 'reg2011.csv'))\n region_obj = self.pool.get('insee.region')\n with open(filepath, 'rb') as regfile:\n reader = csv.DictReader(regfile)\n for row in reader:\n values = {\n 'region': row['REGION'],\n 'cheflieu': row['CHEFLIEU'],\n 'tncc': row['TNCC'],\n 'ncc': row['NCC'],\n 'nccenr': row['NCCENR'],\n }\n region_obj.create(cr, uid, values, context=context)", "def annotateRegions(self, regions, annotations):\n\t\t\n\t\tregionsDict = dict()\n\t\tregionsDict['chr1'] = regions[:,0]\n\t\tregionsDict['s1'] = regions[:,1]\n\t\tregionsDict['e1'] = regions[:,2]\n\t\tregionsDict['chr2'] = regions[:,3]\n\t\tregionsDict['s2'] = regions[:,4]\n\t\tregionsDict['e2'] = regions[:,5]\n\t\t\n\t\t#merge the regions and annotations\n\t\tannotatedRegions = dict(regionsDict.items() + annotations.items())\n\t\t\n\t\treturn annotatedRegions", "def regions(self):\n\n class RegionIter(object):\n def __init__(self, region_based):\n self._region_based = region_based\n\n def __len__(self):\n return self._region_based._region_len()\n\n def __iter__(self):\n return self()\n\n def _fix_chromosome(self, regions):\n for r in regions:\n r.fix_chromosome(copy=True)\n\n def __call__(self, key=None, *args, **kwargs):\n fix_chromosome = kwargs.pop('fix_chromosome', False)\n\n if key is None:\n iterator = self._region_based._region_iter(*args, **kwargs)\n else:\n if isinstance(key, string_types) or isinstance(key, GenomicRegion):\n iterator = self._region_based.region_subset(key, *args, **kwargs)\n else:\n iterator = self._region_based._get_regions(key, *args, **kwargs)\n\n if fix_chromosome:\n return self._fix_chromosome(iterator)\n else:\n return iterator\n\n def __getitem__(self, item):\n if isinstance(item, string_types) or isinstance(item, GenomicRegion):\n return self._region_based.region_subset(item)\n return self._region_based._get_regions(item)\n\n return RegionIter(self)", "def check_regions(self, timestamp, bid, ofr, signal, allow=True):\n if self.regions:\n mutated = False\n\n # one ore many region, have to pass at least one test\n for region in self.regions:\n if region.can_delete(timestamp, bid, ofr):\n mutated |= True\n\n elif region.test_region(timestamp, signal):\n # match with at least one region\n return True\n\n if mutated:\n self.cleanup_regions(timestamp, bid, ofr)\n\n return False\n else:\n # no region always pass\n return allow", "def test_api_region(self):\n # load api base\n r = requests.get('{server}/api/0.1/'.format(\n server=self.get_server_url())).json()\n # load samples from url specified in api base\n r = requests.get(r['regions']).json()\n r = requests.get(r['regions'][0]['url']).json()\n self.assertIn('html', r)\n self.assertIn('id', r)\n self.assertIn('name', r)\n self.assertIn('url', r)\n self.assertIn('rivers', r)\n self.assertIn('sections', r)\n self.assertIn('gages', r)", "def filter_to_region(node, contig=None, coords=None):\n ((seq, coord), miss) = node\n if contig and seq != contig:\n return False\n if coords and coord < coords[0]:\n return False\n if coords and coord > coords[1]:\n return False\n return True", "def test_get_genome_8(self):\n self.tkt1.data_add = set([\"accession\"])\n gnm = tickets.get_genome(self.tkt1, gnm_type=\"add\")\n with self.subTest():\n self.assertEqual(gnm.host_genus, \"\")\n with self.subTest():\n self.assertEqual(gnm.accession, \"ABC123\")", "def _equals(self, region):\n\n 
region = as_region(region)\n\n if region.chromosome != self.chromosome:\n return False\n if region.start != self.start:\n return False\n if region.end != self.end:\n return False\n if region.strand != self.strand:\n return False\n return True", "def test_get_genome_6(self):\n self.tkt1.data_add = set([\"annotation_author\"])\n gnm = tickets.get_genome(self.tkt1, gnm_type=\"add\")\n with self.subTest():\n self.assertEqual(gnm.host_genus, \"\")\n with self.subTest():\n self.assertEqual(gnm.annotation_author, 1)", "def test_RNA_position_placement(self):\n \n \n tool = pybedtools.create_interval_from_list(\"chr1 50 60 ENSMUSG1_1_83;ENSMUSG1_6_83 0 + 60 60\".split())\n location_dict = {\"ENSMUSG1\" : {\"strand\" : \"+\", \"regions\" : [(0,100),\n ] \n }\n }\n \n self.assertEqual(RNA_position(tool, location_dict), (.60, .60))\n \n tool = pybedtools.create_interval_from_list(\"chr1 50 60 ENSMUSG1_1_83;ENSMUSG1_6_83 0 - 60 60\".split())\n location_dict = {\"ENSMUSG1\" : {\"strand\" : \"-\", \"regions\" : [(0,100),\n ] \n }\n }\n \n #individual_fraction, total_fraction\n self.assertEqual(RNA_position(tool, location_dict), (.4, .4))", "def prepare_region(path: Path, region: mundi.Region):\n\n # Age distribution \n df = region.age_distribution\n distrib = df.values.copy()[:18:2]\n distrib += df.values[1:18:2]\n distrib[-1] += df.values[18:].sum()\n \n # Estimate cases from deaths\n curve = covid19.epidemic_curve(region, path=CASES)\n deaths = cast(pd.Series,\n curve[\"deaths\"]\n .rolling(WINDOW_SIZE, center=True, win_type=\"triang\")\n .mean()\n .fillna(method=\"bfill\")\n .dropna()\n )\n params = covid19.params(region=region)\n cases = (deaths / params.IFR).astype(\"int\")\n epicurve = cases.diff().fillna(0).astype(\"int\").values\n attack = 100 * cases.iloc[-1] / region.population\n print(\"Attack rate: {:n}%\".format(attack))\n \n # Clean epicurve\n i, j = 0, len(epicurve) - 1\n while epicurve[i] == 0:\n i += 1\n \n while epicurve[j] == 0:\n j -= 1\n \n if (n := len(epicurve) - j -1):\n m = n + WINDOW_SIZE // 2\n epicurve = list(epicurve)[:j - WINDOW_SIZE // 2]\n print(f'WARNING: {region.id} tail with {n} null items. trucanting epicurve to a {m} delay')\n n += WINDOW_SIZE // 2\n epicurve = epicurve[i:j]\n \n # Create config\n conf = TOML_TEMPLATE.format(\n num_iter=60,\n pop_counts=list(distrib),\n epicurve_data=list(epicurve),\n smoothness=0.75,\n delay=n,\n attack=attack,\n ) \n \n with open(path / 'conf.toml', 'w') as fd:\n fd.write(conf)", "def get_inter_cds_regions(annotations):\n # Determine locations of inter-CDS regions for each chromosome\n inter_cds_regions = {}\n\n for chr_id, chromosome in annotations.items():\n # get chromosome dimensions (the first feature represents the\n # chromosome itself)\n ch_end = int(chromosome.features[0].location.end)\n\n # filter out everything except for genes\n genes = [x for x in chromosome.features if x.type == 'gene']\n\n # order by position along chromosome\n genes.sort(key=lambda x: x.location.start)\n\n # add range before first gene\n start = 0\n\n # keep track of strand of polycistronic transcriptional unit\n strand = None\n\n inter_cds_regions[chr_id] = {\n -1: [],\n +1: []\n }\n\n # iterate through genes and store the ranges between them;\n # for TriTrypDB files, the gene boundaries are generally the same\n # as the CDS boundaries.\n for gene in genes:\n # Determine location for the region up to start of the gene\n end = int(gene.location.start)\n\n # Skip over snoRNAs, etc. 
that are nested inside of other genes\n # For example: TcCLB TcChr22-2 179,000:180,000\n if end <= start:\n continue\n\n # Add CDS to relevant list based on strand\n if strand is None:\n # Left-most gene\n inter_cds_regions[chr_id][gene.location.strand].append((start, end))\n elif strand != gene.location.strand:\n # Add ORFs in both directions at transcription switch sites (TSSs)\n inter_cds_regions[chr_id][+1].append((start, end))\n inter_cds_regions[chr_id][-1].append((start, end))\n else:\n # Within PTU; look for ORFs on same strand\n inter_cds_regions[chr_id][strand].append((start, end))\n\n # update start counter and strand\n start = int(gene.location.end)\n strand = gene.location.strand\n\n # add region after last gene\n inter_cds_regions[chr_id][strand].append((start, ch_end))\n\n return inter_cds_regions", "def add_region_of_interest(self, event: str):\n\n mesh = self.comm.lasif.find_event_mesh(event)\n m = UnstructuredMesh.from_h5(mesh)\n mesh_layers = np.sort(np.unique(m.elemental_fields[\"layer\"]))[::-1].astype(int)\n layers = m.elemental_fields[\"layer\"]\n o_core_idx = layers[np.where(m.elemental_fields[\"fluid\"] == 1)[0][0]]\n o_core_idx = np.where(mesh_layers == o_core_idx)[0][0]\n correct_layers = mesh_layers[o_core_idx:]\n roi = np.zeros_like(layers)\n for layer in correct_layers:\n roi = np.logical_or(roi, layers == layer)\n\n m.attach_field(\"ROI\", roi)\n m.write_h5(mesh)", "def test_upcase_region(self):\n before_b = \"\"\"\\\n Americans live in the most severe weather-prone country on Earth. Each year, Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000 tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly weather impacts every American. Communities can now rely on the National Weather Service’s StormReady program to help them guard against the ravages of Mother Nature.\n\n Some 90% of all presidentially declared disasters are weather related, leading to around 500 deaths per year and nearly $14 billion in damage. StormReady, a program started in 1999 in Tulsa, OK, helps arm America's communities with the communication and safety skills needed to save lives and property– before and during the event. StormReady helps community leaders and emergency managers strengthen local safety programs.\n\n StormReady communities are better prepared to save lives from the onslaught of severe weather through better planning, education, and awareness. No community is storm proof, but StormReady can help communities save lives. Does StormReady make a difference?\n \"\"\"\n after_b = \"\"\"\\\n Americans live in the most severe weather-prone country on Earth. Each year, Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000 tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly weather impacts every American. Communities can now rely on the National Weather Service’s StormReady program to help them guard against the ravages of Mother Nature.\n\n SOME 90% OF ALL PRESIDENTIALLY DECLARED DISASTERS ARE WEATHER RELATED, LEADING TO AROUND 500 DEATHS PER YEAR AND NEARLY $14 BILLION IN DAMAGE. STORMREADY, A PROGRAM STARTED IN 1999 IN TULSA, OK, HELPS ARM AMERICA'S COMMUNITIES WITH THE COMMUNICATION AND SAFETY SKILLS NEEDED TO SAVE LIVES AND PROPERTY– BEFORE AND DURING THE EVENT. 
STORMREADY HELPS COMMUNITY LEADERS AND EMERGENCY MANAGERS STRENGTHEN LOCAL SAFETY PROGRAMS.\n\n StormReady communities are better prepared to save lives from the onslaught of severe weather through better planning, education, and awareness. No community is storm proof, but StormReady can help communities save lives. Does StormReady make a difference?\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"3.0\", \"4.0\"),\n after_sel=(\"3.0\", \"4.0\"),\n command_name=\"upcase-region\",\n )", "def check_region(self, region):\n region_slugs = [x.slug for x in self.manager.get_all_regions()]\n return region in region_slugs", "def check_region(self, region_id, action=\"check\"):\n self.init_structures()\n con = SimConnection()\n con.connect(self.gridinfo._url)\n scenedata = con._con.ogrescene_list({\"RegionID\":region_id})\n total = 0\n total_yes = 0\n for groupid, scenegroup in scenedata['res'].items():\n if getattr(self, action+\"_group\")(groupid, scenegroup):\n total_yes += 1\n total += 1\n report = []\n report.append(\"--. \\n\")\n report.append(\"total objects %s. \\n\"%(total,))\n for key in self._found.keys():\n report.append(\"total \"+key+\" %s. \\n\"%(self._total_server[key],))\n report.append(key+\" in blend %s\\n\"%(self._found[key],))\n return report", "def changeSubdivRegion(*args, action: int=0, level: int=0, **kwargs)->bool:\n pass", "def test_two_phase_region_new_phases_does_not_belong():\n compsets_298 = CompsetPair([\n BinaryCompset('P1', 298.15, 'B', 0.5, [0.5, 0.5]),\n BinaryCompset('P2', 298.15, 'B', 0.8, [0.2, 0.8]),\n ])\n\n compsets_300_diff_phases = CompsetPair([\n BinaryCompset('P2', 300, 'B', 0.5, [0.5, 0.5]),\n BinaryCompset('P3', 300, 'B', 0.8, [0.2, 0.8]),\n ])\n\n tpr = TwoPhaseRegion(compsets_298) # Initial compsets for P1 and P2 at 298 K\n assert tpr.compsets_belong_in_region(compsets_300_diff_phases) is False", "def test_get_genome_5(self):\n self.tkt1.data_add = set([\"annotation_status\"])\n gnm = tickets.get_genome(self.tkt1, gnm_type=\"add\")\n with self.subTest():\n self.assertEqual(gnm.host_genus, \"\")\n with self.subTest():\n self.assertEqual(gnm.annotation_status, \"final\")", "def test_number_of_regions(i07_nexus: I07Nexus, correct_num):\n assert i07_nexus._number_of_regions == correct_num", "def test_region_model_translation(self):\n region = self.create_region()\n region.slug_en = 'region-of-mockup'\n region.save()", "def test_two_phase_region_expands_as_compsets_are_added():\n compsets_300 = CompsetPair([\n BinaryCompset('P1', 300, 'B', 0.5, [0.5, 0.5]),\n BinaryCompset('P2', 300, 'B', 0.8, [0.2, 0.8]),\n ])\n\n compsets_305 = CompsetPair([\n BinaryCompset('P1', 305, 'B', 0.5, [0.5, 0.5]),\n BinaryCompset('P2', 305, 'B', 0.8, [0.2, 0.8]),\n ])\n\n compsets_312 = CompsetPair([\n BinaryCompset('P1', 312, 'B', 0.5, [0.5, 0.5]),\n BinaryCompset('P2', 312, 'B', 0.8, [0.2, 0.8]),\n ])\n\n tpr = TwoPhaseRegion(compsets_300) # Initial compsets for P1 and P2 at 300 K\n # compsets don't belong because they are outside the temperature tolerance (10 K)\n assert tpr.compsets_belong_in_region(compsets_312) is False\n assert tpr.compsets_belong_in_region(compsets_305)\n tpr.add_compsets(compsets_305)\n # 312 K compsets could be added now that the 305 K is within 10 K.\n assert tpr.compsets_belong_in_region(compsets_312)", "def get_valid_regions(self):\n pass", "def insert(self, g, g_xmin, g_xmax, g_ymin, g_ymax):\r\n step = 1\r\n g_xstart = rand.randrange(g_xmin, g_xmax - self.xspan, step)\r\n g_ystart = rand.randrange(g_ymin, g_ymax 
- self.yspan, step)\r\n for s_x in range(self.xspan):\r\n for s_y in range(self.yspan):\r\n g_x = g_xstart + s_x\r\n g_y = g_ystart + s_y\r\n s_state = self.cells[s_x][s_y]\r\n g.setcell(g_x, g_y, s_state)", "def which_region(self, g):\n return NotImplementedError", "def test_evi(self):\n scene = Landsat8Scene(self.filenames)\n geoimg = scene.evi()\n self.assertEquals(geoimg.nbands(), 1)\n self.assertTrue('evi' in geoimg.bandnames())", "def test1(self):\n start = 10\n end = 50\n e1_start = 10\n e1_end = 20\n e2_start = 30\n e2_end = 40\n e3_start = 45\n e3_end = 50\n\n t = Transcript(\"t1\", \"chr1\", start, end, \"+\", \"gene1\", None)\n e1 = Edge(\"e1\", \"chr1\", e1_start, e1_end, \"+\", \"gene1\", \"t1\", None)\n e2 = Edge(\"e2\", \"chr1\", e2_start, e2_end, \"+\", \"gene1\", \"t1\", None) \n e3 = Edge(\"e3\", \"chr1\", e3_start, e3_end, \"+\", \"gene1\", \"t1\", None)\n\n t.add_exon(e1)\n t.add_exon(e2)\n t.add_exon(e3)\n \n assert t.get_length() == 28", "def resize_invalid_genes_test(self):\n pass", "def overlaps(self, region):\n region = as_region(region)\n\n if region.chromosome != self.chromosome:\n return False\n\n if self.end is None or region.start is None or region.start <= self.end:\n if self.start is None or region.end is None or region.end >= self.start:\n return True\n return False", "def all_regions_present(self, gene_list, skipped_gene_matches, query_name, second_query_name=None):\n for region in utils.regions:\n if 'IGH' + region.upper() not in ':'.join(gene_list):\n print ' no %s genes in %s for %s %s' % (region, ':'.join(gene_list), query_name, '' if (second_query_name == None) else second_query_name)\n print ' skipped %s' % (':'.join(skipped_gene_matches))\n print 'giving up on query'\n return False\n\n return True", "def in_region(point, plotregion):\n\n if ((point[0] >= plotregion[0]) and\n (point[0] < plotregion[1]) and\n (point[1] >= plotregion[2]) and\n (point[1] < plotregion[3])):\n return True\n else:\n return False", "def which_region(self, g):\n raise NotImplementedError", "def test_get_coverage_of_region_split_read(self):\n\n # turn of read extension\n self.c.extendPairedEnds = False\n self.c.bamFilesList = [self.bamFile1]\n self.c.binLength = 10\n self.c.stepSize = 10\n resp, _ = self.c.count_reads_in_region('chr_cigar', 0, 100)\n nt.assert_array_equal(resp, np.array([[0.],\n [1.],\n [1.],\n [0.],\n [1.],\n [0.],\n [0.],\n [0.],\n [0.],\n [0.]]))", "def test_count_genomic_region_sizes(self):\n \n regions = OrderedDict()\n regions[\"exons\"] = \"Exon\"\n regions[\"utr3\"] = \"3' UTR\"\n regions[\"utr5\"] = \"5' UTR\"\n regions[\"proxintron500\"] = \"Proximal\\nIntron\"\n regions[\"distintron500\"] = \"Distal\\nIntron\"\n results = count_genomic_region_sizes(os.path.join(clipper.test_dir(), \"regions\"), regions, \"mm9\")", "def test_region_in_sync(self):\n region_updated_time = {\n 'regionName': 'RegionOne',\n 'regionTimestamp': '12345'\n }\n self.rpc.get_region_updated_time.return_value = region_updated_time\n self.sync_service._region_updated_time = None\n assert not self.sync_service._region_in_sync()\n self.sync_service._region_updated_time = region_updated_time\n assert self.sync_service._region_in_sync()", "def test_bkg_regions_len(i07_nexus: I07Nexus, regions):\n assert len(i07_nexus.background_regions) == len(regions)", "def test_aggregation_operation(self, space, time, expected):\n # Two regions, three intervals\n data = np.array([[333.333, 333.333, 333.333], [333.333, 333.333, 333.333]])\n intermediate = Adaptor.convert_with_coefficients(data, 
space, 0)\n actual = Adaptor.convert_with_coefficients(intermediate, time, 1)\n np.testing.assert_allclose(actual, expected, rtol=1e-2)", "def test_sequence(self, output, input_):\n input_ = \"\\n\".join(input_)\n g = Genes(input_)\n s = Sequence(genes=g, ages=g.size)\n s.run()\n self.assertEquals(s.population.get_survivor(Sequence.IMPOSSIBLE),\n output)", "def validate_region(region):\n # only allow supported domains\n if region['domain'] not in DOMAINS:\n raise ConfigError('domain')\n\n # search term state is inserted as province if province does not already\n # exist\n if 'state' in region:\n if (region['state'] is not None) and (region['province'] is None):\n region['province'] = region['state']\n\n # north american jobs should have a province/state provided\n if region['domain'] in ['com', 'ca'] and region['province'] is None:\n raise ConfigError('province')", "def region(self, region_string_or_chrom, start=None, end=None):\n region_match = self._match_region_string(region_string_or_chrom)\n if region_match:\n chrom, start, end = region_match.group(1), region_match.group(2), region_match.group(3)\n else:\n chrom = region_string_or_chrom\n try:\n start = int(start)\n end = int(end)\n except Exception as e:\n raise ValueError(\n (\"Since %(region_string_or_chrom)s is not a valid region string, it should be a chromosome, and \"\n \"start, stop args should be valid genomic positions. %(e)s\") % locals())\n\n self.command(\"region %(chrom)s %(start)s %(end)s\" % locals())", "def sync_region(self, region_id):\n self.init_structures()\n con = SimConnection()\n con.connect(self.gridinfo._url)\n scenedata = con._con.ogrescene_list({\"RegionID\":region_id})[\"res\"]\n objects = editor.getSelected()\n if not objects:\n objects = bpy.data.objects\n for obj in objects:\n obj_uuid = str(self.get_uuid(obj))\n if obj_uuid:\n if obj_uuid in scenedata:\n self.import_group(obj_uuid, scenedata[obj_uuid], 10)", "def tnuc_region_in_intron(np, beg, end):\n\n if beg.tpos == 0 or end.tpos == 0: return False\n if beg.pos == end.pos and beg.tpos*end.tpos > 0:\n return True\n if beg.pos+1 == end.pos and beg.tpos>0 and end.tpos<0:\n return True\n if end.pos+1 == beg.pos and beg.tpos<0 and end.tpos>0:\n return True\n\n return False", "def test_init_region_table(self):\n # With an artifical region table\n regionTable = path.join(self.rootDir, \"data/lambda.rgn.h5\")\n p = PBAlignFiles(self.inputFileName,\n self.referenceFile,\n self.outputFileName,\n regionTable)\n self.assertTrue(filecmp.cmp(p.regionTable, regionTable))", "def IntersectRegion(*args, **kwargs):\n return _gdi_.Region_IntersectRegion(*args, **kwargs)", "def push_regions(self, regions: [MouseRegion]):\n raise NotImplementedError", "def testSanity(self):\n\t\tga = GA.GA(2,3)\n\t\tgenomes = ga.seedGenomes()\n\t\tself.assertEqual(len(genomes), 2, \n\t\t \"Wrong number of genomes\")\n\t\tself.assertEqual(len(genomes[0]), 3, \n\t\t \"Wrong size in genomes\")\n\t\t#print genomes\n\t\t#live and learn\n\t\tfitnesses = [23, 45]\n\t\tga.fitnessUpdate(fitnesses)\n\t\tgenomes2 = ga.createNextGeneration()\n\t\tself.assertEqual(len(genomes2), 2, \n \"Wrong number of genomes\")\n\t\tself.assertEqual(len(genomes2[0]), 3, \n \"Wrong size in genomes\")", "def test_RNA_position_placement_split(self):\n \n tool = pybedtools.create_interval_from_list(\"chr1 50 60 ENSMUSG1_1_83;ENSMUSG1_6_83 0 + 125 125\".split())\n location_dict = {\"ENSMUSG1\" : {\"strand\" : \"+\", \"regions\" : [(0, 50),\n (100, 150),\n ] \n }\n }\n \n self.assertEqual(RNA_position(tool, 
location_dict), (.50, .75) )\n \n tool = pybedtools.create_interval_from_list(\"chr1 50 60 ENSMUSG1_1_83;ENSMUSG1_6_83 0 - 25 25\".split())\n location_dict = {\"ENSMUSG1\" : {\"strand\" : \"-\", \"regions\" : [(100, 150),\n (0, 50),\n ] \n }\n }\n \n self.assertEqual(RNA_position(tool, location_dict), (.50, .75))", "def region_growing(im: np.ndarray, seed_points: list, T: int) -> np.ndarray:\n ### START YOUR CODE HERE ### (You can change anything inside this block)\n # You can also define other helper functions\n segmented = np.zeros_like(im).astype(bool)\n\n (H, W) = im.shape\n\n for seed_row, seed_col in seed_points:\n region = []\n region.append([seed_row, seed_col])\n for row, col in region:\n for rows in range((row-1),(row+2)): # Check neighbouring pixels\n for cols in range((col-1),(col+2)):\n if rows < H and rows >= 0 and cols < W and cols >= 0: # Is pixel inside image?\n if (np.abs(im[seed_row, seed_col] - im[rows, cols]) <= T) and not segmented[row, col]:\n region.append([rows, cols])\n segmented[row, col] = True\n return segmented\n ### END YOUR CODE HERE ### ", "def IsEqual(*args, **kwargs):\n return _gdi_.Region_IsEqual(*args, **kwargs)", "def split():\n flag = 0\n for chromosome in region:\n for inf in region[chromosome]:\n if flag == 0:\n if chromosome not in test_set:\n test_set[chromosome] = [inf]\n else:\n test_set[chromosome].append(inf)\n else:\n if chromosome not in train_set:\n train_set[chromosome] = [inf]\n else:\n train_set[chromosome].append(inf)\n\n flag += 1\n flag %= 10", "def test_index_geq_3(self):\n self.insert()\n data = self.tbl[6:]\n assert self.check(self.idata[2:], data)", "def test_get_genome_4(self):\n self.tkt1.data_add = set([\"subcluster\"])\n gnm = tickets.get_genome(self.tkt1, gnm_type=\"add\")\n with self.subTest():\n self.assertEqual(gnm.host_genus, \"\")\n with self.subTest():\n self.assertEqual(gnm.subcluster, \"A2\")", "def test_is_gene_continuously_amplified_wrong_input(self):\n self.assertEqual(\"Wrong input data\", is_gene_continuously_amplified(13))", "def test_get_genome_2(self):\n self.tkt1.data_add = set([\"host_genus\"])\n gnm = tickets.get_genome(self.tkt1, gnm_type=\"add\")\n with self.subTest():\n self.assertEqual(gnm.host_genus, \"Mycobacterium\")\n with self.subTest():\n self.assertEqual(gnm.cluster, \"\")", "def add_region(self, region):\n self._country_code_whitelist.append(\n phonenumbers.country_code_for_valid_region(region))" ]
[ "0.6306166", "0.5873809", "0.5715272", "0.5644633", "0.5633991", "0.5508607", "0.55017656", "0.5482008", "0.53994864", "0.53851366", "0.53696203", "0.53695005", "0.53581506", "0.5352486", "0.5298148", "0.5286712", "0.52787757", "0.5276704", "0.52690274", "0.5268633", "0.52497023", "0.5241601", "0.5193902", "0.5164972", "0.5160878", "0.51478887", "0.51445574", "0.51347893", "0.51325184", "0.51126933", "0.5110579", "0.51081425", "0.5098751", "0.50720227", "0.5065268", "0.50599605", "0.5059831", "0.5026172", "0.49905485", "0.4968419", "0.49488944", "0.49479693", "0.49471733", "0.49453112", "0.49351686", "0.49301338", "0.49256915", "0.4922918", "0.49215552", "0.4897578", "0.48908013", "0.48849407", "0.4884682", "0.48705393", "0.4841117", "0.48172587", "0.48099428", "0.48078197", "0.48045135", "0.48036465", "0.47994533", "0.47993782", "0.47959852", "0.47926766", "0.47895363", "0.4771587", "0.47656268", "0.4761484", "0.47494096", "0.47337744", "0.47337466", "0.4733232", "0.47325477", "0.47295514", "0.47260332", "0.47260228", "0.47245404", "0.47203466", "0.4717211", "0.47109905", "0.4708067", "0.47062498", "0.46938902", "0.46835503", "0.46819595", "0.4677972", "0.4677961", "0.46752936", "0.46725836", "0.4664941", "0.46629313", "0.46618593", "0.46563065", "0.46534437", "0.4652581", "0.46513236", "0.46508366", "0.46451646", "0.46431276", "0.46418864" ]
0.6274175
1
Counts occurrences of pattern within given genomic region.
def count_region( reference_seq, # type: pyfaidx.Fasta region, # type: Tuple[str, int, int] pattern=None # type: Optional[str] ): # type: (...) -> int chrom, start, end = region seq = reference_seq[chrom][int(start):int(end)] return _count_sequence(seq, regex=_build_regex(pattern))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count(pattern, string, overlapping=True, sensitive=True, regexp=False):\n return len(SE.findall(pattern, string, overlapping, sensitive, regexp))", "def pattern_count(DNA, pattern, start=0, end=0, mutation_thresh=0):\n if start < 0 or start >= len(DNA):\n raise ValueError(\"The starting position should be between 0 and the size \" + \\\n \"of the DNA\")\n\n k = len(pattern)\n count = 0\n end = len(DNA) - k + 1 if end == 0 else end\n\n for i in range(0, end):\n if hamming_distance(DNA[i:i+k], pattern) <= mutation_thresh:\n count += 1\n\n return count", "def count_occurrences(text, pattern, d=0):\n return len(find_occurrences(text, pattern, d))", "def CountOccurrences(pattern, bwt, starts, occ_counts_before):\n # Implement this function yourself\n return 0", "def count_patterns(pattern, file):\n count = 0\n with open(file, 'r') as f:\n for line in f:\n if re.search(pattern, line):\n count += 1\n print(\"The pattern '{}' appears {} times.\".format(pattern, count))", "def count_pattern(sentence, pattern):\n n = len(pattern)\n counter = 0\n for i in range(len(sentence) - n + 1):\n if sentence[i:i+n] == pattern:\n counter += 1\n\n return counter", "def count_hits_region(location, region):\n l=len(region)\n c=0\n for i in range(0,l-1):\n if hits_border(location,region[i],region[i+1])==True:\n c=c+1\n return c", "def PatternCount(text, pattern):\n\n count = 0\n for i in range(0, len(text)-len(pattern)+1):\n if text[i:i+len(pattern)] == pattern:\n count += 1\n return count", "def get_pattern_count(sequence, pattern):\n return len(re.findall(r'(?=' + pattern + ')', sequence))", "def pattern_count(text, pattern):\n return len([i\n for i in range(0, len(text) - len(pattern) + 1)\n if text[i:i + len(pattern)] == pattern])", "def pattern_count(text, pattern):\n\n count = 0\n len_text = len(text)\n len_pattern = len(pattern)\n for i in range(len_text - len_pattern):\n if pattern in text[i:i + len_pattern]:\n count = count + 1\n else:\n continue\n return count", "def CountOccurrences(pattern, bwt, starts, occ_counts_before, suffix_array):\r\n # 0$ 1A 2T 3G 4C\r\n letters = {'$':0, 'A':1, 'T':2, 'G':3, 'C':4}\r\n top=0\r\n bottom = len(bwt)-1\r\n matches_index = []\r\n while True:\r\n if len(pattern)!=0:\r\n char = pattern[-1]\r\n j = letters[char]\r\n pattern = pattern[:-1]\r\n found=False\r\n for i in range(top,bottom+1):\r\n if bwt[i] == char:\r\n top = occ_counts_before[i][j] + starts[char] -1\r\n bottom = occ_counts_before[bottom][j] + starts[char] -1\r\n found = True\r\n break\r\n\r\n if found==False:\r\n return matches_index\r\n # when pattern is finished proccessing\r\n else:\r\n for i in range(top, bottom+1):\r\n matches_index.append(suffix_array[i])\r\n return matches_index\r\n\r\n return matches_index", "def parse_file_count(path, args):\n try:\n fisier = open(path, 'r')\n except IOError:\n print(\"Nu am putut deschide fisierul :\", path)\n return\n n_found = 0\n pattern = args.pattern\n for line in fisier:\n if args.ignore_case:\n line = line.lower()\n pattern = pattern.lower()\n n_found += line.count(pattern)\n\n fisier.close()\n return n_found", "def count_regexp_occ(regexp=\"\", text=None):\n return len(re.findall(regexp, text))", "def count_regexp_occ(regexp=\"\", text=None):\n return len(re.findall(regexp, text))", "def contig_count(contig):\n return sum([1 for line in open(contig, 'rU').readlines() if line.startswith('>')])", "def utr3_motif_counts(self, pattern):\n return len(re.findall(pattern.upper(), self.three_prime_utr_sequence.upper()))", "def annotate_pattern_occurrences(\n 
record, pattern, feature_type=\"misc_feature\", prefix=\"!\"\n):\n new_record = deepcopy(record)\n label = prefix + str(pattern)\n for location in pattern.find_matches(str(record.seq)):\n annotate_record(\n new_record,\n location=(location.start, location.end),\n feature_type=feature_type,\n label=label,\n )\n return new_record", "def count_locs(file_type, comment_pattern):\n find = \"find . -name '*.{0}' -print0\".format(file_type)\n sed_pattern = \"'/^\\s*{0}/d;/^\\s*$/d'\".format(comment_pattern)\n\n cmd = \"{0} | xargs -0 sed {1} | wc -l\".format(find, sed_pattern)\n\n return check_output(cmd, shell = True).decode('utf-8').replace('\\n', '')", "def utr5_motif_counts(self, pattern):\n return len(re.findall(pattern.upper(), self.five_prime_utr_sequence.upper()))", "def count_total(\n reference_seq, # type: pyfaidx.Sequence\n pattern=None, # type: str\n intervals=None # type: Iterable[Tuple[str, int, int]]\n): # type: (...) -> int\n\n regex = _build_regex(pattern)\n\n if intervals is None:\n # Simply count for the entire sequence.\n count = sum(_count_sequence(reference_seq[seq], regex=regex)\n for seq in reference_seq.keys()) # yapf: disable\n else:\n # Flatten intervals, and then only count for sequences\n # within the flattened intervals.\n merged_intervals = list(merge_genomic_intervals(intervals))\n\n seqs = [\n reference_seq[chrom][start:end]\n for chrom, start, end in merged_intervals\n ]\n\n count = sum(_count_sequence(seq, regex=regex) for seq in seqs)\n\n return count", "def get_count(self):\n\n return len(self._pattern)", "def get_multi_pattern_count(word, patterns):\n\n distinct_positions = set()\n for pattern in patterns:\n result = Util.find_all_occurrences_knuth_morris_pratt(pattern,\n word)\n distinct_positions |= set(result)\n\n return distinct_positions", "def _count_sequence(sequence, regex=None):\n # type: (pyfaidx.Sequence, Pattern[str]) -> int\n\n if regex is None:\n count = len(sequence)\n else:\n count = sum((1 for _ in regex.finditer(str(sequence))))\n\n return count", "def find_occurrences(text, pattern, d=0):\n idx_of_last_pattern = len(text) - len(pattern)\n return [i for i in range(idx_of_last_pattern + 1) if hamming(text[i:i + len(pattern)], pattern) <= d]", "def count(grid):\n star='@'\n c = 0\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j]==star: c += 1\n return c", "def count(seats: List[str]) -> int:\n # Map dimensions\n m = len(seats)\n n = len(seats[0]) if m else 0\n \n count = 0\n \n # Count locations filled with \"#\"\n for i in range(m):\n for j in range(n):\n if seats[i][j] == \"#\":\n count += 1\n\n return count", "def highlight_pattern(self, pad, pattern,\n tag, start=\"1.0\", end=\"end\", regexp=False):\n start = pad.index(start)\n end = pad.index(end)\n pad.mark_set(\"matchStart\", start)\n pad.mark_set(\"matchEnd\", start)\n pad.mark_set(\"searchLimit\", end)\n\n count = GUI.IntVar()\n while True:\n index = pad.search(pattern, \"matchEnd\", \"searchLimit\", count=count,\n regexp=regexp)\n if index == \"\":\n break\n pad.mark_set(\"matchStart\", index)\n pad.mark_set(\"matchEnd\", \"%s+%sc\" % (index, count.get()))\n pad.tag_add(tag, \"matchStart\", \"matchEnd\")", "def count(self, contig=None, start=None, stop=None, region=None,\n until_eof=False, tid=None, read_callback='nofilter',\n reference=None, end=None):\n\n # pass the signature to fetch\n signature = locals()\n signature.pop('read_callback')\n signature.pop('self')\n roi_reads = self.fetch(**signature)\n # make `nofilter` the default filter unless told 
otherwise\n # read_callback = kwargs.get('read_callback', 'nofilter')\n\n # go through all the reads over a given region and count them\n count = 0\n for read in roi_reads:\n if filter_read(read, read_callback):\n count += 1\n return count", "def count(sub_stng, stng):\n instance_count = 0\n start_index = 0\n while stng.find(sub_stng, start_index) != -1:\n instance_count += 1\n start_index = stng.find(sub_stng, start_index) + 1\n\n return instance_count", "def test_count_genomic_region_sizes(self):\n \n regions = OrderedDict()\n regions[\"exons\"] = \"Exon\"\n regions[\"utr3\"] = \"3' UTR\"\n regions[\"utr5\"] = \"5' UTR\"\n regions[\"proxintron500\"] = \"Proximal\\nIntron\"\n regions[\"distintron500\"] = \"Distal\\nIntron\"\n results = count_genomic_region_sizes(os.path.join(clipper.test_dir(), \"regions\"), regions, \"mm9\")", "def occurrences_re(pattern, string):\n exp = re.compile(pattern)\n o = []\n for i in exp.finditer(string):\n o.append([i.start(), i.end()])\n return o", "def count(self, word):\n pass", "def support_count(pattern, D):\n support_count = 0\n tmp_p = set(pattern)\n for transaction in D:\n if tmp_p <= set(transaction):\n support_count += 1\n return support_count", "def count_pegs(self):\r\n count = 0\r\n\r\n for i in range(0, len(self.matrix)):\r\n for j in range(0, len(self.matrix[i])):\r\n if self.matrix[i][j] == \"1\":\r\n count += 1\r\n\r\n return count", "def count_regexp():\r\n # Here's an example regular expression that roughly matches a valid email address.\r\n # The ones you write below should be shorter than this\r\n email = re.compile(\"[a-zA-Z0-9_-]+@[a-zA-Z0-9_-]+\\.[a-zA-Z]{2,5}\")\r\n\r\n ###### Write below #########\r\n subheading = re.compile(\"\\=\\=+.*\\=\\=+\")\r\n link_to_subheading = re.compile(\"\\[\\[[\\w\\'*\\-*\\:*\\(*\\)*\\_*\\s*]+[#][\\s*\\w\\\\'*\\-*\\:*\\(*\\)*\\_*s*]+\\|*\")\r\n doi_citation = re.compile(\"\\{\\{[c][ite](?!{{).*[dD][oO][iI]\\s*[:|,=\\/]*\\s*[0-9]+\\.[0-9]+.*\\}\\}\")\r\n ###### End of your work #########\r\n\r\n patterns = {\r\n \"emails\": email,\r\n \"subheadings\": subheading,\r\n \"links to subheadings\": link_to_subheading,\r\n \"citations with DOI numbers\": doi_citation,\r\n }\r\n\r\n with open(RAW_DUMP_XML, encoding=\"utf-8\") as f:\r\n dump_text = f.read()\r\n for name, pattern in patterns.items():\r\n if pattern is None:\r\n continue\r\n matches = pattern.findall(dump_text)\r\n count = len(matches)\r\n\r\n example_matches = [matches[i * (count // 5)] for i in range(5)]\r\n\r\n print(\"Found {} occurences of {}\".format(count, name))\r\n print(\"Here are examples:\")\r\n print(\"\\n\".join(example_matches))\r\n print(\"\\n\")", "def regioncounts(ribo, out, experiments, \n upperlength, lowerlength, title, horizontal, dump):\n\n return plot_region_counts_wrapper(\n ribo_file = ribo, \n experiment_list = experiments, \n range_lower = lowerlength, \n range_upper = upperlength, \n title = title,\n output_file = out, \n dump_to_file = dump,\n horizontal = horizontal)", "def count():", "def get_regions_counts(fname, seglen, mincounts):\n counts = defaultdict(int)\n seglen=int(seglen)\n with open(fname) as fin:\n infile = csv.DictReader(fin, delimiter='\\t')\n for line in infile:\n if int(line['interactions']) < mincounts:\n continue\n t_reg = (\n line['RNA1 chromosome'],int(int(line['Start of RNA1 first read'])/seglen)*seglen,\n line['RNA1 strand'], \n line['RNA2 chromosome'],int(int(line['Start of RNA2 last read'])/seglen)*seglen,\n line['RNA2 strand'])\n\n counts[t_reg] = int(line['interactions'])\n return counts", 
"def count_island(row, col, island):\n count = 0\n for i in range(row):\n for j in range(col):\n count = count + floodfill(i, j, row, col, island)\n return count", "def find_pattern(pattern, genome):\n\n tens_table = [pow(10, m) for m in xrange(len(pattern))]\n hash_pattern = get_hash(pattern, tens_table)\n index = []\n for current_index in xrange(len(genome) - len(pattern) + 1):\n\t\tif current_index == 0:\n\t\t\tcurrent_hash = get_hash(genome[0:len(pattern)], tens_table)\n\t\telse:\n\t\t\tcurrent_hash = ((current_hash - (nucleotide_value_map[genome[current_index-1]] * tens_table[len(pattern)-1])) * 10 + nucleotide_value_map[genome[current_index-1+len(pattern)]])\n if current_hash == hash_pattern:\n index.append(current_index)\n return index", "def count_annotation_values(graph, annotation):\n return Counter(iter_annotation_values(graph, annotation))", "def trace_region_count(self):\n cmd = enums.JLinkTraceCommand.GET_NUM_REGIONS\n data = ctypes.c_uint32(0)\n res = self._dll.JLINKARM_TRACE_Control(cmd, ctypes.byref(data))\n if (res == 1):\n raise errors.JLinkException('Failed to get trace region count.')\n return data.value", "def test_count_reads_in_region_total(self):\n self.c.skipZeros = False\n self.c.stepSize = 200\n self.c.binLength = 200\n resp, _ = self.c.count_reads_in_region(self.chrom, 0, 200)\n nt.assert_equal(resp, np.array([[2, 4.]]))", "def count(seq):\n\treturn sum(1 for x in seq)", "def count_annotations(graph):\n return Counter(_annotation_iter_helper(graph))", "def counts(self, regex = \"\\w+\"): \n tokenizer = RegexpTokenizer(r'{}'.format(regex))\n count = []\n for i in tqdm(self.text):\n count.append(len(tokenizer.tokenize(i)))\n return count", "def CountAppStrMatch(pattern, text, d, debug = False):\n\tcount = 0\n\tif debug:\n\t\tprint len(text)-len(pattern)+1\n\tfor i in range(len(text)-len(pattern)+1):\n\t\tif debug:\n\t\t\tprint text[i:i+len(pattern)]\n\t\t\tprint HammingDist(text[i:i+len(pattern)], pattern)\n\t\tif HammingDist(text[i:i+len(pattern)], pattern) <= d:\n\t\t\tcount += 1\n\treturn count", "def pattern_overlap(self, pattern):\n return np.sum((2.*pattern.flatten() - 1.)*self.spins)/self.nspins", "def cellranger_counts(fname, genome=\"matrix\"):\n with tables.open_file(fname, \"r\") as f:\n try:\n group = f.get_node(f.root, genome)\n except tables.NoSuchNodeError:\n print(\"That genome does not exist in this file.\")\n return None\n gene_ids = getattr(group, \"features/id\").read()\n barcodes = getattr(group, \"barcodes\").read()\n data = getattr(group, \"data\").read()\n indices = getattr(group, \"indices\").read()\n indptr = getattr(group, \"indptr\").read()\n shape = getattr(group, \"shape\").read()\n\n matrix = sp_sparse.csc_matrix((data, indices, indptr), shape=shape)\n gene_ids = np.array([x.decode() for x in gene_ids])\n barcodes = np.array([x.decode().replace(\"-1\", \"\") for x in barcodes])\n\n return CellRangerCounts(matrix, gene_ids, barcodes)", "def overlap_count(haystack, needle):\n count = 0\n index = 0\n while True:\n try:\n i = haystack.index(needle, index)\n except ValueError:\n break\n count += 1\n index = i+1\n return count", "def countAtom (dico_count, PDB_parsed, debug = 0):\n count = 0\n \n for atom in PDB_parsed : \n residue = tool.transformAA(atom[\"resName\"])\n if debug : print residue\n \n if residue in dico_count : \n atom_Name = atom[\"name\"]\n if atom_Name in dico_count[residue] : \n count = count + 1\n return count", "def count_variants(filename, content=None):\n open_fn = gzip.open if is_gz_file(filename) else open\n 
count = 0\n with open_fn(filename, \"rt\") as ifile:\n for line in ifile:\n if not line.startswith(\"#\"):\n if content:\n if content in line:\n count += 1\n else:\n count += 1\n return count", "def count_matrix(pb_seq):\n assert_same_size(pb_seq)\n pb_count = numpy.zeros((len(pb_seq[0]), len(NAMES)))\n for seq in pb_seq:\n for idx, block in enumerate(seq):\n if block in NAMES:\n pb_count[idx, NAMES.index(block)] += 1.0\n elif block not in [\"Z\", \"z\"]:\n raise InvalidBlockError(block=block)\n return pb_count", "def count(text):\n return len(text)", "def count_gates(qobj, basis, qubits):\n\n #TO DO\n pass", "def countmap(band,skypos,tranges,skyrange,width=False,height=False,\n\t\t\t verbose=0,tscale=1000.,memlight=False,hdu=False,retries=20):\n\timsz = gxt.deg2pix(skypos,skyrange)\n\tcount = np.zeros(imsz)\n\tfor trange in tranges:\n\t\t# If memlight is requested, break the integration into\n\t\t# smaller chunks.\n\t\tstep = memlight if memlight else trange[1]-trange[0]\n\t\tfor i in np.arange(trange[0],trange[1],step):\n\t\t\tt0,t1=i,i+step\n\t\t\tif verbose:\n\t\t\t\tprint_inline('Coadding '+str(t0)+' to '+str(t1))\n\t\t\tevents = gQuery.getArray(gQuery.rect(band,skypos[0],skypos[1],t0,t1,\n\t\t\t\t\t\t\t\t\t\t\t\t skyrange[0],skyrange[1]),\n\t\t\t\t\t\t\t\t\t verbose=verbose,retries=retries)\n\n\t\t\t# Check that there is actually data here.\n\t\t\tif not events:\n\t\t\t\tif verbose>1:\n\t\t\t\t\tprint \"No data in \"+str([t0,t1])\n\t\t\t\tcontinue\n\n\t\t\ttimes = np.array(events,dtype='float64')[:,0 ]/tscale\n\t\t\tcoo =\tnp.array(events,dtype='float64')[:,1:]\n\n\t\t\t# If there's no data, return a blank image.\n\t\t\tif len(coo)==0:\n\t\t\t\tif verbose:\n\t\t\t\t\tprint 'No data in this frame: '+str([t0,t1])\n\t\t\t\tcontinue\n\n\t\t\t# Define World Coordinate System (WCS)\n\t\t\twcs = define_wcs(skypos,skyrange,width=False,height=False)\n\n\t\t\t# Map the sky coordinates onto the focal plane\n\t\t\tfoc = wcs.sip_pix2foc(wcs.wcs_world2pix(coo,1),1)\n\n\t\t\t# Bin the events into actual image pixels\n\t\t\tH,xedges,yedges=np.histogram2d(foc[:,1]-0.5,foc[:,0]-0.5,\n\t\t\t\t\t\t\t\tbins=imsz,range=([ [0,imsz[0]],[0,imsz[1]] ]))\n\t\t\tcount += H\n\n\treturn count", "def count_subs(x,y):\n\t# Encases diagonals in square grid of size 'square'\n\tsquare = x + y - 2\n\tsubs = 0\n\t# For every point counts the number of rectagles with (a,b) as upper left corner\n\tfor a in range(square):\n\t\tfor b in range(square):\n\t\t\tif valid(a,b,x,y):\n\t\t\t\tthis_subs = subs_at_point(a,b,x,y)\n\t\t\t\tprint \"%3d \" %(this_subs),\n\t\t\tprint \"\"\n\treturn subs", "def Counting(seq):\n\n #Scan the sequence, looking for motifs\n\n counting = {k: 0 for k in MOT} # Initialize the counting dictionary.\n # Scan all the motifs and find them in the sequence\n for motif in MOT:\n if len(seq) > len(motif): # Check if the sequence is longer than the motif itself.\n for i in range(len(seq)-len(motif)+1):\n if i == 0: # In case the motif is in the beginning of the sequence\n # print(\"start: \" + seq[i:i+len(motif)] + \" next nuc: \" + seq[i+len(motif)])\n if seq[i:i+len(motif)] == motif and seq[i+len(motif)] != motif[0]: # Check if the next nucleotide is in not part of the motif.\n counting[motif] += 1\n elif i == len(seq)-len(motif): # In case the motif is in the end of the sequence\n \n if seq[i:i+len(motif)] == motif and seq[i-1] != motif[0]: # Check if the previuos nucleotide is in not part of the motif.\n counting[motif] += 1\n elif len(seq) > len(motif)+1: # In case the motif is in the middle of the 
sequence.\n # Check if the motif is not part of another motif (e.g. TT is in TTT).\n\n if seq[i:i+len(motif)] == motif and seq[i+len(motif)] != motif[0] and seq[i-1] != motif[0]:\n counting[motif] += 1\n for nuc_nr in NUC_NR:\n counting[nuc_nr+\"_NR\"] = seq.count(nuc_nr)\n\n return counting", "def indapproxpattern(pattern, string, nummismatch):\n\n indarr = []\n# substringarr = []\n numchars = len(pattern)\n\n for i in xrange(0, len(string) - numchars + 1):\n \n substring = patterncount.subtext(string, i, numchars)\n \n if hammingdist(pattern, substring) <= nummismatch:\n \n indarr.append(i)\n# substringarr.append(substring)\n \n return indarr", "def num_regions(image_data):\n if len(image_data.shape) > 2:\n image_data = skimage.color.rgb2gray(image_data)\n _, num_labels = ndimage.label(image_data)\n return num_labels", "def compute_statistics(self, region):\n x = 0.0\n y = 0.0\n n = 1\n for pixel in region:\n n = n + 1\n x = x + pixel[0]\n y = y + pixel[1]\n\n x = x / n\n y = y / n\n k = 1\n print(\"Region: \" + str(k) + \", Centroid: (\" + str(x) + \",\" + str(y) + \"), Area: \" + str(n))\n\n # Please print your region statistics to stdout\n # <region number>: <location or center>, <area>\n # print(stats)\n\n return n", "def test_count_gaps(self):\n self.assertEqual(self.RNA(\"\").count_gaps(), 0)\n self.assertEqual(self.RNA(\"ACUGUCAGUACGHSDKCUCDNNS\").count_gaps(), 0)\n self.assertEqual(self.RNA(\"GUACGUACAKDC-SDHDSK\").count_gaps(), 1)\n self.assertEqual(self.RNA(\"-DSHUHDS\").count_gaps(), 1)\n self.assertEqual(self.RNA(\"UACHASADS-\").count_gaps(), 1)\n self.assertEqual(self.RNA(\"---CGAUgCAU---ACGHc---ACGUCAGU---\").count_gaps(), 12)", "def test_count_region(self):\n before_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n after_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"2.4\", \"4.8\"),\n after_sel=(\"2.4\", \"4.8\"),\n command_name=\"count-region\",\n )", "def count(self, sub) -> int:\n pass", "def occurences(words):\n\n\t# Add your code here\n\treturn", "def get_pattern_count(left, coins):\r\n if len(coins) == 0:\r\n return 1\r\n # Get next coin\r\n coin = coins[0]\r\n # See how many could go into left\r\n most = left // coin\r\n # Loop through possible\r\n count = 0\r\n for i in range(0, most + 1):\r\n remaining = left - i * coin\r\n count += get_pattern_count(remaining, coins[1:])\r\n\r\n return count", "def routine():\n genes = g.genes\n gene_db = db['ncbi_gene_docs']\n for gene in genes:\n count = gene_db.count({\"gene_id\": gene})\n if count is not 1:\n logger.debug(\"FixMe: {0};\\tCount: {1}\".format(gene, count))", "def num_patterns(self):\n return len(self._pattern_reg)", "def heatmap_counter_by_file_and_code(self, owner, fid, cid):\n\n count = 0\n cur = self.app.conn.cursor()\n sql_t = \"select count(cid) from code_text where owner like ? and cid=? and fid=?\"\n cur.execute(sql_t, [owner, cid, fid])\n result_t = cur.fetchone()\n if result_t is not None:\n count += result_t[0]\n sql_i = \"select count(cid) from code_image where owner like ? and cid=? and id=?\"\n cur.execute(sql_i, [owner, cid, fid])\n result_i = cur.fetchone()\n if result_i is not None:\n count += result_i[0]\n sql_av = \"select count(cid) from code_av where owner like ? and cid=? 
and id=?\"\n cur.execute(sql_av, [owner, cid, fid])\n result_av = cur.fetchone()\n if result_av is not None:\n count += result_av[0]\n return count", "def main():\n row, col, island = make_matrix()\n print(count_island(row, col, island))", "def count(self, sub, start=0, end=None):\n return count(self, sub, start, end)", "def __len__(self: TokenMatcher) -> int:\n return len(self._patterns)", "def keyword_count(searches, doc):\n for search in searches:\n print \"\\\"{0}\\\": {1}\".format(search, len(re.findall(searches[search], doc)))", "def h(self, node):\n count_peg = -1\n for line in node.state.board:\n count_peg += line.count(c_peg())\n return count_peg", "def count(self, patterns=[], aggregators=None, queries=[]):\n import hxl.filters\n return hxl.filters.CountFilter(\n self, patterns=patterns, aggregators=aggregators, queries=queries\n )", "def find_all_occurrences_brute_force(pattern, text):\n\n result = []\n\n if len(text) < len(pattern):\n return result\n\n for i in range(0, len(text) - len(pattern) + 1):\n matched = True\n\n k = 0\n for j in range(i, i + len(pattern)):\n if pattern[k] != text[j]:\n matched = False\n break\n k += 1\n\n if matched:\n result.append(i)\n\n return result", "def count_nucleotides(strand: str) -> dict:\n return dict(Counter(strand))", "def num_cusps_of_regions(self):\n G = self._get_puncturefinder_graph()\n # return [sum(G.subgraph(vertices=region).edge_labels())\n # for region in G.connected_components()]\n return [sum(edge[2]['weight']\n for edge in subgraph.edges(data=True))\n for subgraph in nx.connected_component_subgraphs(G)]", "def countNeighbors(oldgen, x, y):\n temp = 1\n\n count = 0\n for i in range(-1, 2):\n for j in range(-1, 2):\n\n # TODO: this needs rewritin to be more understandable\n if not (i == 0 and j == 0):\n count += int(oldgen[(x + i + WID) % WID][(y + j + HGT) % HGT])\n\n for i in range(-1, 2):\n for j in range(-1, 2):\n temp += 1\n\n count -= int(oldgen[x][y])\n\n return count", "def countsubcatchments(inputfilename=FileSettings.settingsdict['inputfilename']):\r\n global count\r\n with open(inputfilename, 'r') as swmmput:\r\n contents = swmmput.readlines()\r\n count = len(contents)\r\n return(count)", "def getNumberTrajectoryFromRegion(self, spRegex=None, species=None, regRegex=None, region=None, replicate=None, \n frameStart=None, frameEnd=None, timeStart=None, timeEnd=None):\n spIdList = np.array(self._speciesIdParse(spRegex, species, startIndex=1), dtype=np.int32)\n regIdList = np.array(self._regionIdParse(regRegex, region), dtype=np.int32)\n replicate = self._replicateParse(replicate)\n if len(replicate) > 1:\n raise ValueError(\"only one replicate may be specified\")\n\n replicate = \"{:07d}\".format(replicate[0])\n ts = self.h5['Simulations'][replicate]['LatticeTimes'][frameStart:frameEnd]\n\n cacheKey = \"SiteParticleCount\", replicate\n spc = self._cachedResult(cacheKey)\n if spc is None:\n sCount, spc = self._postprocess_siteparticlecounts(replicate)\n self._cachedResult(cacheKey, spc)\n self._cachedResult((\"SiteCount\", replicate), sCount)\n\n # cartesian product over sites and particles\n rs = np.tile(regIdList, len(spIdList))\n ss = np.repeat(spIdList, len(regIdList))\n if len(rs) == 1:\n traj = spc[:,rs[0],ss[0]]\n else:\n traj = np.sum(spc[:,np.tile(regIdList, len(spIdList)), np.repeat(spIdList, len(regIdList))], axis=-1)\n\n return ts, traj", "def text_cond_count(self, condition):\n res = 0\n for intv in self:\n if condition(intv._text):\n res += 1\n return res", "def count_nucleotides(mat):\n\n final_counts = 
np.ones((4, mat.shape[1]))\n\n for i in range(len(mat[0, :])):\n cur_nucleotides = np.ones((4, 1))\n a_count = 0\n c_count = 0\n g_count = 0\n t_count = 0\n for j in range(len(mat[:, 0])):\n if mat[j, i] == 'A':\n a_count = a_count + 1\n elif mat[j, i] == 'C':\n c_count = c_count + 1\n elif mat[j, i] == 'G':\n g_count = g_count + 1\n elif mat[j, i] == 'T':\n t_count = t_count + 1\n cur_nucleotides = np.array([a_count, c_count, g_count, t_count])\n final_counts[:, i] = cur_nucleotides\n return final_counts", "def count_segments(markers) -> int:\n cnt = Counter()\n for row in markers:\n cnt.update(row)\n n_cnt = dict(takewhile(lambda x: x[1] >= 10, cnt.most_common()))\n del n_cnt[1]\n del n_cnt[-1]\n return len(n_cnt.keys())", "def recCountString():\r\n target = raw_input(\"Enter target string: \")\r\n key = raw_input(\"Enter key string: \")\r\n matches = subStringMatchExact(target,key)\r\n print \"match(es) =\",matches", "def count_rasm(text, system=None):\n\n #\"there are a intersection between subsets\"\n if system == None:\n alphabetMap = dict()\n\n indx = 0\n for char in alphabet:\n alphabetMap.update({char: indx})\n indx = indx + 1\n alphabetMap.update({\" \": 70})\n p=len(alphabet)#+1 #the last one for space char\n\n else:\n for subSys in system:\n if not isinstance(subSys, list):\n raise ValueError (\"system must be list of list not list\")\n if shapeHelper.check_repetation(system):\n raise ValueError(\"there are a repetation in your system\")\n\n p = len(alphabet) - len(list(set(chain(*system)))) + len(system)\n alphabetMap = shape(system)\n n=len(text)\n A=numpy.zeros((n, p), dtype=numpy.int)\n i=0\n j=0\n charCount =[]\n for verse in text:\n verse=shapeHelper.convert_text_to_numbers(verse, alphabetMap)\n for k in range(0,p,1) :\n charCount.insert(j, verse.count(k))\n j+=1\n A[i, :] =charCount\n i+=1\n charCount=[]\n j=0\n\n return A", "def count(self):\n return len(self.find())", "def test_ababab():\n assert part_01.count_for('ababab', 2) == 0\n assert part_01.count_for('ababab', 3) == 1", "def count_trees(matrix, dx, dy):\n\n # We begin in the upper left corner\n x = 0\n y = 0\n count = 0\n\n # We continue until y > [height of matrix]\n while(y < len(matrix)):\n if matrix[y][x] == '#':\n count += 1\n\n # X is special since it needs to be wrapped around\n x = (x + dx) % len(matrix[0])\n y += dy\n\n return count", "def _count_seen_occupied(grid: List[List[str]], row: int, col: int) -> int:\n count = 0\n for dx in [-1, 0, 1]:\n for dy in [-1, 0, 1]:\n if not (dx == 0 and dy == 0):\n count += 1 if _is_occupied(grid, row, col, dx, dy) else 0\n return count", "def count_matches(sam_input):\n logging.info(\"Counting aligned bases in %s ...\", sam_input.name)\n\n total_bases = 0\n with pysam.AlignmentFile(sam_input, \"r\") as sam:\n for read in sam:\n total_bases += aligned_bases(read.cigar)\n return total_bases", "def count_contents(\n target : str,\n contents : dict[str, dict[str, int]]\n ) -> int:\n\n @lru_cache() # Cache results to speed up recursion\n def rec_count(color : str) -> int:\n \"\"\"Recursively count the contents of a given color.\"\"\"\n return sum(\n (1 + rec_count(child)) * count\n for child, count in contents[color].items()\n )\n\n return rec_count(target)", "def countOccupied(data):\n\tcounter = 0\n\n\t# loop through rows and columns and\n\t# count the number of '#'s\n\tfor r in range(len(data)):\n\t\tfor c in range(len(data[r])):\n\t\t\tif data[r][c] == '#':\n\t\t\t\tcounter += 1\n\n\treturn counter", "def detect_pattern(array, pattern, ppos=None, dpos=0):\n # Inner 
parameters\n shape = array.shape\n hits = np.zeros(shape, dtype=np.bool)\n pattern = np.asarray(pattern)\n pshape = pattern.shape\n\n # Check the input parameters\n if pattern.ndim != 1:\n raise ValueError(\"Invalid pattern '{0}'.\".format(pattern))\n\n # Pattern instersection\n nb_of_hits = shape[0] - pshape[0] + 1\n hits = np.ones((nb_of_hits, shape[1]), dtype=np.bool)\n for cnt, pattern_value in enumerate(pattern):\n local_match = (array[cnt: cnt + nb_of_hits, :] == pattern_value)\n hits = np.logical_and(hits, local_match)\n\n return hits", "def findall(pattern, string, overlapping=True, sensitive=True, regexp=False):\n if regexp:\n return SE.occurrences_re(pattern, string)\n if overlapping:\n return SE.occurrences(pattern, string, sensitive)\n else:\n return SE.full_words(pattern, string, sensitive)", "def flagser_contain(adjacency_matrix):\n N=adjacency_matrix.shape[0]\n row,col=convertCOO(adjacency_matrix,ret_data=False)\n return compute_cell_count(N, np.transpose(np.array( (row,col))))", "def custom_count(string1, search_string):\n count = 0\n for index in range(0, len(string1)):\n phrase = string1[index:index + len(search_string)]\n count += (phrase == search_string)\n return count", "def num_complementary_regions(self):\n g = self._get_puncturefinder_graph()\n # return g.connected_components_number()\n return nx.number_connected_components(g)", "def check_string_count_in_page( self, pattern, min_count, max_count=None ):\n page = self.last_page()\n pattern_count = page.count( pattern )\n if max_count is None:\n max_count = min_count\n # The number of occurrences of pattern in the page should be between min_count\n # and max_count, so show error if pattern_count is less than min_count or greater\n # than max_count.\n if pattern_count < min_count or pattern_count > max_count:\n fname = self.write_temp_file( page )\n errmsg = \"%i occurrences of '%s' found (min. %i, max. %i).\\npage content written to '%s' \" % \\\n ( pattern_count, pattern, min_count, max_count, fname )\n raise AssertionError( errmsg )" ]
[ "0.690619", "0.6891801", "0.6734169", "0.661228", "0.64735407", "0.64611524", "0.645101", "0.6441295", "0.643269", "0.63974696", "0.6269934", "0.6190485", "0.61015546", "0.5953266", "0.5953266", "0.58486587", "0.57067573", "0.56916803", "0.56484526", "0.56377214", "0.56303567", "0.5600146", "0.55988044", "0.5591743", "0.55828637", "0.5572805", "0.55492663", "0.5517274", "0.550821", "0.54543614", "0.5447163", "0.54111356", "0.5394653", "0.5347886", "0.5347083", "0.5334403", "0.5306089", "0.5306064", "0.52921957", "0.5284626", "0.52735394", "0.52705073", "0.52648723", "0.5256008", "0.52497536", "0.5244282", "0.5234881", "0.52268875", "0.52106804", "0.5173182", "0.5171161", "0.51623034", "0.51588625", "0.5136272", "0.5103786", "0.5096193", "0.50788087", "0.5057316", "0.50533986", "0.5051851", "0.5043303", "0.50223863", "0.50162417", "0.5003303", "0.49986592", "0.49930257", "0.49915707", "0.497393", "0.49713996", "0.4966775", "0.49665108", "0.4964121", "0.49640474", "0.4960191", "0.49377054", "0.49306738", "0.49144852", "0.49054065", "0.49008217", "0.48981488", "0.48923683", "0.4880283", "0.4877018", "0.48733926", "0.48710024", "0.48700625", "0.48633993", "0.4858858", "0.48531407", "0.48486155", "0.48484865", "0.4847684", "0.48461512", "0.48450455", "0.48447156", "0.48403904", "0.4837857", "0.48364893", "0.48314166", "0.48310992" ]
0.7694619
0
Counts occurrences of pattern in sequence.
def _count_sequence(sequence, regex=None):
    # type: (pyfaidx.Sequence, Pattern[str]) -> int

    if regex is None:
        count = len(sequence)
    else:
        count = sum((1 for _ in regex.finditer(str(sequence))))

    return count
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_pattern_count(sequence, pattern):\n return len(re.findall(r'(?=' + pattern + ')', sequence))", "def count_pattern(sentence, pattern):\n n = len(pattern)\n counter = 0\n for i in range(len(sentence) - n + 1):\n if sentence[i:i+n] == pattern:\n counter += 1\n\n return counter", "def count_occurrences(text, pattern, d=0):\n return len(find_occurrences(text, pattern, d))", "def count(seq):\n\treturn sum(1 for x in seq)", "def pattern_count(text, pattern):\n return len([i\n for i in range(0, len(text) - len(pattern) + 1)\n if text[i:i + len(pattern)] == pattern])", "def count(pattern, string, overlapping=True, sensitive=True, regexp=False):\n return len(SE.findall(pattern, string, overlapping, sensitive, regexp))", "def PatternCount(text, pattern):\n\n count = 0\n for i in range(0, len(text)-len(pattern)+1):\n if text[i:i+len(pattern)] == pattern:\n count += 1\n return count", "def CountOccurrences(pattern, bwt, starts, occ_counts_before):\n # Implement this function yourself\n return 0", "def pattern_count(text, pattern):\n\n count = 0\n len_text = len(text)\n len_pattern = len(pattern)\n for i in range(len_text - len_pattern):\n if pattern in text[i:i + len_pattern]:\n count = count + 1\n else:\n continue\n return count", "def pattern_count(DNA, pattern, start=0, end=0, mutation_thresh=0):\n if start < 0 or start >= len(DNA):\n raise ValueError(\"The starting position should be between 0 and the size \" + \\\n \"of the DNA\")\n\n k = len(pattern)\n count = 0\n end = len(DNA) - k + 1 if end == 0 else end\n\n for i in range(0, end):\n if hamming_distance(DNA[i:i+k], pattern) <= mutation_thresh:\n count += 1\n\n return count", "def count_patterns(pattern, file):\n count = 0\n with open(file, 'r') as f:\n for line in f:\n if re.search(pattern, line):\n count += 1\n print(\"The pattern '{}' appears {} times.\".format(pattern, count))", "def get_count(self):\n\n return len(self._pattern)", "def CountOccurrences(pattern, bwt, starts, occ_counts_before, suffix_array):\r\n # 0$ 1A 2T 3G 4C\r\n letters = {'$':0, 'A':1, 'T':2, 'G':3, 'C':4}\r\n top=0\r\n bottom = len(bwt)-1\r\n matches_index = []\r\n while True:\r\n if len(pattern)!=0:\r\n char = pattern[-1]\r\n j = letters[char]\r\n pattern = pattern[:-1]\r\n found=False\r\n for i in range(top,bottom+1):\r\n if bwt[i] == char:\r\n top = occ_counts_before[i][j] + starts[char] -1\r\n bottom = occ_counts_before[bottom][j] + starts[char] -1\r\n found = True\r\n break\r\n\r\n if found==False:\r\n return matches_index\r\n # when pattern is finished proccessing\r\n else:\r\n for i in range(top, bottom+1):\r\n matches_index.append(suffix_array[i])\r\n return matches_index\r\n\r\n return matches_index", "def find_occurrences(text, pattern, d=0):\n idx_of_last_pattern = len(text) - len(pattern)\n return [i for i in range(idx_of_last_pattern + 1) if hamming(text[i:i + len(pattern)], pattern) <= d]", "def parse_file_count(path, args):\n try:\n fisier = open(path, 'r')\n except IOError:\n print(\"Nu am putut deschide fisierul :\", path)\n return\n n_found = 0\n pattern = args.pattern\n for line in fisier:\n if args.ignore_case:\n line = line.lower()\n pattern = pattern.lower()\n n_found += line.count(pattern)\n\n fisier.close()\n return n_found", "def counts(self, regex = \"\\w+\"): \n tokenizer = RegexpTokenizer(r'{}'.format(regex))\n count = []\n for i in tqdm(self.text):\n count.append(len(tokenizer.tokenize(i)))\n return count", "def support_count(pattern, D):\n support_count = 0\n tmp_p = set(pattern)\n for transaction in D:\n if tmp_p <= 
set(transaction):\n support_count += 1\n return support_count", "def count(seq):\n\n if not seq:\n return 0\n elif isinstance(seq[0], list):\n return count(seq[0]) + count(seq[1:])\n else:\n return 1 + count(seq[1:])", "def count_total(\n reference_seq, # type: pyfaidx.Sequence\n pattern=None, # type: str\n intervals=None # type: Iterable[Tuple[str, int, int]]\n): # type: (...) -> int\n\n regex = _build_regex(pattern)\n\n if intervals is None:\n # Simply count for the entire sequence.\n count = sum(_count_sequence(reference_seq[seq], regex=regex)\n for seq in reference_seq.keys()) # yapf: disable\n else:\n # Flatten intervals, and then only count for sequences\n # within the flattened intervals.\n merged_intervals = list(merge_genomic_intervals(intervals))\n\n seqs = [\n reference_seq[chrom][start:end]\n for chrom, start, end in merged_intervals\n ]\n\n count = sum(_count_sequence(seq, regex=regex) for seq in seqs)\n\n return count", "def count(seq, predicate):\n count = 0\n for item in seq:\n if predicate(item):\n count += 1\n return count", "def CountAppStrMatch(pattern, text, d, debug = False):\n\tcount = 0\n\tif debug:\n\t\tprint len(text)-len(pattern)+1\n\tfor i in range(len(text)-len(pattern)+1):\n\t\tif debug:\n\t\t\tprint text[i:i+len(pattern)]\n\t\t\tprint HammingDist(text[i:i+len(pattern)], pattern)\n\t\tif HammingDist(text[i:i+len(pattern)], pattern) <= d:\n\t\t\tcount += 1\n\treturn count", "def utr3_motif_counts(self, pattern):\n return len(re.findall(pattern.upper(), self.three_prime_utr_sequence.upper()))", "def occurrences_re(pattern, string):\n exp = re.compile(pattern)\n o = []\n for i in exp.finditer(string):\n o.append([i.start(), i.end()])\n return o", "def count_regexp_occ(regexp=\"\", text=None):\n return len(re.findall(regexp, text))", "def count_regexp_occ(regexp=\"\", text=None):\n return len(re.findall(regexp, text))", "def counts(sequence):\n # initialize the countainer\n count = defaultdict(int)\n # iterates through sequence elements\n for item in sequence:\n # if element not in counts add 0\n # else add 1\n count[item] = count.get(item, 0) + 1\n return dict(count)", "def get_terminals_count(self, sequence: str) -> int:\n\n res = 0\n\n for terminal in self._terminals:\n if terminal != '':\n res += sequence.count(terminal)\n\n return res", "def at_frequency(self):\n result = str(self.seq).count(\"A\") + str(self.seq).count(\"T\")\n return result", "def utr5_motif_counts(self, pattern):\n return len(re.findall(pattern.upper(), self.five_prime_utr_sequence.upper()))", "def get_multi_pattern_count(word, patterns):\n\n distinct_positions = set()\n for pattern in patterns:\n result = Util.find_all_occurrences_knuth_morris_pratt(pattern,\n word)\n distinct_positions |= set(result)\n\n return distinct_positions", "def count():", "def num_patterns(self):\n return len(self._pattern_reg)", "def count_runlength_per_character(sequence):\n character_counts = defaultdict(list)\n current_character = None\n\n for character in sequence:\n if character != current_character:\n character_counts[character].append(1)\n else:\n character_counts[character][-1] += 1\n\n current_character = character\n\n return character_counts", "def count(sub_stng, stng):\n instance_count = 0\n start_index = 0\n while stng.find(sub_stng, start_index) != -1:\n instance_count += 1\n start_index = stng.find(sub_stng, start_index) + 1\n\n return instance_count", "def count(self, word):\n pass", "def count_all(self):\n return Counter(self._sequence)", "def count_region(\n reference_seq, # type: 
pyfaidx.Fasta\n region, # type: Tuple[str, int, int]\n pattern=None # type: Optional[str]\n): # type: (...) -> int\n\n chrom, start, end = region\n seq = reference_seq[chrom][int(start):int(end)]\n\n return _count_sequence(seq, regex=_build_regex(pattern))", "def annotate_pattern_occurrences(\n record, pattern, feature_type=\"misc_feature\", prefix=\"!\"\n):\n new_record = deepcopy(record)\n label = prefix + str(pattern)\n for location in pattern.find_matches(str(record.seq)):\n annotate_record(\n new_record,\n location=(location.start, location.end),\n feature_type=feature_type,\n label=label,\n )\n return new_record", "def number_positives(seq):\n # Convert sequence to upper case\n seq = seq.upper()\n\n # Check for a valid sequence\n for aa in seq:\n if aa not in bootcamp_utils.aa.keys():\n raise RuntimeError(aa + ' is not a valid amino acid.')\n\n return seq.count('R') + seq.count('K') + seq.count('H')", "def count(self):\n return len(self.find())", "def count(self):\n\n count = 0\n x = self.begin\n\n if self.begin == self.end == None:\n return 0\n\n elif self.begin == self.end:\n return 1\n\n else:\n while x:\n count += 1\n x = x.next\n\n return count", "def count_matrix(pb_seq):\n assert_same_size(pb_seq)\n pb_count = numpy.zeros((len(pb_seq[0]), len(NAMES)))\n for seq in pb_seq:\n for idx, block in enumerate(seq):\n if block in NAMES:\n pb_count[idx, NAMES.index(block)] += 1.0\n elif block not in [\"Z\", \"z\"]:\n raise InvalidBlockError(block=block)\n return pb_count", "def count_elements(seq) -> dict:\n hist = {}\n for i in seq:\n hist[i] = hist.get(i, 0) + 1\n return hist", "def count_elements(seq) -> dict:\n hist = {}\n for i in seq:\n hist[i] = hist.get(i, 0) + 1\n return hist", "def count(s, value):\n total, index = 0, 0\n while index < len(s):\n element = s[index]\n if element == value:\n total += 1\n index += 1\n return total", "def count(seats: List[str]) -> int:\n # Map dimensions\n m = len(seats)\n n = len(seats[0]) if m else 0\n \n count = 0\n \n # Count locations filled with \"#\"\n for i in range(m):\n for j in range(n):\n if seats[i][j] == \"#\":\n count += 1\n\n return count", "def __len__(self: TokenMatcher) -> int:\n return len(self._patterns)", "def count_sequences(self, size):\n raise NotImplementedError", "def counts(self) -> dict:\n return Counter(self.sequence)", "def Counting(seq):\n\n #Scan the sequence, looking for motifs\n\n counting = {k: 0 for k in MOT} # Initialize the counting dictionary.\n # Scan all the motifs and find them in the sequence\n for motif in MOT:\n if len(seq) > len(motif): # Check if the sequence is longer than the motif itself.\n for i in range(len(seq)-len(motif)+1):\n if i == 0: # In case the motif is in the beginning of the sequence\n # print(\"start: \" + seq[i:i+len(motif)] + \" next nuc: \" + seq[i+len(motif)])\n if seq[i:i+len(motif)] == motif and seq[i+len(motif)] != motif[0]: # Check if the next nucleotide is in not part of the motif.\n counting[motif] += 1\n elif i == len(seq)-len(motif): # In case the motif is in the end of the sequence\n \n if seq[i:i+len(motif)] == motif and seq[i-1] != motif[0]: # Check if the previuos nucleotide is in not part of the motif.\n counting[motif] += 1\n elif len(seq) > len(motif)+1: # In case the motif is in the middle of the sequence.\n # Check if the motif is not part of another motif (e.g. 
TT is in TTT).\n\n if seq[i:i+len(motif)] == motif and seq[i+len(motif)] != motif[0] and seq[i-1] != motif[0]:\n counting[motif] += 1\n for nuc_nr in NUC_NR:\n counting[nuc_nr+\"_NR\"] = seq.count(nuc_nr)\n\n return counting", "def process_stack(pattern):\n stack = deque()\n\n for char in pattern:\n if stack and react((char, stack[-1])):\n stack.pop()\n else:\n stack.append(char)\n return len(stack)", "def scan_seq(seq, pattern):\n\n # Look for matches in the sequence\n matches = [str(match.group(1)) for match in re.finditer(pattern, seq)]\n\n # Look for matches in the reverse complementary of the sequence\n revcomp_seq = reverse_complementary(seq)\n matches += [str(match.group(1)) for match in re.finditer(pattern, revcomp_seq)]\n\n return matches", "def count(text):\n return len(text)", "def frequencies(seq):\n d = dict()\n for item in seq:\n try:\n d[item] += 1\n except KeyError:\n d[item] = 1\n return d", "def count(self, char):\n return self._sequence.count(char)", "def text_cond_count(self, condition):\n res = 0\n for intv in self:\n if condition(intv._text):\n res += 1\n return res", "def countAtom (dico_count, PDB_parsed, debug = 0):\n count = 0\n \n for atom in PDB_parsed : \n residue = tool.transformAA(atom[\"resName\"])\n if debug : print residue\n \n if residue in dico_count : \n atom_Name = atom[\"name\"]\n if atom_Name in dico_count[residue] : \n count = count + 1\n return count", "def count(self, sub) -> int:\n pass", "def get_sequence_count(input_fasta_files):\n \n # Correction for the case that only one file passed\n if type(input_fasta_files)==str:\n input_fasta_files=[input_fasta_files]\n \n count=0\n for n in input_fasta_files:\n fasta_f=open(n,'U')\n for label,seq in MinimalFastaParser(fasta_f):\n count+=1\n fasta_f.close()\n return count", "def __len__(self) -> int:\n n_fuzzy_patterns = sum(len(p[\"patterns\"]) for p in self.fuzzy_patterns.values())\n n_regex_patterns = sum(len(p[\"patterns\"]) for p in self.regex_patterns.values())\n return n_fuzzy_patterns + n_regex_patterns", "def count_regexp():\r\n # Here's an example regular expression that roughly matches a valid email address.\r\n # The ones you write below should be shorter than this\r\n email = re.compile(\"[a-zA-Z0-9_-]+@[a-zA-Z0-9_-]+\\.[a-zA-Z]{2,5}\")\r\n\r\n ###### Write below #########\r\n subheading = re.compile(\"\\=\\=+.*\\=\\=+\")\r\n link_to_subheading = re.compile(\"\\[\\[[\\w\\'*\\-*\\:*\\(*\\)*\\_*\\s*]+[#][\\s*\\w\\\\'*\\-*\\:*\\(*\\)*\\_*s*]+\\|*\")\r\n doi_citation = re.compile(\"\\{\\{[c][ite](?!{{).*[dD][oO][iI]\\s*[:|,=\\/]*\\s*[0-9]+\\.[0-9]+.*\\}\\}\")\r\n ###### End of your work #########\r\n\r\n patterns = {\r\n \"emails\": email,\r\n \"subheadings\": subheading,\r\n \"links to subheadings\": link_to_subheading,\r\n \"citations with DOI numbers\": doi_citation,\r\n }\r\n\r\n with open(RAW_DUMP_XML, encoding=\"utf-8\") as f:\r\n dump_text = f.read()\r\n for name, pattern in patterns.items():\r\n if pattern is None:\r\n continue\r\n matches = pattern.findall(dump_text)\r\n count = len(matches)\r\n\r\n example_matches = [matches[i * (count // 5)] for i in range(5)]\r\n\r\n print(\"Found {} occurences of {}\".format(count, name))\r\n print(\"Here are examples:\")\r\n print(\"\\n\".join(example_matches))\r\n print(\"\\n\")", "def test_ababab():\n assert part_01.count_for('ababab', 2) == 0\n assert part_01.count_for('ababab', 3) == 1", "def get_pattern_count(left, coins):\r\n if len(coins) == 0:\r\n return 1\r\n # Get next coin\r\n coin = coins[0]\r\n # See how many could go into left\r\n 
most = left // coin\r\n # Loop through possible\r\n count = 0\r\n for i in range(0, most + 1):\r\n remaining = left - i * coin\r\n count += get_pattern_count(remaining, coins[1:])\r\n\r\n return count", "def sequence_sorted_count(self, x, reverse=False):\n c = 0\n if reverse: it = reversed(self)\n else: it = iter(self)\n for v in it:\n if x == v:\n c += 1\n break\n for v in it:\n if x == v: c += 1\n else: break\n return c", "def count_for(s, value):\n total = 0\n for elem in s:\n if elem == value:\n total = total + 1\n return total", "def count_common_prefix(str_seq, prefix):\r\n\r\n count = 0\r\n for element in str_seq:\r\n if element.startswith(prefix):\r\n count += 1\r\n return count", "def count(self,value = 1):\n n = 0\n for s in self.sample:\n if s == value:\n n += 1\n return n", "def find_all_occurrences_brute_force(pattern, text):\n\n result = []\n\n if len(text) < len(pattern):\n return result\n\n for i in range(0, len(text) - len(pattern) + 1):\n matched = True\n\n k = 0\n for j in range(i, i + len(pattern)):\n if pattern[k] != text[j]:\n matched = False\n break\n k += 1\n\n if matched:\n result.append(i)\n\n return result", "def count_locs(file_type, comment_pattern):\n find = \"find . -name '*.{0}' -print0\".format(file_type)\n sed_pattern = \"'/^\\s*{0}/d;/^\\s*$/d'\".format(comment_pattern)\n\n cmd = \"{0} | xargs -0 sed {1} | wc -l\".format(find, sed_pattern)\n\n return check_output(cmd, shell = True).decode('utf-8').replace('\\n', '')", "def count_while(s, value):\n total, index = 0, 0\n while index < len(s):\n if s[index] == value:\n total = total + 1\n index = index + 1\n return total", "def frequency_array(text, k):\r\n freq_list=[]\r\n p_list=pattern_list(k)\r\n for i in p_list:\r\n freq_list.append(PatternCount(i,text))\r\n return freq_list", "def get_encoding_count(sequence, length=None):\n if length == None:\n length = len(sequence)\n\n # Handle base cases\n if length in (0, 1):\n return 1\n\n count = 0\n\n if sequence[length - 1] > '0':\n count += get_encoding_count(sequence, length - 1)\n\n if (\n sequence[length - 2] == '1' or\n (sequence[length - 2] == '1' and sequence[length - 1] < '7')):\n count += get_encoding_count(sequence, length - 2)\n\n return count", "def next_occurrence_re(pattern, string, start=0):\n exp = re.compile(pattern)\n for i in exp.finditer(string):\n if i.start() >= start:\n return [i.start(), i.end()]\n return []", "def counter(_class, l):\n\t\t# CITE: http://docs.python.org/2/library/itertools.html#itertools.groupby\n\t\toccurrences = {}\n\t\t# Quick explanation:\n\t\t# >>> c4_1, c4_2 = note.Note(\"C4\"), c4_2 = note.Note(\"C4\")\n\t\t# >>> hash(c4_1) == hash(c4_2) # False (why, I have no idea)\n\t\t# >>> hash(c4_1.fullName) == hash(c4_2.fullName) # True\n\n\t\tfixedList = [ \"n_\"+elem.nameWithOctave if isinstance(elem, note.Note) \n\t\t\t\t\t else None \n\t\t\t\t\t for elem in l ] \n\n\t\tfor elem in set(fixedList):\n\t\t\tif ( type(elem) == str and elem[0:2] == \"n_\" ):\n\t\t\t\toccurrences[note.Note(elem[2:])] = fixedList.count(elem)\n\t\t\telse:\n\t\t\t\toccurrences[elem] = l.count(elem)\n\t\treturn occurrences", "def count_until(match_value):", "def count(self, base):\n return self._dna.count(base)", "def count(args):\n path = os.path.abspath(args.path)\n total = 0\n\n if args.recursive:\n if os.path.exists(args.path):\n for item in os.listdir(path):\n little_path = os.path.join(path, item)\n if os.path.isfile(little_path):\n total += parse_file_count(little_path, args)\n else:\n total += count(little_path)\n else:\n print(\"EROARE: 
<\" + args.path +\n \"> invalid, nu putem ajunge acolo\")\n else:\n if os.path.isfile(args.path):\n total += parse_file_count(args.path, args)\n else:\n print(\"EROARE: <\" + args.pattern +\n \"> invalid, nu este fisier\")\n return total", "def count(self, sub, start=0, end=None):\n return count(self, sub, start, end)", "def count() -> int:\n pass", "def count(self, character):\n c_list = [i for i in self.input_words]\n n = sum([True for char in c_list if char == character])\n return n", "def _get_observation_count(self):\n observation_count = 0\n for sequence in self.seq_list:\n observation_count += sequence.shape[0] \n \n return observation_count", "def count_abbas(str):\r\n i = 0\r\n count = 0\r\n for i in range(0, len(str)):\r\n if str.startswith(\"abba\", i):\r\n count += 1\r\n return count", "def count(self, value): # real signature unknown; restored from __doc__\n return 0", "def count(a, sub, start=0, end=None):\n return _vec_string(a, int_, 'count', [sub, start] + _clean_args(end))", "def count_elements(path):\n count = 0\n with open(path, 'r') as f:\n groups = f.read().split('\\n\\n')\n for idx in range(len(groups)):\n word = groups[idx].split('\\n')\n no_of_ele = len(word)\n for i in range(no_of_ele-1):\n word[0] = word[0]+word[i+1]\n count += len(''.join(set(word[0])))\n return count", "def numberOfSubstrings(self, s: str) -> int:\n i = 0\n res = 0\n d = {c:0 for c in 'abc'}\n \n for j, val in enumerate(s):\n d[val] += 1\n while all(d.values()):\n d[s[i]] -= 1\n i += 1\n res += i\n \n return res", "def test_count_ab(self):\n AB = get_moltype(\"ab\")\n seq = AB.make_array_seq(\"aaba-\", alphabet=AB.alphabet.with_gap_motif())\n c = seq.counts()\n self.assertEqual(c.to_dict(), {\"a\": 3, \"b\": 1})\n c = seq.counts(allow_gap=True)\n self.assertEqual(c.to_dict(), {\"a\": 3, \"b\": 1, \"-\": 1})", "def count(self, value):\n self.__validate_value(value)\n counter = 0\n for v in self.__list:\n if v == value:\n counter += 1\n return counter", "def count(self):\n\n raise NotImplementedError", "def count_sub(sub, s):\n count = 0\n for i in range(len(s) - len(sub) + 1):\n if s[i:i + len(sub)] == sub:\n count += 1\n return count", "def countsubcatchments(inputfilename=FileSettings.settingsdict['inputfilename']):\r\n global count\r\n with open(inputfilename, 'r') as swmmput:\r\n contents = swmmput.readlines()\r\n count = len(contents)\r\n return(count)", "def get_pattern_positions(sequence, pattern):\n return [pos.span()[0] for pos in re.finditer(r'(' + pattern + ')', sequence)]", "def requests_count(regexp, data):\n requests_list = re.findall(regexp, data)\n return int(list(Counter(requests_list).values())[0])", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def count_occurrences(x):\r\n tmp_x = sorted(copy(x))\r\n ux = unique(x)\r\n return searchsorted(tmp_x, ux, 'right') - searchsorted(tmp_x, ux, 'left')", "def get_count_of_elements_by_condition(sequence):\n elements_and_indexes = {sequence[i]: i + 1\n for i in range(1, len(sequence) - 1)}\n filtered_values = filter(lambda element:\n 2 ** element[1] < element[0] < math.factorial(element[1]),\n elements_and_indexes.items())\n return len(dict(filtered_values))", "def countAdjectives( aList ):\n\ttotalAdjectiveCount = 0\n\tadjectiveCount = 0\n\tfindAdjective = re.compile('JJ')\n\tfor x in aList:\n\t\tfor y in x:\n\t\t\tif findAdjective.search(str(x)) is not None:\n\t\t\t\tadjectiveCount += 1\n\t\tprint adjectiveCount\n\t\tprint (\"\\n\")\n\t\ttotalAdjectiveCount += 
adjectiveCount\n\t\tadjectiveCount = 0\n\treturn totalAdjectiveCount" ]
[ "0.8225791", "0.757032", "0.74906814", "0.7362785", "0.7350869", "0.73212504", "0.7278474", "0.7140979", "0.707895", "0.6971106", "0.6928305", "0.6643842", "0.65604806", "0.64908123", "0.6395848", "0.6317644", "0.6313916", "0.6293871", "0.62750435", "0.62558323", "0.6236206", "0.62350285", "0.61474967", "0.6138225", "0.6138225", "0.61251825", "0.6114028", "0.60748726", "0.60699207", "0.60663867", "0.6010719", "0.599708", "0.5994336", "0.5968319", "0.5967515", "0.59546864", "0.59543395", "0.5922288", "0.5860685", "0.5850687", "0.5806497", "0.5792051", "0.57867557", "0.57867557", "0.57830703", "0.57824636", "0.5770431", "0.5760685", "0.57504123", "0.57442814", "0.573316", "0.5714124", "0.57043517", "0.5696373", "0.5688114", "0.5680592", "0.56778866", "0.56569815", "0.5651194", "0.56511676", "0.5650541", "0.5641718", "0.56385446", "0.56220835", "0.5617833", "0.55939907", "0.5591817", "0.5581016", "0.5570847", "0.55649966", "0.5555351", "0.554656", "0.55384654", "0.553531", "0.5509955", "0.5504529", "0.54767525", "0.54676384", "0.5448703", "0.5446217", "0.5444851", "0.54438716", "0.5438482", "0.5420951", "0.5414942", "0.53996664", "0.5396892", "0.53949094", "0.5394581", "0.53940946", "0.53931016", "0.5391743", "0.53862053", "0.53847915", "0.53847915", "0.53847915", "0.53847915", "0.5375598", "0.5368552", "0.5366149" ]
0.76051104
1
Counts total occurrences of pattern in reference.
def count_total(
        reference_seq,  # type: pyfaidx.Sequence
        pattern=None,  # type: str
        intervals=None  # type: Iterable[Tuple[str, int, int]]
):  # type: (...) -> int

    regex = _build_regex(pattern)

    if intervals is None:
        # Simply count for the entire sequence.
        count = sum(_count_sequence(reference_seq[seq], regex=regex)
                    for seq in reference_seq.keys())  # yapf: disable
    else:
        # Flatten intervals, and then only count for sequences
        # within the flattened intervals.
        merged_intervals = list(merge_genomic_intervals(intervals))

        seqs = [
            reference_seq[chrom][start:end]
            for chrom, start, end in merged_intervals
        ]

        count = sum(_count_sequence(seq, regex=regex) for seq in seqs)

    return count
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def CountOccurrences(pattern, bwt, starts, occ_counts_before):\n # Implement this function yourself\n return 0", "def count_occurrences(text, pattern, d=0):\n return len(find_occurrences(text, pattern, d))", "def calculate_reference(gram_list, references):\n gram_sub_str = ' '.join(gram_list)\n gram_count = []\n for item in references:\n # calculate the count of the sub string\n gram_count.append(len(re.findall(gram_sub_str, item)))\n return gram_count", "def calculate_reference(gram_list, references):\r\n gram_sub_str = ' '.join(gram_list)\r\n gram_count = []\r\n for item in references:\r\n # calculate the count of the sub string\r\n gram_count.append(len(re.findall(gram_sub_str, item)))\r\n return gram_count", "def get_pattern_count(sequence, pattern):\n return len(re.findall(r'(?=' + pattern + ')', sequence))", "def count(pattern, string, overlapping=True, sensitive=True, regexp=False):\n return len(SE.findall(pattern, string, overlapping, sensitive, regexp))", "def get_count(self):\n\n return len(self._pattern)", "def refCount(self, node):\n return self._references.get(node, 0)", "def referencecount(self) :\n\t\ttry :\n\t\t\treturn self._referencecount\n\t\texcept Exception as e:\n\t\t\traise e", "def pattern_count(DNA, pattern, start=0, end=0, mutation_thresh=0):\n if start < 0 or start >= len(DNA):\n raise ValueError(\"The starting position should be between 0 and the size \" + \\\n \"of the DNA\")\n\n k = len(pattern)\n count = 0\n end = len(DNA) - k + 1 if end == 0 else end\n\n for i in range(0, end):\n if hamming_distance(DNA[i:i+k], pattern) <= mutation_thresh:\n count += 1\n\n return count", "def count_patterns(pattern, file):\n count = 0\n with open(file, 'r') as f:\n for line in f:\n if re.search(pattern, line):\n count += 1\n print(\"The pattern '{}' appears {} times.\".format(pattern, count))", "def PatternCount(text, pattern):\n\n count = 0\n for i in range(0, len(text)-len(pattern)+1):\n if text[i:i+len(pattern)] == pattern:\n count += 1\n return count", "def get_type_refcount(pattern=None):\n gc.collect()\n\n refcounts_per_type = defaultdict(int)\n for obj in gc.get_objects():\n obj_type_name = type(obj).__name__\n # If `pattern` is not None, keep only matching types.\n if pattern is None or pattern in obj_type_name:\n refcounts_per_type[obj_type_name] += 1\n\n return refcounts_per_type", "def CountOccurrences(pattern, bwt, starts, occ_counts_before, suffix_array):\r\n # 0$ 1A 2T 3G 4C\r\n letters = {'$':0, 'A':1, 'T':2, 'G':3, 'C':4}\r\n top=0\r\n bottom = len(bwt)-1\r\n matches_index = []\r\n while True:\r\n if len(pattern)!=0:\r\n char = pattern[-1]\r\n j = letters[char]\r\n pattern = pattern[:-1]\r\n found=False\r\n for i in range(top,bottom+1):\r\n if bwt[i] == char:\r\n top = occ_counts_before[i][j] + starts[char] -1\r\n bottom = occ_counts_before[bottom][j] + starts[char] -1\r\n found = True\r\n break\r\n\r\n if found==False:\r\n return matches_index\r\n # when pattern is finished proccessing\r\n else:\r\n for i in range(top, bottom+1):\r\n matches_index.append(suffix_array[i])\r\n return matches_index\r\n\r\n return matches_index", "def count_pattern(sentence, pattern):\n n = len(pattern)\n counter = 0\n for i in range(len(sentence) - n + 1):\n if sentence[i:i+n] == pattern:\n counter += 1\n\n return counter", "def pattern_count(text, pattern):\n return len([i\n for i in range(0, len(text) - len(pattern) + 1)\n if text[i:i + len(pattern)] == pattern])", "def _calc_refs(counts):\n refs = 0\n for allele in counts.keys():\n refs += counts[allele]\n return refs", 
"def count(self):\n return len(self.find())", "def count_region(\n reference_seq, # type: pyfaidx.Fasta\n region, # type: Tuple[str, int, int]\n pattern=None # type: Optional[str]\n): # type: (...) -> int\n\n chrom, start, end = region\n seq = reference_seq[chrom][int(start):int(end)]\n\n return _count_sequence(seq, regex=_build_regex(pattern))", "def parse_file_count(path, args):\n try:\n fisier = open(path, 'r')\n except IOError:\n print(\"Nu am putut deschide fisierul :\", path)\n return\n n_found = 0\n pattern = args.pattern\n for line in fisier:\n if args.ignore_case:\n line = line.lower()\n pattern = pattern.lower()\n n_found += line.count(pattern)\n\n fisier.close()\n return n_found", "def count():", "def __len__(self) -> int:\n n_fuzzy_patterns = sum(len(p[\"patterns\"]) for p in self.fuzzy_patterns.values())\n n_regex_patterns = sum(len(p[\"patterns\"]) for p in self.regex_patterns.values())\n return n_fuzzy_patterns + n_regex_patterns", "def count_locs(file_type, comment_pattern):\n find = \"find . -name '*.{0}' -print0\".format(file_type)\n sed_pattern = \"'/^\\s*{0}/d;/^\\s*$/d'\".format(comment_pattern)\n\n cmd = \"{0} | xargs -0 sed {1} | wc -l\".format(find, sed_pattern)\n\n return check_output(cmd, shell = True).decode('utf-8').replace('\\n', '')", "def pattern_count(text, pattern):\n\n count = 0\n len_text = len(text)\n len_pattern = len(pattern)\n for i in range(len_text - len_pattern):\n if pattern in text[i:i + len_pattern]:\n count = count + 1\n else:\n continue\n return count", "def count(self, word):\n pass", "def circular_reference_count(obj: typing.Any) -> int:\r\n if np is not None:\r\n result = _numpy_circular_ref_count(obj)\r\n if result is not NotImplemented:\r\n return result\r\n return _get_circular_ref_count(obj)", "def get_ref_length(self, ref):\n tbl = self._get_references_node()\n return get_ref_length(tbl, ref)", "def support_count(pattern, D):\n support_count = 0\n tmp_p = set(pattern)\n for transaction in D:\n if tmp_p <= set(transaction):\n support_count += 1\n return support_count", "def count(sub_stng, stng):\n instance_count = 0\n start_index = 0\n while stng.find(sub_stng, start_index) != -1:\n instance_count += 1\n start_index = stng.find(sub_stng, start_index) + 1\n\n return instance_count", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def __len__(self: TokenMatcher) -> int:\n return len(self._patterns)", "def count(self, sub) -> int:\n pass", "def rec_count(color : str) -> int:\n return sum(\n (1 + rec_count(child)) * count\n for child, count in contents[color].items()\n )", "def count(self):\n\n raise NotImplementedError", "def count_contents(\n target : str,\n contents : dict[str, dict[str, int]]\n ) -> int:\n\n @lru_cache() # Cache results to speed up recursion\n def rec_count(color : str) -> int:\n \"\"\"Recursively count the contents of a given color.\"\"\"\n return sum(\n (1 + rec_count(child)) * count\n for child, count in contents[color].items()\n )\n\n return rec_count(target)", "def CountAppStrMatch(pattern, text, d, debug = False):\n\tcount = 0\n\tif debug:\n\t\tprint len(text)-len(pattern)+1\n\tfor i in range(len(text)-len(pattern)+1):\n\t\tif debug:\n\t\t\tprint text[i:i+len(pattern)]\n\t\t\tprint HammingDist(text[i:i+len(pattern)], pattern)\n\t\tif HammingDist(text[i:i+len(pattern)], pattern) <= d:\n\t\t\tcount += 1\n\treturn count", "def num_patterns(self):\n return len(self._pattern_reg)", "def calculate_number_of_references(div):\n n_publication_ref = len(\n [ref 
for ref in div.find_all(\"ref\") if ref.attrs.get(\"type\") == \"bibr\"]\n )\n n_figure_ref = len(\n [ref for ref in div.find_all(\"ref\") if ref.attrs.get(\"type\") == \"figure\"]\n )\n return {\"n_publication_ref\": n_publication_ref, \"n_figure_ref\": n_figure_ref}", "def count(self, value): # real signature unknown; restored from __doc__\n return 0", "def counts(self, regex = \"\\w+\"): \n tokenizer = RegexpTokenizer(r'{}'.format(regex))\n count = []\n for i in tqdm(self.text):\n count.append(len(tokenizer.tokenize(i)))\n return count", "def count(self, sub, start=0, end=None):\n return count(self, sub, start, end)", "def count(self):\n\t\treturn sum(read.copy for read in self.__iter__())", "def calculate_p(candidate, reference):\n matches = 0\n for grama in candidate:\n if grama in reference:\n matches += 1\n return matches/len(candidate)", "def count(self):\n # TODO not implemented yet\n return 0", "def __countArrRefs(self, tnode, count_table, aref_seq):\n\n if isinstance(tnode, ast.NumLitExp):\n return\n\n elif isinstance(tnode, ast.StringLitExp):\n return\n\n elif isinstance(tnode, ast.IdentExp):\n return\n\n elif isinstance(tnode, ast.ArrayRefExp):\n aref_str = str(tnode)\n if aref_str in count_table:\n count_table[aref_str] += 1\n else:\n count_table[aref_str] = 1\n aref_seq.append(tnode.replicate())\n\n elif isinstance(tnode, ast.FunCallExp):\n self.__countArrRefs(tnode.exp, count_table, aref_seq)\n for a in tnode.args:\n self.__countArrRefs(a, count_table, aref_seq)\n\n elif isinstance(tnode, ast.UnaryExp):\n self.__countArrRefs(tnode.exp, count_table, aref_seq)\n\n elif isinstance(tnode, ast.BinOpExp):\n self.__countArrRefs(tnode.lhs, count_table, aref_seq)\n self.__countArrRefs(tnode.rhs, count_table, aref_seq)\n\n elif isinstance(tnode, ast.ParenthExp):\n self.__countArrRefs(tnode.exp, count_table, aref_seq)\n\n elif isinstance(tnode, ast.ExpStmt):\n if tnode.exp:\n self.__countArrRefs(tnode.exp, count_table, aref_seq)\n\n elif isinstance(tnode, ast.CompStmt):\n for s in tnode.stmts:\n self.__countArrRefs(s, count_table, aref_seq)\n\n elif isinstance(tnode, ast.IfStmt):\n self.__countArrRefs(tnode.test, count_table, aref_seq)\n self.__countArrRefs(tnode.true_stmt, count_table, aref_seq)\n if tnode.false_stmt:\n self.__countArrRefs(tnode.false_stmt, count_table, aref_seq)\n\n elif isinstance(tnode, ast.ForStmt):\n if tnode.init:\n self.__countArrRefs(tnode.init, count_table, aref_seq)\n if tnode.test:\n self.__countArrRefs(tnode.test, count_table, aref_seq)\n if tnode.iter:\n self.__countArrRefs(tnode.iter, count_table, aref_seq)\n self.__countArrRefs(tnode.stmt, count_table, aref_seq)\n\n else:\n err(\n \"orio.module.ortildriver.transformation internal error:OrTilDriver: unknown type of AST: %s\"\n % tnode.__class__.__name__\n )", "def get_multi_pattern_count(word, patterns):\n\n distinct_positions = set()\n for pattern in patterns:\n result = Util.find_all_occurrences_knuth_morris_pratt(pattern,\n word)\n distinct_positions |= set(result)\n\n return distinct_positions", "def getOccurence(self) -> int:\n ...", "def n_refs(self):\n return self._n_refs", "def count(seq):\n\treturn sum(1 for x in seq)", "def _count_sequence(sequence, regex=None):\n # type: (pyfaidx.Sequence, Pattern[str]) -> int\n\n if regex is None:\n count = len(sequence)\n else:\n count = sum((1 for _ in regex.finditer(str(sequence))))\n\n return count", "def count(args):\n path = os.path.abspath(args.path)\n total = 0\n\n if args.recursive:\n if os.path.exists(args.path):\n for item in os.listdir(path):\n 
little_path = os.path.join(path, item)\n if os.path.isfile(little_path):\n total += parse_file_count(little_path, args)\n else:\n total += count(little_path)\n else:\n print(\"EROARE: <\" + args.path +\n \"> invalid, nu putem ajunge acolo\")\n else:\n if os.path.isfile(args.path):\n total += parse_file_count(args.path, args)\n else:\n print(\"EROARE: <\" + args.pattern +\n \"> invalid, nu este fisier\")\n return total", "def countRecursiveReferences(self) -> Tuple[Dict[str, List[str]], Dict[str, Dict[str, int]]]:\n countReferences = {}\n isReferencedBy = {}\n for nt in self.non_terminals: # for each non-terminal in the grammar\n for production in self.grammar[nt]: # for each possible production on that non terminal\n count = {}\n for option in production.split(): # iterate over the production's terms\n count.setdefault(option, 0)\n if option in self.non_terminals: # if the term is a non terminal\n count[option] += 1 # the number of times that option has been referenced increases\n isReferencedBy.setdefault(option, set())\n isReferencedBy[option].add(nt)\n \n for key in count:\n count.setdefault(key, 0)\n countReferences.setdefault(key, {})\n countReferences[key].setdefault(nt, 0)\n\n countReferences[key][nt] = max(\n countReferences[key][nt], count[key]) # the number of references of the non terminal is for this\n # term is the maximum between all productions in this non terminal\n\n return isReferencedBy, countReferences", "def reference_stats(refs, output_len):\n\n ngrams = Counter()\n closest_diff = None\n closest_len = None\n\n for ref in refs:\n tokens = ref.split()\n reflen = len(tokens)\n diff = abs(output_len - reflen)\n if closest_diff is None or diff < closest_diff:\n closest_diff = diff\n closest_len = reflen\n elif diff == closest_diff:\n if reflen < closest_len:\n closest_len = reflen\n\n ngrams_ref = CachedBLEU.extract_ngrams(ref)\n for ngram in ngrams_ref.keys():\n ngrams[ngram] = max(ngrams[ngram], ngrams_ref[ngram])\n\n return ngrams, closest_diff, closest_len", "def word_count_by_ref(ref_name, lang='he', version=None):\n\n counts = {}\n\n # get version title if not supplied\n if not version:\n version = next(v for v in Ref(ref_name).version_list() if v['language'] == lang)\n\n # convert ref into a list of words\n text = TextChunk(Ref(ref_name), lang, version['versionTitle']).as_string()\n text = text.split()\n\n for word in text:\n if word in list(counts.keys()):\n counts[word] += 1\n else:\n counts[word] = 1\n\n return counts", "def count(self):\n\n count = 0\n x = self.begin\n\n if self.begin == self.end == None:\n return 0\n\n elif self.begin == self.end:\n return 1\n\n else:\n while x:\n count += 1\n x = x.next\n\n return count", "def count(self, tokens):\n return self._count[tuple(tokens)]", "def countAtom (dico_count, PDB_parsed, debug = 0):\n count = 0\n \n for atom in PDB_parsed : \n residue = tool.transformAA(atom[\"resName\"])\n if debug : print residue\n \n if residue in dico_count : \n atom_Name = atom[\"name\"]\n if atom_Name in dico_count[residue] : \n count = count + 1\n return count", "def counts(self):\n return sum(self.counter.values()), len(self.visited)", "def counts(self):\n return sum(self.counter.values()), len(self.visited)", "def total_occurrences(word1, word2, flag):\n result = 0\n word1_length = len(word1)\n for i in range(word1_length):\n if word1[i] == flag:\n result += 1\n\n word2_length = len(word2)\n for i in range(word2_length):\n if word2[i] == flag:\n result += 1\n\n return result", "def count_regexp():\r\n # Here's an example regular 
expression that roughly matches a valid email address.\r\n # The ones you write below should be shorter than this\r\n email = re.compile(\"[a-zA-Z0-9_-]+@[a-zA-Z0-9_-]+\\.[a-zA-Z]{2,5}\")\r\n\r\n ###### Write below #########\r\n subheading = re.compile(\"\\=\\=+.*\\=\\=+\")\r\n link_to_subheading = re.compile(\"\\[\\[[\\w\\'*\\-*\\:*\\(*\\)*\\_*\\s*]+[#][\\s*\\w\\\\'*\\-*\\:*\\(*\\)*\\_*s*]+\\|*\")\r\n doi_citation = re.compile(\"\\{\\{[c][ite](?!{{).*[dD][oO][iI]\\s*[:|,=\\/]*\\s*[0-9]+\\.[0-9]+.*\\}\\}\")\r\n ###### End of your work #########\r\n\r\n patterns = {\r\n \"emails\": email,\r\n \"subheadings\": subheading,\r\n \"links to subheadings\": link_to_subheading,\r\n \"citations with DOI numbers\": doi_citation,\r\n }\r\n\r\n with open(RAW_DUMP_XML, encoding=\"utf-8\") as f:\r\n dump_text = f.read()\r\n for name, pattern in patterns.items():\r\n if pattern is None:\r\n continue\r\n matches = pattern.findall(dump_text)\r\n count = len(matches)\r\n\r\n example_matches = [matches[i * (count // 5)] for i in range(5)]\r\n\r\n print(\"Found {} occurences of {}\".format(count, name))\r\n print(\"Here are examples:\")\r\n print(\"\\n\".join(example_matches))\r\n print(\"\\n\")", "def countsubcatchments(inputfilename=FileSettings.settingsdict['inputfilename']):\r\n global count\r\n with open(inputfilename, 'r') as swmmput:\r\n contents = swmmput.readlines()\r\n count = len(contents)\r\n return(count)", "def get_pattern_count(left, coins):\r\n if len(coins) == 0:\r\n return 1\r\n # Get next coin\r\n coin = coins[0]\r\n # See how many could go into left\r\n most = left // coin\r\n # Loop through possible\r\n count = 0\r\n for i in range(0, most + 1):\r\n remaining = left - i * coin\r\n count += get_pattern_count(remaining, coins[1:])\r\n\r\n return count", "def count_regexp_occ(regexp=\"\", text=None):\n return len(re.findall(regexp, text))", "def count_regexp_occ(regexp=\"\", text=None):\n return len(re.findall(regexp, text))", "def num_links(self):\n count=0.0\n for cluster in self.clusters:\n if self.clusters[cluster] == self.clusters[cluster].antecessor:\n numberofmembers=self.clusters[cluster].number_of_members\n count+=numberofmembers\n return count", "def count(self, base):\n return self._dna.count(base)", "def count(self):\n return sum(1 for _ in self)", "def search_and_count_chars(dictionary, lookup):\n\n c = (sum(sum(len(re.findall(lookup,s)) for s in subList) for subList in dictionary.values()))\n return c", "def nreferences(self):\n return self.__nreferences", "def extractPatternOccurrence(songName, inStart, inEnd, useTies, songs):\n\n # inStart = int(annEntry[6])\n # numNotes = int(annEntry[8])\n numNotes = inEnd - inStart + 1 # including endpoints\n\n # add number of ties before start index from start index; meertens\n # DOESN'T count tied notes as notes but music21 DOES\n allNotes = songs[songName].score.flat.notes.stream()\n # subtract 1 here to get the first note of the occurence in the slice\n # so that we can get rid of it if it's a rest\n if(useTies):\n beforeSlice = allNotes[:inStart-1]\n numTies = 0\n for n in beforeSlice:\n if(n.tie is not None):\n if(n.tie.type == 'start'):\n numTies += 1\n\n inStart += numTies\n\n # do the same for ties inside of the snippet, but also keep track of where\n # they are and save that information with the pattOcc so we don't have to go\n # through this procedure again (TODO)\n numTies = 0\n inSlice = allNotes[inStart:(inStart+numNotes)]\n for n in inSlice:\n if(n.tie is not None):\n if(n.tie.type == 'start'):\n numTies += 1\n\n # this new 
numNotes will work with music21\n numNotes += numTies\n\n pattOcc = allNotes[inStart:(inStart+numNotes)]\n\n return pattOcc", "def len(self):\n start = self.head\n count = 0\n while start:\n count+=1\n start = start.getLink()\n return count", "def count(self):\n return clone_counter._count", "def count_matches(sam_input):\n logging.info(\"Counting aligned bases in %s ...\", sam_input.name)\n\n total_bases = 0\n with pysam.AlignmentFile(sam_input, \"r\") as sam:\n for read in sam:\n total_bases += aligned_bases(read.cigar)\n return total_bases", "def count(self, item):\n # TODO: complete this function!\n if item not in self:\n return 0\n else:\n num_occur = 0\n if self._first == item:\n num_occur += 1\n num_occur += self._rest.count(item)\n return num_occur", "def refspec_count(self):\n\n return C.git_remote_refspec_count(self._remote)", "def countSubStringMatchRecursive(target,key,count):\r\n print target\r\n index = find(target,key)\r\n if index < 0 :\r\n return 0\r\n else :\r\n count += countSubStringMatchRecursive(target[index+len(key):len(target)+1],key,count)\r\n count += 1\r\n print count\r\n return count", "def count(self, tokens):\n return self.counts[tokens]", "def count_clip(gram, grams, reference):\n clip = 0\n n = len(gram.split(' '))\n count_wi = 0\n for g in grams:\n if gram == g:\n count_wi += 1\n # print('count_wi:', count_wi)\n for ref in reference:\n ref_list = ref.split(' ')\n count_ref = 0\n for i in range(len(ref_list) - n + 1):\n if gram == ' '.join(ref_list[i:i+n]):\n count_ref += 1\n # print('count_ref: ', count_ref)\n count = min(count_wi, count_ref)\n if count > clip:\n clip = count\n return clip", "def count(self):\n with self._block:\n counter = re.search(r'count=(\\d+) ', repr(self))\n return int(counter.group(1))", "def count(self):\n nreq, nres = 0, 0\n for entry in self.__history:\n if entry.oreq is not None:\n nreq += 1\n if entry.ores is not None:\n nres += 1\n return nreq, nres", "def count(self, patterns=[], aggregators=None, queries=[]):\n import hxl.filters\n return hxl.filters.CountFilter(\n self, patterns=patterns, aggregators=aggregators, queries=queries\n )", "def count(self):\n return self.ming_cursor.count()", "def get_count(name, key):\n total = 0\n query = CounterShard.all().filter('name = ', name).filter('reference_key = ', key)\n for counter in query:\n total += counter.count\n \n return total", "def count() -> int:\n pass", "def count(self, item):\n # type: (Any) -> int\n return list.count(self, self.ref(item))", "def getNumReferenceGlyphs(self):\n return _libsbml.GeneralGlyph_getNumReferenceGlyphs(self)", "def count(self,value = 1):\n n = 0\n for s in self.sample:\n if s == value:\n n += 1\n return n", "def occurences(words):\n\n\t# Add your code here\n\treturn", "def matched_length(self) -> int:\n return sum(seg.matched_length for seg in self.segments)", "def resultCounter(detections):\n counter = 0\n for attribute, value in classIterator(detections):\n if 'crease' in attribute:\n counter += len(value)\n return counter", "def total(self) -> int:\n return len(self.fixes)", "def getNumReferents(self):\n return _libsbml.ReplacedElement_getNumReferents(self)", "def get_matches_count():\n\n return ''\n \"\"\"\n TODO: count matches\n dtr5app_flag.sender\n dtr5app_flag.receiver\n dtr5app_flag.flag\n \"\"\"", "def annotate_pattern_occurrences(\n record, pattern, feature_type=\"misc_feature\", prefix=\"!\"\n):\n new_record = deepcopy(record)\n label = prefix + str(pattern)\n for location in pattern.find_matches(str(record.seq)):\n 
annotate_record(\n new_record,\n location=(location.start, location.end),\n feature_type=feature_type,\n label=label,\n )\n return new_record", "def find_occurrences(text, pattern, d=0):\n idx_of_last_pattern = len(text) - len(pattern)\n return [i for i in range(idx_of_last_pattern + 1) if hamming(text[i:i + len(pattern)], pattern) <= d]", "def utr3_motif_counts(self, pattern):\n return len(re.findall(pattern.upper(), self.three_prime_utr_sequence.upper()))" ]
[ "0.71985865", "0.7126463", "0.693929", "0.6936579", "0.6664656", "0.6643048", "0.6616412", "0.65986395", "0.65699697", "0.6548704", "0.644031", "0.6386957", "0.63118064", "0.6282825", "0.6272797", "0.6222616", "0.6174506", "0.61413646", "0.60996157", "0.6062659", "0.606119", "0.60587156", "0.60433906", "0.6037991", "0.6029941", "0.60022235", "0.59635264", "0.59601325", "0.5939341", "0.58867043", "0.58867043", "0.58867043", "0.58867043", "0.58737993", "0.5849515", "0.5834643", "0.5821058", "0.58178025", "0.5729819", "0.5728899", "0.571281", "0.5711139", "0.5706865", "0.5673914", "0.567301", "0.565726", "0.56567794", "0.56561", "0.5653708", "0.5652653", "0.5651219", "0.5607813", "0.56016934", "0.5588743", "0.5577224", "0.5573549", "0.5570946", "0.5570787", "0.5561663", "0.5551285", "0.5543413", "0.5543413", "0.55390614", "0.5526349", "0.5525403", "0.55205333", "0.5516829", "0.5516829", "0.55092436", "0.5507761", "0.55047905", "0.5491", "0.54715294", "0.5467718", "0.54654914", "0.5462242", "0.5449268", "0.54351074", "0.5434588", "0.5431749", "0.5425286", "0.5418753", "0.54175174", "0.54059386", "0.5405209", "0.5398737", "0.5395755", "0.53948325", "0.53947306", "0.5390358", "0.53899103", "0.5384416", "0.5374481", "0.53709656", "0.5369056", "0.5367287", "0.536156", "0.5359822", "0.5355908", "0.53545344" ]
0.68921715
4
Merges overlapping genomic intervals.
def merge_genomic_intervals(intervals): # type: (Iterable[Tuple[str, int, int]]) -> Iterable[Tuple[str, int, int]] # Group intervals by chromosome. grouped_intervals = itertools.groupby( sorted(intervals), operator.itemgetter(0)) # Now yield merged intervals per chromosome. for chrom, grp in grouped_intervals: chrom_intervals = [interval[1:] for interval in grp] for low, high in merge_intervals(chrom_intervals, is_sorted=True): yield chrom, low, high
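Editorial note: the document above calls a merge_intervals helper that is not included in this row. The following is a minimal, self-contained Python sketch of how such a helper and the per-chromosome generator could fit together; the helper implementation and the example coordinates are assumptions for illustration only and are not part of the dataset row.

import itertools
import operator


def merge_intervals(intervals, is_sorted=False):
    # Assumed helper: merge overlapping/adjacent (start, end) pairs,
    # yielding merged (start, end) pairs.
    if not is_sorted:
        intervals = sorted(intervals)
    iterator = iter(intervals)
    try:
        cur_low, cur_high = next(iterator)
    except StopIteration:
        return
    for low, high in iterator:
        if low <= cur_high:
            # Overlaps (or touches) the current interval: extend it.
            cur_high = max(cur_high, high)
        else:
            # Gap found: emit the finished interval and start a new one.
            yield cur_low, cur_high
            cur_low, cur_high = low, high
    yield cur_low, cur_high


def merge_genomic_intervals(intervals):
    # Merge overlapping (chrom, start, end) tuples, chromosome by chromosome.
    grouped = itertools.groupby(sorted(intervals), operator.itemgetter(0))
    for chrom, group in grouped:
        chrom_intervals = [interval[1:] for interval in group]
        for low, high in merge_intervals(chrom_intervals, is_sorted=True):
            yield chrom, low, high


if __name__ == "__main__":
    # Hypothetical input, for illustration only.
    intervals = [("chr1", 1, 100), ("chr1", 50, 200), ("chr2", 5, 10), ("chr1", 900, 950)]
    print(list(merge_genomic_intervals(intervals)))
    # -> [('chr1', 1, 200), ('chr1', 900, 950), ('chr2', 5, 10)]

Sorting the full interval list once up front means each chromosome's group arrives already ordered, so the merge runs in a single linear pass per chromosome.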
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def merge_ranges():", "def test_merge_intervals():\n\n a = pybedtools.example_bedtool(\"a.bed\") # path to test file a\n # This file looks like this:\n # chr1\t1\t100\tfeature1\t0\t+\n # chr1\t100\t200\tfeature2\t0\t+\n # chr1\t150\t500\tfeature3\t0\t-\n # chr1 900\t950\tfeature4\t0\t+\n\n assert len(a) == 4\n\n b = pybedtools.example_bedtool(\"b.bed\") # path to test file b\n # This file looks like this:\n # chr1\t155\t200\tfeature5\t0\t-\n # chr1\t800\t901\tfeature6\t0\t+\n\n assert len(b) == 2\n\n merged_bed = merge_intervals([a, b])\n assert len(merged_bed) == 2\n # Merged file looks like this:\n # chr1\t1\t500\n # chr1\t800\t950", "def merge_overlapping_on_chrm_and_strand(intervals, coverage):\n sorted_by_lower_bound = sorted(intervals, key=lambda x: x.left)\n merged = []\n for higher in sorted_by_lower_bound:\n if not merged:\n merged.append(higher)\n else:\n lower = merged[-1]\n # test for intersection between lower and higher:\n # we know via sorting that lower[0] <= higher[0]\n if higher.left <= lower.right:\n upper_bound = int(max(lower.right, higher.right))\n new_peak = peak(lower.chrm, lower.left, upper_bound, lower.strand)\n new_peak.height = 0\n window = HTSeq.GenomicInterval(lower.chrm, lower.left, upper_bound, lower.strand)\n wincvg = np.fromiter(coverage[window], dtype='i')\n new_peak.height = int(max(wincvg))\n merged[-1] = new_peak # replace by merged interval\n else:\n merged.append(higher)\n return merged", "def merge_overlapping(self, stat=intervals_weighted_mean, sort=True):\n if sort:\n bed = self\n else:\n bed = self.sort()\n\n current_intervals = []\n for interval in bed:\n if len(current_intervals) == 0 or (current_intervals[-1].start < interval.end and\n current_intervals[-1].end > interval.start and\n current_intervals[-1].chrom == interval.chrom):\n current_intervals.append(interval)\n else:\n # merge\n intervals = np.array([(current.start, current.end,\n float(current.score) if current.score != '.' 
else np.nan)\n for current in current_intervals])\n merged_score = \"{:0.6f}\".format(stat(intervals))\n merged_strand = current_intervals[0].strand\n merged_start = min(intervals[:, 0])\n merged_end = max(intervals[:, 1])\n merged_chrom = current_intervals[0].chrom\n merged_name = current_intervals[0].name\n merged_interval = pybedtools.Interval(merged_chrom, merged_start, merged_end, name=merged_name,\n score=merged_score, strand=merged_strand)\n current_intervals = [interval]\n yield merged_interval", "def merge_overlapping_regions(regions):\n sorted_regions = sorted(regions, key=lambda r: (r.chromosome, r.start))\n\n merged_regions = []\n current_regions = []\n last_end = None\n for region in sorted_regions:\n if len(current_regions) == 0:\n current_regions.append(region)\n last_end = region.end\n elif region.chromosome == current_regions[0].chromosome and region.start < last_end:\n current_regions.append(region)\n last_end = max(last_end, region.end)\n else:\n merged_region = GenomicRegion(chromosome=current_regions[0].chromosome,\n start=current_regions[0].start, end=last_end,\n strand=current_regions[0].strand)\n merged_regions.append(merged_region)\n current_regions = [region]\n last_end = region.end\n\n merged_region = GenomicRegion(chromosome=current_regions[0].chromosome,\n start=current_regions[0].start, end=last_end,\n strand=current_regions[0].strand)\n merged_regions.append(merged_region)\n\n return merged_regions", "def merge_df_intervals(df, iv_func=lambda iv: iv.merge_hull()):\n if not \"strand\" in df.columns:\n df = df.assign(strand=1)\n strand_added = True\n else:\n strand_added = False\n joined = _df_to_tup(df)\n\n out = []\n for chr_strand, sub_group in itertools.groupby(joined, lambda tup: tup[0]):\n args = [x[1:] for x in sub_group]\n iv = IntervalSet.from_tuples_with_id(args)\n new_order = iv_func(iv).to_tuples_last_id()\n new_df = df.iloc[[x[2] for x in new_order]].copy()\n new_df.loc[:, \"start\"] = [x[0] for x in new_order]\n new_df.loc[:, \"stop\"] = [x[1] for x in new_order]\n out.append(new_df)\n res = pd.concat(out)\n if strand_added:\n res = res.drop(\"strand\", axis=1)\n return res.sort_values([\"chr\", \"start\"])", "def mergeOverlapping(intlist):\n \n intlist.sort(key=lambda x: x.minval)\n newint=[intlist[0]]\n\n for elem in intlist[1:]:\n try:\n newint[-1]=mergeIntervals(elem,newint[-1])\n except:\n newint.append(elem)\n \n return newint", "def test_merge_demo_intervals():\n a = pybedtools.BedTool(panel1_path)\n assert len(a) == 4\n b = pybedtools.BedTool(panel2_path)\n assert len(b) == 3\n\n merged_bed = merge_intervals([a, b])\n assert len(merged_bed) == len(a) + len(b) - 1 # a and b have a shared interval", "def _combine_ind_ranges(ind_ranges_to_merge):\n ind_ranges_to_merge = sorted(ind_ranges_to_merge)\n stack = []\n result = []\n for curr in ind_ranges_to_merge:\n if len(stack) == 0:\n stack.append(curr)\n elif stack[-1][-1] >= curr[0]:\n prev = stack.pop()\n merged = sorted(list(set(prev + curr)))\n stack.append(merged)\n else:\n prev = stack.pop()\n result.append(prev)\n stack.append(curr)\n result += stack\n return result", "def test_rangesMerged(self):\n\n mergeAfter = MessageSet(1, 3)\n mergeBefore = MessageSet(6, 8)\n\n mergeBetweenSequence = mergeAfter + mergeBefore\n mergeBetweenNumber = mergeAfter + MessageSet(5, 7)\n\n self.assertEqual(list(mergeAfter + (2, 4)), [1, 2, 3, 4])\n self.assertEqual(list(mergeAfter + (3, 5)), [1, 2, 3, 4, 5])\n\n self.assertEqual(list(mergeBefore + (5, 7)), [5, 6, 7, 8])\n self.assertEqual(list(mergeBefore + 
(4, 6)), [4, 5, 6, 7, 8])\n\n self.assertEqual(list(mergeBetweenSequence + (3, 5)),\n [1, 2, 3, 4, 5, 6, 7, 8])\n self.assertEqual(list(mergeBetweenNumber + MessageSet(4)),\n [1, 2, 3, 4, 5, 6, 7])", "def test_mergeOverlapping(self):\n\n # test expected behavior for correctly formatted inputs\n int1 = interval('[1,2)')\n int2 = interval('(1,2]')\n int12 = interval('[1,2]')\n merged12 = mergeOverlapping([int1, int2])\n self.assertEqual([int12], merged12)\n int3 = interval('[3,3]')\n int13 = interval('[1,3]')\n intneg1 = interval('[-1,0)')\n int0 = interval('[0,1)')\n intneg13 = interval('[-1,3]')\n self.assertEqual([intneg13], mergeOverlapping([intneg1, int0, int13]))\n self.assertEqual([intneg1, int3], mergeOverlapping([intneg1, int3]))\n self.assertEqual([int13], mergeOverlapping([int12, int3]))\n int4 = interval('(3,4]')\n int58 = interval('[5,8]')\n intnothing = mergeOverlapping([])\n self.assertEqual([], intnothing)\n self.assertEqual([int13, int58], mergeOverlapping([int12, int3, int58]))\n self.assertEqual([int13, int58], mergeOverlapping([int58, int13]))\n self.assertEqual([int13], mergeOverlapping([int1, int2, int3]))\n self.assertEqual([int13], mergeOverlapping([int1, int2, int2, int3, int12]))\n self.assertEqual([int1], mergeOverlapping([int1]))\n\n # test expected behavior for incorrectly formatted inputs\n with self.assertRaises(ValueError):\n int1 = mergeOverlapping([int1, 4])\n with self.assertRaises(ValueError):\n int1 = mergeOverlapping([3, int1])\n with self.assertRaises(ValueError):\n int1 = mergeOverlapping([3, \"not an interval\"])\n with self.assertRaises(ValueError):\n int1 = mergeOverlapping([3, \"[1,3]\"])\n with self.assertRaises(ValueError):\n int1 = mergeOverlapping([[], \"\"])\n with self.assertRaises(ValueError):\n int1 = mergeOverlapping([[12, \"hi\"], \"interval\"])\n with self.assertRaises(ValueError):\n int1 = mergeOverlapping([int1, \"\"])\n with self.assertRaises(ValueError):\n int1 = mergeOverlapping([[], int2])\n print(\"merge overlapping list test complete\")", "def merge_peaks(peaks, peak_size, merge_overlap, chrom_len):\n max_overlap = merge_overlap\n while len(peaks) > 1 and max_overlap >= merge_overlap:\n # find largest overlap\n max_i = 0\n max_overlap = peaks[0].end - peaks[1].start\n for i in range(1, len(peaks) - 1):\n peaks_overlap = peaks[i].end - peaks[i + 1].start\n if peaks_overlap > max_overlap:\n max_i = i\n max_overlap = peaks_overlap\n\n if max_overlap >= merge_overlap:\n # merge peaks\n peaks[max_i].merge(peaks[max_i + 1], peak_size, chrom_len)\n\n # remove merged peak\n peaks = peaks[: max_i + 1] + peaks[max_i + 2 :]\n\n return peaks", "def merge(intervals):\n intervals.sort(key=lambda x: x[0])\n # take the first interval\n merged = [intervals[0]]\n # loop through all the intervals\n for this_interval in intervals:\n if this_interval[0] <= merged[-1][1]:\n merged[-1] = (merged[-1][0], max(merged[-1][1], this_interval[1]))\n else:\n merged.append(this_interval)\n return merged", "def merge_df_intervals_with_callback(df, callback):\n if not \"strand\" in df:\n df = df.assign(strand=1)\n strand_added = True\n else:\n strand_added = False\n joined = _df_to_tup(df)\n result = []\n for chr, sub_group in itertools.groupby(joined, lambda tup: tup[0]):\n args = [x[1:] for x in sub_group]\n iv = IntervalSet.from_tuples_with_id(args)\n subsets = iv.merge_hull().to_tuples_with_id()\n for s in subsets:\n sub_df = df.iloc[list(s[2])].copy()\n sub_df.at[:, \"start\"] = s[0]\n sub_df.at[:, \"stop\"] = s[1]\n row_data = callback(sub_df)\n if not 
isinstance(\n row_data, dict\n ): # and not (isinstance(row_data, pd.core.series.Series) and len(row_data.shape) == 1):\n print(\"type\", type(row_data))\n # print 'len(shape)', len(row_data.shape)\n print(callback)\n raise ValueError(\n \"Merge_function returned something other than dict (writing to the pandas series directly is very slow, call to_dict() on it, then modify it.)\"\n )\n if set(row_data.keys()) != set(df.columns):\n raise ValueError(\n \"Merge_function return wrong columns. Expected %s, was %s\"\n % (df.columns, list(row_data.keys()))\n )\n row_data[\"start\"] = s[0]\n row_data[\"stop\"] = s[1]\n\n result.append(row_data)\n res = pd.DataFrame(result).sort_values([\"chr\", \"start\"])\n if strand_added:\n res = res.drop(\"strand\", axis=1)\n return res", "def solution(intervals):\n solution = Solution()\n output = solution.merge(intervals)\n\n print(output)", "def mergeIntervals(int1,int2):\n newint=interval('(-1,1)') \n if int1.minval>int2.minval or (int2.lrbd=='(' and int1.minval==int2.minval):\n int1,int2=int2,int1\n \n if isMergeable(int1,int2):\n newrtNum=max(int1.rtnum,int2.rtnum)\n if newrtNum==int2.rtnum:\n newint=interval(int1.lrbd+str(int1.lfnum)+','+str(newrtNum)+int2.upbd)\n else:\n newint=interval(int1.lrbd+str(int1.lfnum)+','+str(newrtNum)+int1.upbd)\n\n else:\n raise Cant_be_merged('Can\\'t be merged')\n \n return newint", "def overlaps(interval,intervals):\n return [x for x in intervals if interval.overlaps(x)]", "def merge_reads(s1, s2, q1, q2, amplen):\n # If the amplicon is of length L and the reads are lengths l1, l2 then:\n # - read 1 from 0 to L-l2-1 inclusive doesn't overlap\n # - read 1 from L-l2 to l1-1 inclusive overlaps with read 2\n # - read 2 from 0 to l1+l2-L-1 inclusive overlaps with read 1\n # - read 2 from l1+l2-L to its end doesn't overlap\n\n # A picture for clarity:\n # s1 coords: 0 l1-1\n # | |\n # ----------------------------------------\n # ------------------------------\n # | | |\n # s1 coords: L-l2 | L-1\n # s2 coords: 0 l1+l2-L-1\n\n # Reverse complement read 2 and reverse its quality scores.\n s2 = reverse_complement(s2)\n q2 = q2[::-1]\n\n # This is where we'll put the merged sequence and quality score.\n s = np.zeros(amplen, dtype=np.int8)\n q = np.zeros(amplen, dtype=np.int8)\n\n # If the reads overlap correctly, then s1[offset+i] == s2[i], assuming s2 is\n # the reverse complement of the reverse read.\n offset = amplen - len(s2)\n\n # Fill in the parts of the merged sequence where the reads don't overlap.\n s[:offset] = s1[:offset]\n q[:offset] = q1[:offset]\n s[len(s1):] = s2[len(s1)+len(s2)-amplen:]\n q[len(s1):] = q2[len(s1)+len(s2)-amplen:]\n\n # Create a set of views into the overlapping region. We can directly compare\n # vs1[i] to vs2[i] and use that to fill in vs[i] with all indexing taken\n # care of.\n vs1 = s1[offset:]\n vq1 = q1[offset:]\n vs2 = s2[:len(vs1)]\n vq2 = q2[:len(vs1)]\n vs = s[offset:len(s1)]\n vq = q[offset:len(s1)]\n\n # Quality score of matching bases is the larger of the two quality\n # scores (this is a somewhat conservative low estimate). Quality\n # score of mismatched bases is the difference of the two quality\n # scores. 
If the mismatched bases have equal quality scores, the\n # base is written as an N with the minimum possible quality.\n\n # Positions where the reads agree.\n ieq = vs1 == vs2\n vs[ieq] = vs1[ieq]\n vq[ieq] = np.maximum(vq1[ieq], vq2[ieq])\n\n # Positions where the reads disagree.\n ineq = vs1 != vs2\n mismatches = ineq.sum()\n\n # Positions where the reads disagree and read 1 has the higher quality.\n ir1 = np.logical_and(ineq, vq1 > vq2)\n vs[ir1] = vs1[ir1]\n vq[ir1] = MIN_QUAL + vq1[ir1] - vq2[ir1]\n\n # Positions where the reads disagree and read 2 has the higher quality.\n ir2 = np.logical_and(ineq, vq2 > vq1)\n vs[ir2] = vs2[ir2]\n vq[ir2] = MIN_QUAL + vq2[ir2] - vq1[ir2]\n\n # Positions where the reads disagree and they have equal qualities.\n irn = np.logical_and(ineq, vq1 == vq2)\n vs[irn] = bN\n vq[irn] = MIN_QUAL\n\n return s, q, mismatches", "def mergeSeq(left, right):\n i = j = 0\n result = []\n while i < len(left) and j < len(right):\n if left[i] <= right[j]:\n result.append(left[i])\n i += 1\n else:\n result.append(right[j])\n j += 1\n\n result += left[i:]\n result += right[j:]\n return result", "def test_mergeIntervals(self):\n\n # test expected behavior for correctly formatted inputs\n int1 = interval('[1,2)')\n int2 = interval('(1,2]')\n int12 = interval('[1,2]')\n merged12 = mergeIntervals(int1, int2)\n self.assertEqual(int12, merged12)\n int3 = interval('[3,3]')\n int13 = interval('[1,3]')\n self.assertEqual(int13, mergeIntervals(int12, int3))\n int4 = interval('(3,4]')\n int58 = interval('[5,8]')\n\n # test expected behavior for incorrectly formatted inputs\n with self.assertRaises(ValueError):\n int1 = mergeIntervals(int1, int4)\n with self.assertRaises(ValueError):\n int1 = mergeIntervals(int4, int1)\n with self.assertRaises(ValueError):\n int1 = mergeIntervals(int3, int1)\n with self.assertRaises(ValueError):\n int1 = mergeIntervals(int3, int58)\n with self.assertRaises(ValueError):\n int1 = mergeIntervals(int1, 4)\n with self.assertRaises(ValueError):\n int1 = mergeIntervals(3, int1)\n with self.assertRaises(ValueError):\n int1 = mergeIntervals(3, \"not an interval\")\n with self.assertRaises(ValueError):\n int1 = mergeIntervals(3, \"[1,3]\")\n with self.assertRaises(ValueError):\n int1 = mergeIntervals([], \"\")\n with self.assertRaises(ValueError):\n int1 = mergeIntervals([12, \"hi\"], \"interval\")\n with self.assertRaises(ValueError):\n int1 = mergeIntervals(int1, \"\")\n with self.assertRaises(ValueError):\n int1 = mergeIntervals([], int2)\n print(\"merge test complete\")", "def merge_in(self, other, convert_to_string=True):\n assert isinstance(other, ExtendedAlignment)\n #_LOG.debug(\"Merging started ...\")\n if other.is_empty():\n return\n me = 0\n she = 0 # Assumption: alignments are female!\n me_len = self.get_length() if not self.is_empty() else 0\n she_len = other.get_length()\n insertion = -1\n\n merged_insertion_columns = 0\n\n ''' Add sequences from her to my alignment '''\n for f in other.fragments:\n self.fragments.add(f)\n if convert_to_string:\n self.from_string_to_bytearray()\n\n selfother = {}\n for k, v in other.items():\n # assert(k not in self,\n # \"Merging overlapping alignments not implemented\")\n if k not in self:\n selfother[k] = bytearray(v, encoding=\"utf8\")\n while True:\n ''' Check exit conditions'''\n if me == me_len and she == she_len:\n break\n\n ''' Check the 5 possible statuses between she and I '''\n if she != she_len and other.is_insertion_column(she):\n if me != me_len and self.is_insertion_column(me):\n ''' We both have a 
series of insertion columns'''\n start = me\n while(me != me_len and self.is_insertion_column(me) and\n she != she_len and other.is_insertion_column(she)):\n me += 1\n she += 1\n merged_insertion_columns += 1\n run = me - start\n self.col_labels[start:me] = list(range(\n insertion, insertion-run, -1))\n else:\n ''' Hers is a series of insertion columns'''\n start = she\n while she != she_len and other.is_insertion_column(she):\n she += 1\n run = she - start\n ins = bytearray(b\"-\") * run\n for seq in self.values():\n seq[me:me] = ins\n self._col_labels[me:me] = list(range(\n insertion, insertion - run, -1))\n insertion -= run\n me += run\n me_len += run\n elif me != me_len and self.is_insertion_column(me):\n ''' Mine is a series of insertion column'''\n start = me\n while me != me_len and self.is_insertion_column(me):\n me += 1\n run = me - start\n ins = bytearray(b\"-\") * run\n for v in selfother.values():\n v[start:start] = ins\n self.col_labels[start:me] = list(\n range(insertion, insertion-run, -1))\n insertion -= run\n elif(she == she_len or (me != me_len and\n self.col_labels[me] < other.col_labels[she])):\n ''' My column is not present (i.e. was allgap) in the\n \"other\"'''\n start = me\n while(me < me_len and (she == she_len or me != me_len and\n self.col_labels[me] < other.col_labels[she])):\n me += 1\n run = me - start\n ins = bytearray(b\"-\") * run\n for v in selfother.values():\n v[start:start] = ins\n elif(me == me_len or (she != she_len and\n self.col_labels[me] > other.col_labels[she])):\n ''' Her column is not present (i.e. was allgap) in \"me\"'''\n start = she\n while(she < she_len and (me == me_len or she != she_len and\n self.col_labels[me] > other.col_labels[she])):\n she += 1\n run = she - start\n ins = bytearray(b\"-\") * run\n for seq in self.values():\n seq[me:me] = ins\n self._col_labels[me:me] = other.col_labels[start:she]\n me += run\n me_len += run\n elif self.col_labels[me] == other.col_labels[she]:\n ''' A shared column'''\n while(me < me_len and she < she_len and\n self.col_labels[me] == other.col_labels[she]):\n she += 1\n me += 1\n else:\n raise \"hmmm, we thought this should be impossible? 
%d %d\" % (\n me, she)\n\n self.update(selfother)\n\n if convert_to_string:\n self.from_bytearray_to_string()\n #_LOG.debug(\"Merging finished ...\")\n\n return merged_insertion_columns", "def joinIntervalsSum(myIntervals,start='start',end='end',score='readcount',sampleName=\".\",offset=0):\n \n if not myIntervals: return myIntervals\n non_overlapping = []\n sep = {'+':[],'-':[]}\n \n print \"Splitting intervals by strand\"\n for i in myIntervals:\n sep[i.strand].append(i)\n \n print \"Joining intervals...\"\n for strand in sep.keys():\n print strand\n intervals = sep[strand]\n intervals.sort()\n \n \n current = copy.copy(intervals[0])\n for x in intervals[1:]:\n next = copy.copy(x)\n if current.intersects(next, start=start, end=end,offset=offset):\n current.end = max(current.end,next.end)\n current.__dict__[score] = current.__dict__[score]+next.__dict__[score]\n else:\n current.name = sampleName\n non_overlapping.append(current)\n current = copy.copy(next)\n current.name=sampleName\n non_overlapping.append(current)\n print \"Sorting intervals\"\n non_overlapping.sort()\n print \"Done\"\n return non_overlapping", "def overlaps(self, other):\n pass", "def test_overlapping_alignments_2():\n generate_bam_file(gqd.sam_content, gqd.sam_bam_prefix)\n gqd.gene_wise_quantification._min_overlap = 5\n sam = pysam.Samfile(gqd.sam_bam_prefix + \".bam\")\n # 1 overlapping base in the 5' end of the reads => not enough\n assert mapping_ids(gqd.gene_wise_quantification._overlapping_alignments(\n sam, Gff3EntryMoc(\"chrom\", 1, 10))) == []\n # 4 overlapping base in the 5' end of the reads => not enough\n assert mapping_ids(gqd.gene_wise_quantification._overlapping_alignments(\n sam, Gff3EntryMoc(\"chrom\", 1, 13))) == []\n # 5 overlapping base in the 5' end of the reads => okay\n assert mapping_ids(gqd.gene_wise_quantification._overlapping_alignments(\n sam, Gff3EntryMoc(\"chrom\", 1, 14))) == [\n \"myread:01\", \"myread:02\", \"myread:03\", \"myread:04\", \"myread:05\"]\n # 1 overlapping base in the 3' end of the reads => not enough\n assert mapping_ids(gqd.gene_wise_quantification._overlapping_alignments(\n sam, Gff3EntryMoc(\"chrom\", 19, 23))) == []\n # 4 overlapping base in the 3' end of the reads => not enough\n assert mapping_ids(gqd.gene_wise_quantification._overlapping_alignments(\n sam, Gff3EntryMoc(\"chrom\", 16, 23))) == []\n # 5 overlapping base in the 3' end of the reads => not enough\n assert mapping_ids(gqd.gene_wise_quantification._overlapping_alignments(\n sam, Gff3EntryMoc(\"chrom\", 15, 23))) == [\n \"myread:01\", \"myread:02\", \"myread:03\", \"myread:04\", \"myread:05\"]", "def region_gene_overlap(\n region_pr,\n gene_bed,\n up=100_000,\n down=100_000,\n):\n genes = pr.read_bed(gene_bed)\n # Convert to DataFrame & we don't need intron/exon information\n genes = genes.as_df().iloc[:, :6]\n\n # Get the TSS only\n genes.loc[genes[\"Strand\"] == \"+\", \"End\"] = genes.loc[\n genes[\"Strand\"] == \"+\", \"Start\"\n ]\n genes.loc[genes[\"Strand\"] == \"-\", \"Start\"] = genes.loc[\n genes[\"Strand\"] == \"-\", \"End\"\n ]\n\n # Extend up and down\n genes.loc[genes[\"Strand\"] == \"+\", \"Start\"] -= up\n genes.loc[genes[\"Strand\"] == \"+\", \"End\"] += down\n genes.loc[genes[\"Strand\"] == \"-\", \"Start\"] -= down\n genes.loc[genes[\"Strand\"] == \"-\", \"End\"] += up\n\n # Perform the overlap\n genes = pr.PyRanges(genes)\n genes = genes.join(region_pr).as_df()\n\n return genes", "def range_overlap(range1, range2):\n return range(max(range1[0], range2[0]), min(range1[1], range2[1]))", 
"def ranges_overlap(start1, end1, start2, end2):\n return start1 <= end2 and end1 >= start2", "def overlapping_ranges(\n ranges_1: Sequence[Tuple[int, int]],\n ranges_2: Sequence[Tuple[int, int]],\n) -> List[Tuple[int, int]]:\n return [\n (max(first[0], second[0]), min(first[1], second[1]))\n for first in ranges_1\n for second in ranges_2\n if max(first[0], second[0]) < min(first[1], second[1])\n ]", "def __init__(self):\n        self.intervals = []\n        \n    ### O(len(intervals))\n    def addNum(self, val: int) -> None:\n        if(len(self.intervals) == 0):\n            self.intervals.append([val, val])\n            return\n        \n        flag, left = 1, -math.inf\n        for i, interval in enumerate(self.intervals):\n            for point in interval:\n                right = point\n                if(left == val or right == val):\n                    return \n                elif(left < val and right > val):\n                    if(flag):\n                        ### merge case\n                        if(val == left+1 and val == right -1):\n                            self.intervals[i-1][1] = self.intervals[i][1]\n                            self.intervals.pop(i)\n                        elif(val == left+1):\n                            self.intervals[i-1][1] = val\n                        elif(val == right-1):\n                            self.intervals[i][0] = val\n                        else:\n                            self.intervals.insert(i, [val, val])\n                    ### val in one of the existing intervals\n                    return", "def resolveRanges( self, left_ranges, right_ranges):\n new_left_ranges = []\n new_right_ranges = []\n \n ranges = map( lambda x: (x[0], x[1], 0), left_ranges)\n ranges += map( lambda x: (x[0], x[1], 1), right_ranges)\n \n ranges.sort()\n \n last_left, last_right, last_is_right = ranges[0]\n for this_left, this_right, this_is_right in ranges[1:]:\n \n ## if segment is the same type, just combine\n if (last_is_right and this_is_right) or (not last_is_right and not this_is_right):\n last_right = this_right\n continue\n \n ## write if not consecutive and there is a small gap\n if this_left - last_right > self.min_segment_size:\n if last_is_right:\n new_right_ranges.append((last_left, last_right))\n else:\n new_left_ranges.append((last_left, last_right))\n \n last_left, last_right, last_is_right = this_left, this_right, this_is_right\n continue\n \n ## if current segment is too small: add to current type\n if (this_right - this_left) < self.min_segment_size:\n last_right = this_right\n continue\n \n ## if previous segment is too small to be output: add to next type\n if (last_right - last_left) < self.min_segment_size:\n last_right = this_right\n last_is_right = this_is_right\n continue\n \n ## otherwise: output\n if last_is_right:\n new_right_ranges.append((last_left, last_right))\n else:\n new_left_ranges.append((last_left, last_right))\n \n last_left, last_right, last_is_right = this_left, this_right, this_is_right \n \n if last_is_right:\n new_right_ranges.append((last_left, last_right))\n else:\n new_left_ranges.append((last_left, last_right))\n \n self.debug( \"ranges=%s\" % str(ranges), 4 )\n self.debug( \"new_left_ranges=%s\" % str(new_left_ranges), 4)\n self.debug( \"new_right_ranges=%s\" % str(new_right_ranges), 4 )\n \n return new_left_ranges, new_right_ranges", "def overlaps(self, other):\n return _binary_op(arctern.ST_Overlaps, self, other).astype(bool, copy=False)", "def overlap_with(self, other):", "def 
imerge(*iterables):\n return _IMerge(iterables)", "def merge_events(self, threshold, overlap_fun):\n ids = {event: [] for event in self.positive_ids}\n\n for id1, id2 in itertools.combinations(self.positive_ids, 2):\n if overlap_fun(id1, id2, threshold):\n ids[id1] += [id2]\n ids[id2] += [id1]\n\n # add new events\n for id1 in ids:\n for id2 in ids[id1]:\n self.positive_ids[id1] += [trans_id for trans_id in self.positive_ids[id2]\n if trans_id not in self.positive_ids[id1]]\n\n self.negative_ids[id1] += [trans_id for trans_id in self.negative_ids[id2]\n if trans_id not in self.negative_ids[id1]]", "def _merge_ranges(ranges: Iterable[Range]) -> _LinkedList[Range]:\n # sort our list of ranges, first\n ranges = _LinkedList(sorted(ranges))\n # # determine if we need to do anything\n # if len(ranges) < 2:\n # return\n # try to merge each range with the one after it, up until the end of the list\n node = ranges.first\n while node and node.next:\n prev_range = node.value\n next_range = node.next.value\n new_range = prev_range.union(next_range)\n if new_range is not None: # TODO python 3.8 refactoring - this is a great place for :=\n node.value = new_range\n ranges.pop_after(node)\n else:\n node = node.next\n return ranges", "def merge(self, peak2, ext_len, chrom_len):\n # find peak midpoints\n peak_mids = [find_midpoint(self.start, self.end)]\n peak_mids.append(find_midpoint(peak2.start, peak2.end))\n\n # weight peaks\n peak_weights = [1 + len(self.act)]\n peak_weights.append(1 + len(peak2.act))\n\n # compute a weighted average\n merge_mid = int(0.5 + np.average(peak_mids, weights=peak_weights))\n\n # extend to the full size\n merge_start = max(0, merge_mid - ext_len / 2)\n merge_end = merge_start + ext_len\n if chrom_len and merge_end > chrom_len:\n merge_end = chrom_len\n merge_start = merge_end - ext_len\n\n # merge activities\n merge_act = self.act | peak2.act\n\n # set merge to this peak\n self.start = merge_start\n self.end = merge_end\n self.act = merge_act", "def mergeWith(self, others):", "def merge_sorth_in_place(num_list, start_index, end_index):\n pass", "def merge(): #Status: WIP\r\n pass", "def merge_segdb(segdbs):\n segdb = segdbs[0]\n for r in segdbs[1:]:\n segdb.extend(r)\n return segdb", "def merge_segdb(segdbs):\n segdb = segdbs[0]\n for r in segdbs[1:]:\n segdb.extend(r)\n return segdb", "def _merge(self):\n raise NotImplementedError", "def find_overlap(self, gdf1, gdf2):\n gdf1.crs = \"epsg:4326\" # todo: fix this ugliness\n gdf2.crs = \"epsg:4326\"\n return gpd.sjoin(gdf1, gdf2).drop(\"index_right\", axis=1)", "def calculate_overlap(self, r1, r2):\n\n # We know that reads that can be glued,\n # share at least half of their length.\n # Make sure one is not shorter than\n # the half of another.\n\n if len(r1) / 2 + len(r1) % 2 <= len(r2) \\\n and len(r2) / 2 + len(r2) % 2 <= len(r1):\n\n # prepare second halves for overlap pre-check\n\n tail1 = r1[len(r1) / 2:]\n tail2 = r2[len(r2) / 2:]\n \n # case 1: r1 contains r2 completely\n #\n # For example,\n #\n # ATCGCCGGAT\n # TCGCCGGA\n \n pos = r1.find(r2)\n if pos != -1:\n self.reads[r1].overlaps[r2] = pos + len(r2) - len(r1)\n \n # case 2: r2 contains r1 completely\n #\n # For example,\n #\n # TCGCCGGA\n # ATCGCCGGAT\n \n pos = r2.find(r1)\n if pos != -1:\n self.reads[r2].overlaps[r1] = pos + len(r1) - len(r2)\n \n # case 3: end of r1 overlaps with beginning of r2\n #\n # For example,\n #\n # ATCGCCGGAT\n # TCGCCGGATGC\n #\n # First check that at least half of r1 is in r2\n # If there is a match, calculate the expected length \n 
# of overlap and check if they indeed overlap.\n\n \n pos = r2.find(tail1)\n if pos != -1:\n overlap = pos + len(tail1)\n if r1[-overlap:] == r2[:overlap]:\n self.reads[r1].overlaps[r2] = len(r2) - overlap\n \n # case 4: end of r2 overlaps with beginning of r1\n #\n # For example,\n #\n # CGCCGGATCC\n # TCGCCGGAT\n #\n # First check that at least half of r2 is in r1\n # If there is a match, calculate the expected length \n # of overlap and check if they indeed overlap.\n \n pos = r1.find(tail2)\n if pos != -1: \n overlap = pos + len(tail2)\n if r2[-overlap:] == r1[:overlap]:\n self.reads[r2].overlaps[r1] = len(r1) - overlap", "def merge_regions(bed_files, out_bed):\n merge_all = (\"zcat {0} | \"\n \"sort -k1,1 -k2,2n | \"\n \"bedtools merge -i stdin | \"\n \"gzip -c \"\n \"> {1}\").format(' '.join(bed_files), out_bed)\n print merge_all\n os.system(merge_all)\n\n return None", "def overlaps(a, b, **kwargs):\n return lib.overlaps(a, b, **kwargs)", "def paired_interval_extend(uniq_fragment,fragment_cov,gtf_dic):\n out_dic = {}\n total_reads = 0\n for key in uniq_fragment.keys():\n chr_no = key[0]\n #print (frag_start,frag_end)\n frag_strand = key[3]\n interval_comp = uniq_fragment[key][0]\n complete_info = uniq_fragment[key][1]\n frag_cov = fragment_cov[key]\n total_reads += frag_cov\n geneNA = 'NA'\n geneType = 'NA'\n geneRegion = 'NA'\n flag = 0\n for trans in gtf_dic[(chr_no,frag_strand)]:\n frag_start,frag_end = key[1:3]\n # for trans in gtf_dic[('chr1','-')]:\n # if chr_no == 'chr1' and frag_strand == '-':\n if frag_start > trans[0] and frag_end < trans[1]:\n #print 'Hello!'\n # print (trans)\n geneNA = trans[4]\n geneType = trans[5]\n if geneType == 'protein_coding':\n CDS_start,CDS_end = trans[2:4]\n if frag_start >= CDS_start and frag_end <= CDS_end:\n geneRegion = 'CDS'\n elif frag_strand == '+':\n if frag_end <= CDS_start:\n geneRegion = '5UTR'\n elif frag_start < CDS_start and frag_end > CDS_start:\n geneRegion = '5UTR-CDS'\n elif frag_start < CDS_end and frag_end > CDS_end:\n geneRegion = 'CDS-3UTR'\n elif frag_start >= CDS_end:\n geneRegion = '3UTR'\n elif frag_strand == '-':\n if frag_end <= CDS_start:\n geneRegion = '3UTR'\n elif frag_start < CDS_start and frag_end > CDS_start:\n geneRegion = 'CDS-3UTR'\n elif frag_start < CDS_end and frag_end > CDS_end:\n geneRegion = '5UTR-CDS'\n elif frag_start >= CDS_end:\n geneRegion = '5UTR'\n else:\n geneRegion = 'Null'\n # print (frag_start,frag_end,CDS_start,CDS_end,geneNA,geneRegion)\n#------------------------------------------------------------------------------ intersect of fragments interval and exons interval\n frag_intersect = interval_comp & trans[-1]\n interval_comp_length = sum([interval_comp[a].upper- interval_comp[a].lower for a in range(0,len(interval_comp))])\n # print (interval_comp)\n # print (frag_intersect)\n#------------------------------------------------------------------------------ fragments located in introns\n if frag_intersect == P.empty(): \n flag = 1\n start_out = []\n length_out = []\n for interval_region in list(interval_comp):\n start_out.append(str(int(interval_region.lower - frag_start)))\n length_out.append(str(int(interval_region.upper - interval_region.lower)))\n out_dic.setdefault((chr_no,frag_start,frag_end,frag_strand),[]).append((chr_no,str(frag_start),str(frag_end),\\\n geneNA,geneType,frag_strand,\\\n str(frag_start),str(frag_end),'intron',str(len(start_out)),\\\n ','.join(length_out),','.join(start_out),str(frag_cov),flag,complete_info))\n else:\n if complete_info == 'complete':\n flag = 3\n 
#print interval_comp\n#------------------------------------------------------------------------------ reduce alignment noise\n frag_intersect_length = sum([frag_intersect[a].upper-frag_intersect[a].lower for a in range(0,len(frag_intersect))])\n absolute_diff = abs(frag_intersect_length-interval_comp_length)\n if absolute_diff == 0:\n#------------------------------------------------------------------------------ \n start_region = []\n length_region = []\n for region in frag_intersect:\n start_region.append(str(int(region.lower - frag_start)))\n length_region.append(str(int(region.upper - region.lower)))\n out_dic.setdefault((chr_no,frag_start,frag_end,frag_strand),[]).append((chr_no,str(frag_start),str(frag_end),\\\n geneNA,geneType,frag_strand,\\\n str(frag_start),str(frag_end),geneRegion,str(len(start_region)),\\\n ','.join(length_region),','.join(start_region),str(frag_cov),flag,complete_info))\n else:\n start_region = []\n length_region = []\n for region in interval_comp:\n start_region.append(str(int(region.lower - frag_start)))\n length_region.append(str(int(region.upper - region.lower)))\n out_dic.setdefault((chr_no,frag_start,frag_end,frag_strand),[]).append((chr_no,str(frag_start),str(frag_end),geneNA,geneType,\\\n frag_strand,str(frag_start),str(frag_end),'intron-containing',str(len(start_region)),\\\n ','.join(length_region),','.join(start_region),str(frag_cov),flag,complete_info))\n else:\n #print interval_comp\n #print frag_intersect\n#------------------------------------------------------------------------------ fragments boundaries located in exons\n #print frag_intersect[0][0],frag_start,frag_intersect[-1][1],frag_end\n #print abs_position\n # print (P.closedopen(frag_start,frag_end),trans[-1])\n interval_update = P.closedopen(frag_start,frag_end) & trans[-1]\n # print (interval_update)\n frag_trans_length = sum([interval_update[a].upper-interval_update[a].lower for a in range(0,len(interval_update))])\n absolute_diff = abs(frag_trans_length-interval_comp_length)\n #print absolute_diff\n #print geneRegion\n #print interval_comp\n #print abs_position\n if absolute_diff <= 300: #insert sequence length <=200nt\n #print frag_trans_length,interval_comp_length\n #print geneRegion\n flag = 2\n start_out = []\n length_out = []\n for interval_region in list(interval_update):\n start_out.append(str(int(interval_region.lower - frag_start)))\n length_out.append(str(int(interval_region.upper - interval_region.lower)))\n out_dic.setdefault((chr_no,frag_start,frag_end,frag_strand),[]).append((chr_no,str(frag_start),str(frag_end),\\\n geneNA,geneType,frag_strand,\\\n str(frag_start),str(frag_end),geneRegion,str(len(start_out)),\\\n ','.join(length_out),','.join(start_out),str(frag_cov),flag,complete_info))\n else:\n # print (trans)\n flag = 1\n start_out = []\n length_out = []\n for interval_region in list(interval_comp):\n start_out.append(str(int(interval_region.lower - frag_start)))\n length_out.append(str(int(interval_region.upper - interval_region.lower)))\n out_dic.setdefault((chr_no,frag_start,frag_end,frag_strand),[]).append((chr_no,str(frag_start),str(frag_end),\\\n geneNA,geneType,frag_strand,\\\n str(frag_start),str(frag_end),'intron-containing',str(len(start_out)),\\\n ','.join(length_out),','.join(start_out),str(frag_cov),flag,complete_info))\n if flag == 0:\n start_out = []\n length_out = []\n for interval_region in list(interval_comp):\n start_out.append(str(int(interval_region.lower - frag_start)))\n length_out.append(str(int(interval_region.upper - 
interval_region.lower)))\n out_dic[(chr_no,frag_start,frag_end,frag_strand)] = [(chr_no,str(frag_start),str(frag_end),'intergenic','intergenic',frag_strand,\\\n str(frag_start),str(frag_end),geneRegion,str(len(start_out)),\\\n ','.join(length_out),','.join(start_out),str(frag_cov),flag,complete_info)]\n print ('Total treated fragments: ' + str(total_reads))\n return out_dic", "def extend(self, iterable: Iterable[Rangelike]) -> None:\n self._ranges = RangeSet._merge_ranges(\n self._ranges + (Range(r) for r in iterable)\n )", "def merge(self, low, high, mid):\n array_one = []\n array_two = []\n\n for element in range(low, mid+1):\n array_one.append(self.array[element])\n for element in range(mid+1, high+1):\n array_two.append(self.array[element])\n\n i = low\n\n while len(array_one) != 0 and len(array_two) != 0:\n if array_one[0] <= array_two[0]:\n self.array[i] = array_one.pop(0)\n i += 1\n else:\n self.array[i] = array_two.pop(0)\n i += 1\n\n while len(array_one) != 0:\n self.array[i] = array_one.pop(0)\n i += 1\n\n while len(array_two) != 0:\n self.array[i] = array_two.pop(0)\n i += 1", "def dates_intervals_are_overlapped(start_1, end_1, start_2, end_2):\n return end_1 >= start_2 and end_2 >= start_1", "def check_for_overlapping_genes(sequence_record):\n overlapping_gene_pairs = []\n all_gene_positions = []\n for gene in sequence_record.features:\n # BCBio uses 0-based and end-exclusive positions (first-third base is bases 0,1,2, i.e range 0-3), \n # so add 1 to start and keep end as is to convert to 1-based-end-inclusive\n all_gene_positions.append((gene.location.start.position+1, gene.location.end.position, gene.id))\n all_gene_positions.sort()\n for gene1_data,gene2_data in itertools.izip(all_gene_positions,all_gene_positions[1:]):\n (gene1_start,gene1_end,gene1_name), (gene2_start,gene2_end,gene2_name) = gene1_data, gene2_data\n if gene1_end>=gene2_start:\n overlapping_gene_pairs.append((gene1_name,gene2_name))\n # check for \"gene1 contains gene2\", print a warning, since it can make other things not work right\n if gene1_end>=gene2_end:\n print(\"WARNING: gene %s is completely inside gene %s! \"%(gene1_name, gene2_name)\n +\"Various gene-position-related results may be inaccurate.\")\n return overlapping_gene_pairs\n # MAYBE-TODO rewrite it so it actually detects ALL overlaps? Right now if gene A contains nonoverlapping genes B and C, it'll sort them as (A,B,C) since A starts first, so it'll detect the (A,B) overlap, but it won't detect the (A,C) overlap because it doesn't CHECK (A,C), only (A,B) and (B,C). This could be fixed either by just brute-force checking all gene pairs (and then using DNA_basic_utilities.position_test_overlap), or by writing something prettier. 
In any case, not a priority, since generally genes DON'T OVERLAP AT ALL.", "def getCoveringRanges( self, left_ranges, right_ranges, parent_ranges ):\n \n child_ranges = map( lambda x: (x[0], x[1], 0), left_ranges)\n child_ranges += map( lambda x: (x[0], x[1], 1), right_ranges)\n \n child_ranges.sort()\n parent_ranges.sort()\n \n new_left_ranges = []\n new_right_ranges = []\n \n parent_index = 0\n last_to = 0\n \n parent_left, parent_right = parent_ranges[parent_index]\n\n self.debug( \"child_ranges=%s\" % str(child_ranges) )\n self.debug( \"parent_ranges=%s\" % str(parent_ranges))\n \n last_left, last_right, last_is_right = child_ranges[0]\n \n for this_left, this_right, this_is_right in child_ranges[1:]:\n \n ## look at previous segment last_left to last_right:\n ## find matching parent_index:\n old_parent_index = parent_index\n while (min(parent_right, last_right) - max(parent_left, last_left)) < 0:\n parent_index += 1\n if parent_index == len(parent_ranges): break\n parent_left, parent_right = parent_ranges[parent_index]\n \n ## skip fragments that do not overlap\n if parent_index == len(parent_ranges):\n parent_index = old_parent_index\n last_left, last_right, last_is_right = this_left, this_right, this_is_right\n continue\n \n ## firstly: make segment covering\n new_left = min(parent_left, last_left)\n new_right = min(max(parent_right, last_right), this_left)\n \n if last_is_right:\n new_right_ranges.append((new_left, new_right))\n else:\n new_left_ranges.append((new_left, new_right))\n \n ## reduce parent on left side\n parent_left=max(new_right, parent_left)\n \n last_left, last_right, last_is_right = this_left, this_right, this_is_right\n \n ## process last segment\n while (min(parent_right, last_right) - max(parent_left, last_left)) < 0:\n parent_index += 1\n if parent_index >= len(parent_ranges): break \n parent_left, parent_right = parent_ranges[parent_index]\n \n new_left = min(parent_left, last_left)\n new_right = max(parent_right, last_right)\n \n if last_is_right:\n new_right_ranges.append((new_left, new_right))\n else:\n new_left_ranges.append((new_left, new_right))\n \n self.debug( \"old left ranges=%s\" % str(left_ranges))\n self.debug( \"new left ranges=%s\" % str(new_left_ranges))\n self.debug( \"old right ranges=%s\" % str(right_ranges))\n self.debug( \"new right ranges=%s\" % str(new_right_ranges))\n \n return new_left_ranges, new_right_ranges", "def get_interval_seqs(interval_alignment: AlignIO.MultipleSeqAlignment):\n gapless_seqs = [str(record.seq.ungap(\"-\")) for record in interval_alignment]\n\n callback_seqs, expanded_seqs = [], []\n expanded_set = set()\n for seq in remove_duplicates(gapless_seqs):\n if len(expanded_set) == 0:\n callback_seqs.append(seq)\n if not set(seq).issubset(allowed_bases):\n continue\n alternatives = [iupac[base] for base in seq]\n for tuple_product in itertools.product(*alternatives):\n expanded_str = \"\".join(tuple_product)\n if expanded_str not in expanded_set:\n expanded_set.add(expanded_str)\n expanded_seqs.append(expanded_str)\n\n if len(expanded_set) == 0:\n logging.warning(\n \"WARNING: Every sequence must have contained an N in this slice - redo sequence curation because this is nonsense\"\n )\n logging.warning(f'Sequences were: {\" \".join(callback_seqs)}')\n logging.warning(\n \"Using these sequences anyway, and should be ignored downstream\"\n )\n return callback_seqs\n return expanded_seqs", "def range_union(ranges):\n union = []\n for r in sorted(ranges, key=lambda r: r.start):\n if len(union) > 0 and union[-1].stop >= 
r.start:\n union[-1] = range(union[-1].start, max(union[-1].stop, r.stop))\n else:\n union.append(r)\n return union", "def calcOverlap(intervals):\n bp = 0 \n for i in intervals:\n bp += sum([overlapCases(i, j) for j in intervals])\n return(bp)", "def merge_orders(self, overlap_handling=\"adjust Aeff\"):\n\n # split into separate photons objects for each observation, and split the orders within that observation into\n # faux separate observations, then merge them\n if len(self.obs_times) > 1:\n separate = [self.get_obs(i) for i in range(len(self.obs_times))]\n else:\n separate = [self]\n for obj in separate:\n temp_meta = obj.obs_metadata\n order_ranges = obj.obs_bandpasses[0]\n Norders = len(order_ranges)\n obj.obs_metadata = [0]*Norders\n obj.obs_times *= Norders\n obj.obs_bandpasses = [order_ranges[[i]] for i in range(len(order_ranges))]\n obj.photons['n'] = obj.photons['o'] - _np.min(obj.photons['o'])\n obj.photons.remove_column('o')\n obj.merge_like_observations(overlap_handling=overlap_handling)\n obj.obs_metadata = temp_meta\n obj.photons.remove_column('n')\n obj.obs_bandpasses = [_np.vstack(obj.obs_bandpasses)]\n obj.obs_times = [obj.obs_times[0]]\n\n all = sum(separate[1:], separate[0])\n self.obs_times = all.obs_times\n self.obs_bandpasses = all.obs_bandpasses\n self.obs_metadata = all.obs_metadata\n self.photons = all.photons", "def merge_sync_loop_ranges(func, sync_infos):\n # sanity check\n if sync_infos == None:\n return None\n if sync_infos == []:\n return []\n # convert to sync loop ranges\n sync_loop_ranges = []\n for sync_info in sync_infos:\n sync_vars = sync_info.sync_vars\n for addr_range in sync_info.addr_ranges:\n saddr = addr_range[0]\n eaddr = addr_range[1]\n sync_loop_range = SyncLoopRange(func, saddr, eaddr, sync_vars)\n sync_loop_ranges.append(sync_loop_range)\n sync_loop_ranges = sorted(sync_loop_ranges)\n # merge ranges\n merged_sync_loop_ranges = []\n merge_base = sync_loop_ranges[0]\n for sync_loop_range in sync_loop_ranges[1:]:\n if not merge_base.try_combine(sync_loop_range):\n merged_sync_loop_ranges.append(merge_base)\n merge_base = sync_loop_range\n merged_sync_loop_ranges.append(merge_base)\n # ok, done\n return merged_sync_loop_ranges", "def overlaps(self, right: GeoSpatialValue) -> ir.BooleanValue:\n return ops.GeoOverlaps(self, right).to_expr()", "def intervals_and_sources(self, chromosomes):\n num_intervals = sum([len(ints) for ints in chromosomes.itervalues()])\n intervals = np.empty(num_intervals, dtype=np.uint32)\n sources = np.empty(num_intervals, dtype=np.uint8)\n interval_num = 0\n for chromosome, interval_list in sorted(chromosomes.iteritems(), key=lambda x: x[0]):\n for species, end in interval_list:\n intervals[interval_num] = self.genome_index(chromosome, end)\n sources[interval_num] = species\n interval_num += 1\n return intervals, sources", "def overlaps(self, other): # -> bool:\n ...", "def merge_fasta_by_gaps(input_filename, output_filename, sequence_name, gap_size):\n gap = \"N\" * gap_size\n new_sequences = []\n chain = []\n output_position = 0\n for record in SeqIO.parse(input_filename, \"fasta\"):\n record_length = len(record)\n new_sequences.append(str(record.seq))\n\n # Create a simple PSL record for the input sequence and the new output\n # sequence for future liftOver use.\n chain.append([0, 0, 0, 0, 0, 0, 0, 0, \"+\", sequence_name, 0, output_position, output_position + record_length, record.id, record_length, 0, record_length, 1, record_length, 0, 0])\n\n output_position = output_position + record_length + gap_size\n\n # 
Save the new record with all sequences joined by the requested gaps.\n new_record = SeqRecord(Seq(gap.join(new_sequences)), id=sequence_name, description=\"\")\n\n # Write out all records to the given output file.\n SeqIO.write([new_record], output_filename, \"fasta\")\n\n # Update all chain items with the final target sequence length and output the chain.\n new_record_length = len(new_record)\n for item in chain:\n item[PSL_TSIZE_INDEX] = new_record_length\n print \"\\t\".join(map(str, item))", "def nlp_merge_common_matches(matches):\n\n merged_matches = []\n\n for idx_1, start_1, end_1 in matches:\n\n curr_idx = idx_1\n curr_start = start_1\n curr_end = end_1\n\n for idx_2, start_2, end_2 in matches:\n\n if (start_2 < curr_start and end_2 > curr_end) or (start_2 <= curr_start and end_2 > curr_end) or (\n start_2 < curr_start and end_2 >= curr_end):\n curr_idx = idx_2\n curr_start = start_2\n curr_end = end_2\n\n merged_matches.append((curr_idx, curr_start, curr_end))\n\n return list(set(merged_matches))", "def find_overlap_annots(ibs1, ibs2, method='annots'):\n if method == 'images':\n images1, images2 = ibs1.images(), ibs2.images()\n idxs1, idxs2 = ut.isect_indices(images1.uuids, images2.uuids)\n isect_images1 = images1.take(idxs1)\n annot_uuids = ut.flatten(isect_images1.annot_uuids)\n isect_annots1 = ibs1.annots(uuids=annot_uuids)\n elif method == 'annots':\n annots1, annots2 = ibs1.annots(), ibs2.annots()\n idxs1, idxs2 = ut.isect_indices(annots1.uuids, annots2.uuids)\n isect_annots1 = annots1.take(idxs1)\n return isect_annots1.aids", "def _merge_row(self, row1, row2):\n\n duprow = list(row1)\n duprow.extend(list(row2))\n row1.clear()\n overlap_map = {}\n\n for body, overlap in duprow:\n if body not in overlap_map:\n overlap_map[body] = 0\n overlap_map[body] += overlap\n\n for body, overlap in overlap_map.items():\n row1.add((body, overlap))", "def collapse_ranges (ranges):\n\n # FIXME: does tuple and set conversion really add anything?\n\n # Ranges must be unique: we do not count timings when they start and end at\n # exactly the same time. 
By using a set, we do not repeat ranges.\n # we convert to a list before return.\n final = set()\n\n # return empty list if given an empty list\n if not ranges:\n return final\n\n START = 0\n END = 1\n\n # sort ranges into a copy list, by their start time\n _ranges = sorted(ranges, key=lambda x: x[START])\n\n # sat 'base' to the earliest range (smallest starting point)\n base = _ranges[0]\n\n for _range in _ranges[1:]:\n\n # if range is empty, skip it\n if _range[START] == _range[END]:\n continue\n\n if _range[START] <= base[END]:\n # ranges overlap -- extend the base\n base[END] = max(base[END], _range[END])\n\n else:\n # ranges don't overlap -- move base to final, and current _range\n # becomes the new base\n final.add(tuple(base))\n base = _range\n\n # termination: push last base to final\n final.add(tuple(base))\n\n # Return final as list of list in case a mutable type is needed.\n return [list(b) for b in final]", "def merge_overlapping(boxes, max_overlap=0.05):\n def overlaps(bi, bj):\n return (bi.overlap(bj) >= max_overlap\n or bj.overlap(bi) >= max_overlap)\n\n def merge_into(boxes, box):\n overlapping = [b for b in boxes if overlaps(box, b)]\n if (len(overlapping) == 0):\n return (boxes + [box])\n else:\n preserved = [b for b in boxes if not overlaps(box, b)]\n merged = covering_box(overlapping + [box])\n return (merge_into(preserved, merged))\n\n boxes_merged = []\n for b in boxes:\n boxes_merged = merge_into(boxes_merged, b)\n return boxes_merged", "def get_inter_cds_regions(annotations):\n # Determine locations of inter-CDS regions for each chromosome\n inter_cds_regions = {}\n\n for chr_id, chromosome in annotations.items():\n # get chromosome dimensions (the first feature represents the\n # chromosome itself)\n ch_end = int(chromosome.features[0].location.end)\n\n # filter out everything except for genes\n genes = [x for x in chromosome.features if x.type == 'gene']\n\n # order by position along chromosome\n genes.sort(key=lambda x: x.location.start)\n\n # add range before first gene\n start = 0\n\n # keep track of strand of polycistronic transcriptional unit\n strand = None\n\n inter_cds_regions[chr_id] = {\n -1: [],\n +1: []\n }\n\n # iterate through genes and store the ranges between them;\n # for TriTrypDB files, the gene boundaries are generally the same\n # as the CDS boundaries.\n for gene in genes:\n # Determine location for the region up to start of the gene\n end = int(gene.location.start)\n\n # Skip over snoRNAs, etc. 
that are nested inside of other genes\n # For example: TcCLB TcChr22-2 179,000:180,000\n if end <= start:\n continue\n\n # Add CDS to relevant list based on strand\n if strand is None:\n # Left-most gene\n inter_cds_regions[chr_id][gene.location.strand].append((start, end))\n elif strand != gene.location.strand:\n # Add ORFs in both directions at transcription switch sites (TSSs)\n inter_cds_regions[chr_id][+1].append((start, end))\n inter_cds_regions[chr_id][-1].append((start, end))\n else:\n # Within PTU; look for ORFs on same strand\n inter_cds_regions[chr_id][strand].append((start, end))\n\n # update start counter and strand\n start = int(gene.location.end)\n strand = gene.location.strand\n\n # add region after last gene\n inter_cds_regions[chr_id][strand].append((start, ch_end))\n\n return inter_cds_regions", "def merge(left, right):\n ret = []\n li = ri = 0\n while li < len(left) and ri < len(right):\n if left[li] <= right[ri]:\n ret.append(left[li])\n li += 1\n else:\n ret.append(right[ri])\n ri += 1\n if li == len(left):\n ret.extend(right[ri:])\n else:\n ret.extend(left[li:])\n return ret", "def merge(left, right):\n new = []\n left_index, right_index = 0, 0\n len_left, len_right = len(left), len(right)\n while left_index < len_left and right_index < len_right:\n if left[left_index] <= right[right_index]:\n new.append(left[left_index])\n left_index += 1\n else:\n new.append(right[right_index])\n right_index += 1\n new += left[left_index:]\n new += right[right_index:]\n return new", "def merge(*args):\n from ..operators.observable.merge import merge_\n return merge_(*args)", "def add(self, rng: Rangelike) -> None:\n # if it's a RangeSet, then do extend instead\n if isinstance(rng, RangeSet):\n self.extend(rng)\n return\n elif _is_iterable_non_string(rng):\n raise ValueError(\"argument is iterable and not Range-like; use .extend() instead\")\n # otherwise, convert Range to a list at first\n rng = Range(rng)\n # change the error message if necessary\n try:\n temp_ranges = self._ranges.copy()\n # if the list of ranges is empty, then add the node at the beginning\n if len(temp_ranges) == 0:\n temp_ranges.append(rng)\n inserted_node = temp_ranges.first\n # otherwise, if our range would fit at the end, then put it there\n elif rng > temp_ranges.last.value:\n temp_ranges.append(rng)\n inserted_node = temp_ranges.last\n # otherwise, find the node *before which* our range fits\n else:\n node = temp_ranges.first\n while rng > node.value:\n node = node.next\n temp_ranges.insert_before(node, rng)\n inserted_node = node.prev\n # now, merge this range with the previous range(s):\n if inserted_node.prev:\n prev_union = inserted_node.value.union(inserted_node.prev.value)\n while prev_union and inserted_node.prev:\n inserted_node.value = prev_union\n temp_ranges.pop_before(inserted_node)\n prev_union = inserted_node.value.union(inserted_node.prev.value) if inserted_node.prev else None\n # merge this range with the next range(s)\n if inserted_node.next:\n next_union = inserted_node.value.union(inserted_node.next.value)\n while next_union and inserted_node.next:\n inserted_node.value = next_union\n temp_ranges.pop_after(inserted_node)\n next_union = inserted_node.value.union(inserted_node.next.value) if inserted_node.next else None\n except TypeError:\n raise TypeError(f\"Range '{rng}' is not comparable with the other Ranges in this RangeSet\")\n # apply changes\n self._ranges = temp_ranges\n # TODO python 3.8 update - use an assignment operator (see the following code):\n # while inserted_node.prev 
and (prev_union := inserted_node.value.union(inserted_node.prev.value)):\n # inserted_node.value = prev_union\n # self._ranges.pop_before(inserted_node)\n # while inserted_node.next and (next_union := inserted_node.value.union(inserted_node.next.value)):\n # inserted_node.value = next_union\n # self._ranges.pop_after(inserted_node)", "def percentages_overlapping(self, other: 'Interval') -> Optional['Interval']:\n intersection = Interval.intersection([self, other])\n if intersection is None:\n return None\n if self.length == 0:\n return Interval(0, 1)\n return Interval(\n (intersection.a - self.a) / self.length,\n (intersection.b - self.a) / self.length)", "def merge(self, nums1, m: int, nums2, n: int) -> None:\n nums1Len, nums2Len = m, n\n if nums2Len == 0:\n return\n i,j = 0, 0\n while i<nums1Len and j <nums2Len:\n if nums1[i]<=nums2[j]:\n i += 1\n else:\n nums1.insert(i, nums2[j])\n i, j, nums1Len = i+1, j+1, nums1Len+1\n print(nums1)\n #print('ij',i,j)\n while j< nums2Len:\n nums1.insert(i, nums2[j])\n i, j, nums1Len = i+1, j+1, nums1Len+1\n #print(nums1)\n for _ in range(nums2Len):\n nums1.pop()\n #print(nums1)", "def _intersect_interval(self, other):\n interval = Intersection(self.interval, other.interval)\n return interval.inf, interval.sup", "def overlap(start_idx1, end_idx1, start_idx2, end_idx2):\n head = min(end_idx1, end_idx2)\n tail = max(start_idx1, start_idx2)\n return head >= tail", "def merge(items, start, end, subarray_size):\n mid = start + subarray_size\n assert is_sorted(items[start:mid])\n assert is_sorted(items[start+subarray_size:end])\n\n aux = []\n i = start\n j = mid\n\n while i < mid and j < end:\n if items[i] < items[j]:\n aux.append(items[i])\n i += 1\n else:\n aux.append(items[j])\n j += 1\n aux.extend(items[i:mid] or items[j:end])\n assert is_sorted(aux)\n\n ## Overwrite elements in subarrays in their correctly sorted order\n for i in range(start, end):\n items[i] = aux[i-start]", "def overlap(start1, end1, start2, end2):\n return not (end1 < start2 or end2 < start1)", "def _merge_boundaries(self):\n \n optical_seg = self._amalgamated_optical_segments\n if bool(optical_seg):\n optical_seg[\"catagory\"] = OPTICAL * tf.ones_like(\n optical_seg[\"x_start\"],\n dtype=tf.int64\n )\n self._optical_seg_count = tf.shape(\n optical_seg[\"x_start\"],\n out_type=tf.int64\n )[0]\n else:\n self._optical_seg_count = 0\n \n stop_seg = self._amalgamated_stop_segments\n if bool(stop_seg):\n stop_seg[\"catagory\"] = STOP * tf.ones_like(\n stop_seg[\"x_start\"],\n dtype=tf.int64\n )\n self._stop_seg_count = tf.shape(\n stop_seg[\"x_start\"],\n out_type=tf.int64\n )[0]\n else:\n self._stop_seg_count = 0\n \n target_seg = self._amalgamated_target_segments\n if bool(target_seg):\n target_seg[\"catagory\"] = TARGET * tf.ones_like(\n target_seg[\"x_start\"],\n dtype=tf.int64\n )\n self._target_seg_count = tf.shape(\n target_seg[\"x_start\"],\n out_type=tf.int64\n )[0]\n else:\n self._target_seg_count = 0\n \n self._merged_segments = amalgamate(\n [optical_seg, stop_seg, target_seg], \n SEGMENT_GEO_SIG | {\"catagory\"}\n )\n \n optical_arc = self._amalgamated_optical_arcs\n if bool(optical_arc):\n optical_arc[\"catagory\"] = OPTICAL * tf.ones_like(\n optical_arc[\"x_center\"],\n dtype=tf.int64\n )\n self._optical_arc_count = tf.shape(\n optical_arc[\"x_center\"],\n out_type=tf.int64\n )[0]\n else:\n self._optical_arc_count = 0\n \n stop_arc = self._amalgamated_stop_arcs\n if bool(stop_arc):\n stop_arc[\"catagory\"] = STOP * tf.ones_like(\n stop_arc[\"x_center\"],\n dtype=tf.int64\n 
)\n self._stop_arc_count = tf.shape(\n stop_arc[\"x_center\"],\n out_type=tf.int64\n )[0]\n else:\n self._stop_arc_count = 0\n \n target_arc = self._amalgamated_target_arcs\n if bool(target_arc):\n target_arc[\"catagory\"] = TARGET * tf.ones_like(\n target_arc[\"x_center\"],\n dtype=tf.int64\n )\n self._target_arc_count = tf.shape(\n target_arc[\"x_center\"],\n out_type=tf.int64\n )[0]\n else:\n self._target_arc_count = 0\n \n self._merged_arcs = amalgamate(\n [optical_arc, stop_arc, target_arc], \n ARC_GEO_SIG | {\"catagory\"}\n )", "def mergeCells(self, startRow, startColumn, endRow, endColumn):\n\n\t\t\t\tif (isinstance(startColumn, str)):\n\t\t\t\t\treturn self.thing.mergeCells(f\"{startColumn}{startRow}:{endColumn}{endRow}\")\n\n\t\t\t\tif (isinstance(endColumn, int)):\n\t\t\t\t\treturn self.thing.merge_cells(start_row = startRow, start_column = startColumn, end_row = endRow, end_column = endColumn)\n\n\t\t\t\traise NotImplementedError()", "def merging(arr, s1, e1, s2, e2):\n p1 = s1\n p2 = s2\n tmp = [0 for _ in range(len(arr))]\n idx = s1\n\n while p1 <= e1 and p2 <= e2:\n if arr[p1] < arr[p2]:\n tmp[idx] = arr[p1]\n idx += 1\n p1 += 1\n else:\n tmp[idx] = arr[p2]\n idx += 1\n p2 += 1\n\n if p1 <= e1:\n for i in range(p1, e1+1):\n tmp[idx] = arr[p1]\n idx += 1\n p1 += 1\n else:\n for i in range(p2, e2+1):\n tmp[idx] = arr[p2]\n idx += 1\n p2 += 1\n\n for i in range(s1, e2+1):\n arr[i] = tmp[i]\n\n # done", "def add_extend(self, start, stop, pattern=\"\"):\n overlaps = self.max_overlap(start, stop)\n\n def clear_overlaps(lst):\n for o in lst:\n self.remove(o[\"orig_start\"], o[\"orig_end\"])\n\n if len(overlaps) == 0:\n # no overlap, just save these coordinates\n self.add(start, stop, pattern=pattern, overlap=True)\n # if filename == \"./data/i2b2_notes/167-02.txt\":\n # \tprint(\"No overlaps:\")\n # \tprint(filename,start,stop,pattern)\n elif len(overlaps) == 1:\n clear_overlaps(overlaps)\n # 1 overlap, save this value\n o = overlaps[0]\n self.add(o[\"new_start\"], o[\"new_stop\"], pattern=pattern, overlap=True)\n # if filename == \"./data/i2b2_notes/167-02.txt\":\n # \tprint(\"One overlap:\")\n # \tprint(filename,start,stop,pattern)\n else:\n clear_overlaps(overlaps)\n # greater than 1 overlap, by default this is sorted because of scan order\n o1 = overlaps[0]\n o2 = overlaps[-1]\n self.add(o2[\"new_start\"], o1[\"new_stop\"], pattern=pattern, overlap=True)\n # if filename == \"./data/i2b2_notes/167-02.txt\":\n # \tprint(\"Multiple overlaps:\")\n # \tprint(filename,start,stop,pattern)\n\n return True, None", "def remove_overlap_query(hsp1, hsp2):\n # Calculate where is the overlap and remove the overlapping part: 'qstart': 6, 'qend': 7, 'sstart': 8, 'send': 9,\n if hsp2[7] < hsp1[7]:\n new_qstart = hsp2[7]\n new_qend = hsp1[7] - 1\n delta = hsp2[8] - new_qend\n new_sstart = hsp2[9]\n new_send = hsp2[10] - delta\n\n elif hsp2[8] > hsp1[8]:\n new_qstart = hsp1[8] + 1\n new_qend = hsp2[8]\n delta = new_qstart - hsp2[7]\n new_sstart = hsp2[9] + delta\n new_send = hsp2[10]\n\n # lgHSP: 17, bitscore: 11, id: 12, pos:13\n new_id, new_pos, new_length = calculate_fraction(\n delta=delta, lgHSP=hsp2[17], pid=hsp2[12], pos=hsp2[13]\n )\n\n return {\n 17: new_length,\n 10: new_send,\n 8: new_qend,\n 7: new_qstart,\n 9: new_sstart,\n 13: new_pos,\n 12: new_id,\n }", "def fetchRefSeqIntervalsIndexed(genome='hg18',proteinCodingOnly=False,verbose=False):\n cursor=gbdbConnect(gbdbname=genome)\n select=\"SELECT * FROM refGene\"\n if verbose:\n sys.stderr.write(\"Fetching RefSeq Sequences...\\n\")\n 
cursor.execute(select)\n rows=cursor.fetchall()\n output={}\n for chr in genomelib.chr_names:\n output[chr]={}\n output[chr]['+']=[]\n output[chr]['-']=[]\n if verbose:\n sys.stderr.write(\"Creating index by chr and strand...\\n\")\n \n for row in rows:\n if proteinCodingOnly and not row['name'].startswith('NM'):\n continue\n try:\n exonStarts = map(int,row['exonStarts'].rstrip().split(\",\")[:-1])\n exonEnds = map(int,row['exonEnds'].rstrip().split(\",\")[:-1])\n except:\n print \"\\t\".join([\"%s:%s\" % (k,v) for k,v in row.iteritems()])\n start = int(row['txStart'])\n exonOffsets = [x-start for x in exonStarts]\n exonLengths = []\n for i in xrange(len(exonStarts)):\n exonLengths.append(exonEnds[i]-exonStarts[i]+1)\n if row['chrom'] in genomelib.chr_names:\n output[row['chrom']][row['strand']].append(intervallib.SplicedInterval(row['chrom'],row['txStart'],row['txEnd'],row['strand'],\",\".join([str(x) for x in exonLengths]),\",\".join([str(x) for x in exonOffsets]),name=row['name2']))\n \n #Sort \n if verbose:\n sys.stderr.write(\"Sorting:\\n\")\n tstart = time.time()\n for key in output.keys():\n if verbose:\n sys.stderr.write(\"\\t%s\\t\" % key)\n output[key]['+'].sort()\n output[key]['-'].sort()\n tend = time.time()\n if verbose:\n sys.stderr.write('%0.2f sec\\n' % (tend-tstart))\n tstart = time.time()\n return output", "def get_overlap(self, other):\n return self.intersection_over_union(other)", "def merge(collection, start, mid, end):\n\n left = collection[start : mid + 1]\n right = collection[mid + 1 : end + 1]\n\n left.reverse()\n right.reverse()\n\n index = start\n\n while left and right:\n if left[len(left) - 1] <= right[len(right) - 1]:\n collection[index] = left.pop()\n else:\n collection[index] = right.pop()\n\n index += 1\n\n while left:\n collection[index] = left.pop()\n index += 1\n\n while right:\n collection[index] = right.pop()\n index += 1", "def report_exon_overlap(strand1, exons1, strand2, exons2):\n #print(strand1)\n #print(exons1)\n #print(exons2)\n exons1 = convert_2dlst_to_set(exons1)\n first_exon1, last_exon1 = return_first_and_last_exon(exons1)\n exons2 = convert_2dlst_to_set(exons2)\n first_exon2, last_exon2 = return_first_and_last_exon(exons2)\n \n dct_report = dict()\n if not first_exon1 == first_exon2:\n \"\"\" first exon of isoseq and annotated-gene-model are not exactly the same \"\"\"\n if str(first_exon1).split(\".\")[1] == str(first_exon2).split(\".\")[1]:\n \"\"\" if first intron-start boundary is the same \"\"\"\n if int(str(first_exon1).split(\".\")[0]) > int(str(first_exon2).split(\".\")[0]):\n \"\"\" if isoseq first exon is shorter \"\"\"\n if strand1 == \"+\":\n dct_report[5] = \"partial_inside\"\n else:\n dct_report[3] = \"partial_inside\"\n else:\n \"\"\" if isoseq first exon is longer \"\"\"\n if strand1 == \"+\":\n dct_report[5] = \"partial_outside\"\n else:\n dct_report[3] = \"partial_outside\"\n else:\n if strand1 == \"+\":\n dct_report[5] = \"different\"\n else:\n dct_report[3] = \"different\"\n else:\n if strand1 == \"+\":\n dct_report[5] = \"same\"\n else:\n dct_report[3] = \"same\"\n\n if not last_exon1 == last_exon2:\n \"\"\" last exon of isoseq and annotated-gene-model are not exactly the same \"\"\"\n if str(last_exon1).split(\".\")[0] == str(last_exon2).split(\".\")[0]:\n \"\"\" if last intron-end boundary is the same \"\"\"\n if int(str(last_exon1).split(\".\")[1]) < int(str(last_exon2).split(\".\")[1]):\n \"\"\" if isoseq first exon is shorter \"\"\"\n if strand1 == \"+\":\n dct_report[3] = \"partial_inside\"\n else:\n 
dct_report[5] = \"partial_inside\"\n else:\n \"\"\" if isoseq first exon is longer \"\"\"\n if strand1 == \"+\":\n dct_report[3] = \"partial_outside\"\n else:\n dct_report[5] = \"partial_outside\"\n else:\n if strand1 == \"+\":\n dct_report[3] = \"different\"\n else:\n dct_report[5] = \"different\" \n else:\n if strand1 == \"+\":\n dct_report[3] = \"same\"\n else:\n dct_report[5] = \"same\"\n return(dct_report[5], dct_report[3])", "def merge(self, parallel_seq):\n return _OSeq(sorted(self._elements + parallel_seq._elements, key=lambda x: x.get(offset_attr, 0)))", "def remove_overlap_subject(hsp1, hsp2):\n # Calculate where is the overlap and remove the overlapping part: 'qstart': 6, 'qend': 7, 'sstart': 8, 'send': 9,\n if hsp2[9] < hsp1[9]:\n new_sstart = hsp2[9]\n new_send = hsp1[9] - 1\n delta = hsp2[10] - new_send\n new_qstart = hsp2[7]\n new_qend = hsp2[8] - delta\n\n elif hsp2[10] > hsp1[10]:\n new_sstart = hsp1[10] + 1\n new_send = hsp2[10]\n delta = new_sstart - hsp2[9]\n new_qstart = hsp2[7] + delta\n new_qend = hsp2[8]\n\n # lgHSP: 17, bitscore: 11, id: 12, pos:13\n new_id, new_pos, new_length = calculate_fraction(\n delta=delta, lgHSP=hsp2[17], pid=hsp2[12], pos=hsp2[13]\n )\n\n return {\n 17: new_length,\n 10: new_send,\n 8: new_qend,\n 7: new_qstart,\n 9: new_sstart,\n 13: new_pos,\n 12: new_id,\n }", "def merge_gti_intersections(gti):\n gtiss = np.argsort(gti[:,0])\n gtise = np.argsort(gti[:,1])\n\n gtirs = np.ravel(gti[gtiss, :])\n gtire = np.ravel(gti[gtise, :])\n gtiidxs = np.argsort(gtirs)\n gtiidxe = np.argsort(gtire)\n\n newgti = np.array([\n gti[gtiss[np.arange(0, gtiidxs.size, 2) == gtiidxs[0::2]], 0],\n gti[gtise[np.arange(1, gtiidxe.size, 2) == gtiidxe[1::2]], 1]\n ]).T\n\n return newgti", "def merge (left, right):\n i = 0\n j = 0\n n = len(left)\n m = len(right)\n out = []\n\n while i < n and j < m:\n if left[i] < right[j]:\n out.append(left[i])\n i += 1\n else:\n out.append(right[j])\n j += 1\n\n if i is n:\n for l in xrange(j, m):\n out.append(right[l])\n elif j is m:\n for l in xrange(i, n):\n out.append(left[l])\n\n return out", "def agent_overlap(t_drs, h_drs, replacements):\n t_agents = get_agent(t_drs) \n h_agents = get_agent(h_drs)\n length = len(t_agents) + len(h_agents)\n if len(t_agents) is 0:\n return 0\n common = 0\n for agent in t_agents:\n if agent in h_agents:\n h_agents.pop(h_agents.index(agent))\n common =+ 1\n if common > 1:\n print(common)\n \n return len(h_agents)/len(t_agents) #seems to work better then real comparison\n '''\n else:\n for replacement in replacements:\n if get_agent(replacement[15]) == get_agent(replacement[16]):\n return 1\n '''", "def merge_roidb(roidbs):\n roidb = roidbs[0]\n for r in roidbs[1:]:\n roidb.extend(r)\n return roidb", "def merge_roidb(roidbs):\n roidb = roidbs[0]\n for r in roidbs[1:]:\n roidb.extend(r)\n return roidb", "def find_overlap_range(x1, lenght1, x2, length2):\n\n\n highest_start_point = max(x1, x2)\n lowest_end_point = min(x1 + lenght1, x2 + length2)\n \n if highest_start_point >= lowest_end_point:\n return None\n \n overlap_length = lowest_end_point - highest_start_point\n \n return (highest_start_point, overlap_length)", "def sequence_union(s1, s2):\n # make sure that s1 is the longer sequence and s2 is merged into it\n if sequence_length(s1) < sequence_length(s2):\n sp = s2\n s2 = s1\n s1 = sp\n s = []\n c1, dt1 = s1.pop(0)\n c2, dt2 = s2.pop(0)\n while True:\n if dt1 < dt2:\n s.append( (list( set(c1) | set(c2) ), dt1) )\n dt2 -= dt1\n try:\n c1, dt1 = s1.pop(0)\n except:\n break\n elif dt2 < dt1:\n 
s.append( (list( set(c1) | set(c2) ), dt2) )\n dt1 -= dt2\n try:\n c2, dt2 = s2.pop(0)\n except:\n c2 = []\n dt2 = np.inf\n else:\n s.append( (list( set(c1) | set(c2) ), dt1) )\n try:\n c1, dt1 = s1.pop(0)\n except:\n break\n try:\n c2, dt2 = s2.pop(0)\n except:\n c2 = []\n dt2 = np.inf \n return s", "def overlaps(self, other):\n return self.start <= other.end and self.end >= other.start", "def combine(self, other):\n if isinstance(other, sppasInterval) is False:\n AnnDataTypeError(other, \"sppasInterval\")\n\n if self > other:\n other, self = self, other\n\n if self.__end <= other.get_begin():\n return sppasInterval(self.__begin, other.get_end())\n\n return sppasInterval(other.get_begin(), self.__end)", "def compute_overlaps(boxes1, boxes2):\n # Areas of anchors and GT boxes\n area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])\n area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])\n\n # Compute overlaps to generate matrix [boxes1 count, boxes2 count]\n # Each cell contains the IoU value.\n overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0]))\n for i in range(overlaps.shape[1]):\n box2 = boxes2[i]\n overlaps[:, i] = compute_iou(box2, boxes1, area2[i], area1)\n return overlaps", "def compute_overlaps(boxes1, boxes2):\n # Areas of anchors and GT boxes\n area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])\n area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])\n\n # Compute overlaps to generate matrix [boxes1 count, boxes2 count]\n # Each cell contains the IoU value.\n overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0]))\n for i in range(overlaps.shape[1]):\n box2 = boxes2[i]\n overlaps[:, i] = compute_iou(box2, boxes1, area2[i], area1)\n return overlaps", "def compute_overlaps(boxes1, boxes2):\n # Areas of anchors and GT boxes\n area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])\n area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])\n\n # Compute overlaps to generate matrix [boxes1 count, boxes2 count]\n # Each cell contains the IoU value.\n overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0]))\n for i in range(overlaps.shape[1]):\n box2 = boxes2[i]\n overlaps[:, i] = compute_iou(box2, boxes1, area2[i], area1)\n return overlaps" ]
[ "0.75773865", "0.6600593", "0.6574509", "0.6569135", "0.63968265", "0.63313943", "0.6322615", "0.624788", "0.6216611", "0.6146239", "0.6133331", "0.60140103", "0.60021335", "0.59763306", "0.5970033", "0.58648163", "0.58545077", "0.57922995", "0.57907534", "0.57814354", "0.57276565", "0.57117796", "0.570119", "0.5663055", "0.5637245", "0.5615576", "0.5611965", "0.5564089", "0.55608976", "0.5557894", "0.55415624", "0.5528412", "0.55282027", "0.5525709", "0.5502941", "0.5498119", "0.5493681", "0.54817057", "0.5477622", "0.5473504", "0.5473504", "0.5471214", "0.5466447", "0.5442741", "0.542228", "0.5420562", "0.5417896", "0.54151344", "0.54029566", "0.53913814", "0.53880554", "0.53793", "0.5363447", "0.53532636", "0.53152573", "0.52960473", "0.5276931", "0.52701616", "0.5260858", "0.5229783", "0.5213832", "0.52128464", "0.5204378", "0.52041805", "0.52038074", "0.52027684", "0.52020377", "0.52001715", "0.5198752", "0.51809645", "0.5180692", "0.5179122", "0.5152185", "0.51515824", "0.51422185", "0.513803", "0.5135505", "0.51354015", "0.51281786", "0.512253", "0.51181525", "0.5117221", "0.51132107", "0.51060796", "0.51042306", "0.509935", "0.5088303", "0.50827193", "0.50764304", "0.50755155", "0.5062118", "0.50613815", "0.50613815", "0.5051689", "0.50477076", "0.50418615", "0.50404155", "0.5031732", "0.5031732", "0.5031732" ]
0.6790975
1
Read file into string.
def read_file(self, file: Path) -> str: with open(file) as f: return f.read()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_file(file):\n with open(file, 'r') as f:\n file_string = f.read()\n return file_string", "def read_file(path): #TODO implementme, handling paths more intelligently\n f = open(path, \"r\")\n string = f.read()\n f.close()\n return string", "def file2str(file):\n with open(file, \"r\") as textFile:\n return textFile.read()", "def ReadFileIntoString(filepath):\n with open(filepath, 'r') as file_handle:\n contents = file_handle.read()\n return contents", "def _read_file(self) -> str:\n with open(self._file_name) as fp:\n return fp.read()", "def readfile(path: Union[str, Path]) -> str:\n with open(path) as infile:\n return infile.read()", "def file_to_string(path_to_file):\n\t\twith open(path_to_file, 'r') as f:\n\t\t\tcontent = f.read()\n\t\treturn content", "def read_file(file_path):\n with open(file_path, 'r') as infile:\n return infile.read()", "def file_to_str(fname):\n data = None\n # rU = read with Universal line terminator\n with open(fname, 'rU') as f:\n data = f.read()\n return data", "def file_to_string(file_name):\n with open(file_name, 'r') as f:\n text = f.read()\n # delete original file\n os.remove(file_name)\n return text", "def read_file(filepath: str) -> str:\n with open(filepath, \"r\") as filep:\n return filep.read()", "def read_file(filename):\n return open(filename).read()", "def read_file(file_name):\n with open(file_name, 'r') as f:\n return f.read()", "def read_from_file(file_name):\n with open(file_name, \"rb\") as text_file:\n return text_file.read()", "def read_file(file):\n with open(file, \"r\") as fid:\n return fid.read()", "def read_file(file_name):\n with open(file_name, \"r\") as f:\n return f.read()", "def read_file(file_path, mode='r', encoding=\"utf-8\"):\n with codecs.open(file_path, mode, encoding=encoding) as fp:\n return fp.read().strip()", "def ReadFile(self, filename):\r\n file = open(filename, 'rb')\r\n result = \"\"\r\n try:\r\n result = file.read()\r\n finally:\r\n file.close()\r\n return result", "def _readfile(dirpath, filename):\n try:\n with codecs.open(os.path.join(dirpath, filename), \"r\", \"utf-8\") as f:\n return f.read()\n except IOError:\n return u\"\"", "def ReadFile(self, filename):\n file = open(filename, 'rb')\n result = \"\"\n try:\n result = file.read()\n finally:\n file.close()\n return result", "def read_file(name_file):\n with open(name_file, 'r') as file:\n return file.read()", "def read_file(name):\n with open(name, 'r') as my_file:\n return my_file.read().encode('utf-8')", "def local_file_as_string(self, file_path):\n with open(file_path, 'rb') as file:\n string = file.read().decode('utf-8')\n return string", "def read_file(self, file_name: str)-> str:\n if not os.path.exists(file_name):\n raise IOError(\"The File {} doesn't exists!\".format(file_name))\n\n with open(file_name) as file:\n return file.read().strip()", "def readfile(filename):\n with open(filename, encoding=\"utf-8\") as file:\n raw = file.read()\n return raw", "def _Read(filename):\n with open(filename, 'rb') as f:\n return f.read()", "def file_to_string(file_path):\n data = ''\n try:\n with open(file_path, 'r') as file:\n data = file.read()\n file.close()\n except FileNotFoundError as err: # Sublime give an error, but it's not.\n print(Bcolors.FAIL + 'ERROR: ' + file_path + ' not found.' + Bcolors.ENDC)\n print(str(err))\n sys.exit(2)\n except PermissionError as err:\n print(Bcolors.FAIL + 'ERROR: ' + file_path + ', Permission Denied.' 
+ Bcolors.ENDC)\n print(str(err))\n sys.exit(2)\n return data", "def read_file(filename: str, mode: str = \"r\") -> str:\n with open(filename, mode) as file:\n file_content = file.read()\n return file_content", "def read_file(filename):\n with open(filename) as fp:\n return fp.read()", "def read_file(filename):\n with codecs.open(filename, 'r', 'utf8') as f:\n return f.read()", "def read_file(filename):\n with codecs.open(filename, 'r', 'utf8') as f:\n return f.read()", "def read(file):\n with open(file, 'r') as file:\n return file.read()", "def read_file(file) -> str:\n file = open(file, \"r\")\n my_string = file.read()\n return get_clean_text(my_string)", "def read(self, filename):\n\t\treturn codecs.open(filename, 'r', 'utf8').read()", "def read_file(file_path):\n\n file_string = ''\n\n with open(file_path, 'r', newline='') as file:\n for line in file:\n file_string = file_string + line.rstrip('\\n')\n\n return file_string", "def read_text_file(str_name_file: str):\n content: str = ''\n with open(str_name_file, mode=\"r\", encoding='utf-8') as file:\n print(\"file being read: \" + str_name_file + \"\\n\")\n content = file.read()\n return content", "def read(path):\n with open(path) as f:\n return f.read()", "def _ReadFile(filepath):\n with open(filepath) as f:\n return f.read()", "def read_file(path):\n try:\n with open(path, 'r') as text_file:\n return \"\".join(text_file.readlines()).strip()\n except IOError:\n exit(\"Error: file '%s' is not readable!\" % path)", "def local_read(filename):\n full_filename = os.path.join(\n os.path.abspath(os.path.dirname(__file__)),\n filename)\n return codecs.open(full_filename, 'r', 'utf-8').read()", "def read_file(file):\n f = open(file, \"r\", encoding=\"utf8\")\n return f.read()", "def read_data() -> str:\n with open('input.txt') as input_file:\n return input_file.read()", "def file_read(path: str) -> str:\n if os.path.isfile(path):\n while True:\n try:\n with open(path, \"r\") as fptr:\n return fptr.read()\n except PermissionError:\n pass\n return \"\"", "def read(file_name):\n with io.open(os.path.join(os.path.dirname(__file__), file_name),\n encoding='utf-8') as f:\n return f.read()", "def SimpleRead(fn):\n content = \"\"\n try:\n content = open(fn).read()\n except :\n print(\"Failed to read file: %s\\n\"%(fn))\n print sys.exc_info()[1]\n\n return content", "def read(path, encoding=\"utf-8\"):\n try:\n with io.open(path, encoding=encoding) as f:\n return f.read()\n except Exception as e:\n logger.error(\"read: %s failed. 
Error: %s\", path, e)\n return \"\"", "def read(name):\n\n return open(name).read()", "def read_file():\n with open(FILE_NAME) as f:\n data = f.read()\n return data", "def read_file(filename):\n path = os.path.abspath(os.path.dirname(__file__))\n filepath = os.path.join(path, filename)\n try:\n return open(filepath).read()\n except IOError:\n return ''", "def ReadFile(f_path):\n data = ''\n\n if f_path:\n try:\n fh = open(f_path, 'r')\n try:\n data = fh.read()\n finally:\n fh.close()\n except IOError:\n return ''\n\n return data", "def read_file(filename):\n path = os.path.abspath(os.path.dirname(__file__))\n filepath = os.path.join(path, filename)\n try:\n return open(filepath).read()\n except:\n return ''", "def load_file():\n s = ''\n\n with open(FILE_PATH) as f:\n for line in f:\n # .rstrip method gets rid of new line \"\\n\" characters\n s = s + line.rstrip() \n return s", "def readfile(filename):\n\n infile = open(filename, \"r\") # open file for reading\n\n # Use Python's file read function to read the file contents\n filetext = infile.read().splitlines()\n\n infile.close() # close the file\n\n return filetext # the text of the file, as a single string", "def openfile(path:str) -> str:\n with open(file=path, mode='br') as file:\n r_0 = file.readline()\n return str(r_0)", "def readFromTextFile(self, file_name):\n with open(file_name, 'r') as file_obj:\n return file_obj.read()", "def _get_string(self):\n result = self.sfile.readline().rstrip('\\n')\n return result", "def read(filename):\n\n path = os.path.join(os.path.dirname(__file__), filename)\n\n with open(path) as f:\n return f.read()", "def get_file_text(file_name):\n\tf = open(file_name, 'r')\n\ttext = f.read()\n\treturn text", "def readFile(self, name):\n\t\ttry:\n\t\t\tf = open(name, 'r')\n\t\t\tlines = f.readlines()\n\t\t\tf.close()\n\t\texcept IOError:\n\t\t\treturn None\n\n\t\treturn join(lines, \"\")", "def get_file_content(self, file_name: str):\n file_name = Path(__file__).absolute().parents[1].joinpath(file_name)\n try:\n with file_name.open('r') as file:\n intermediate = file.readlines()\n return ''.join(intermediate)\n except FileNotFoundError as message:\n self.logger.error(message)\n return ''", "def read_file(filename):\n if os.path.isfile(filename):\n with open(filename, 'r') as f:\n return f.read()", "def _read_one_line_file(name):\n with open(name, \"rb\") as file:\n data = file.read()\n return data.decode('utf-8').strip()", "def read_file(filename):\n open_kwargs = {}\n if sys.version_info.major == 3:\n open_kwargs = {'encoding': 'utf-8'}\n\n path = os.path.abspath(os.path.dirname(__file__))\n filepath = os.path.join(path, filename)\n with open(filepath, **open_kwargs) as filecontents:\n return filecontents.read()", "def readFile(fileName):\n with open(fileName, 'r', encoding='utf-8') as f:\n text = f.read()\n return text", "def open_and_read_file(file_path):\n\n # your code goes here\n text_file = open(file_path)\n text_string= text_file.read()\n text_file.close()\n return text_string", "def read_file(file_name, enc=\"latin-1\"):\n f = open(file_name, \"r\", encoding=enc)\n content = \"\".join(f.readlines())\n f.close()\n return content", "def readText(fileName):\n fileText = \"\"\n with open(fileName,\"r\") as fileObject:\n fileText = fileObject.read()\n \n return fileText", "def read_full_file(filename, options=\"rb+\"):\n with open(filename, options) as f:\n text = f.read()\n return text", "def read_file(path):\n assert_is_string(path)\n f = open(path, \"r\")\n data = f.read()\n f.close()\n return data", 
"def read_file(*file_name: str) -> str:\n with open(os.path.join(HERE, *file_name)) as f:\n return f.read()", "def read_file(file_path):\n\n text = ''\n with open(file_path, 'r') as file:\n for line in file.readlines():\n text += line\n return text", "def read(fn):\n with open(os.path.join(os.path.dirname(__file__), fn), encoding='utf-8') as f:\n return f.read()", "def read_from_file(path):\n with io.open(path, 'rb') as ios:\n return read(ios)", "def read_file(filename):\n f = open(filename)\n contents = f.read()\n f.close()\n return contents", "def read_file(filename):\n with codecs.open(os.path.join(here, filename), encoding='utf-8') as f:\n content = f.read()\n return content", "def open_and_read_file(file_path):\n\n # Open file and read into memory\n text = open(file_path).read().rstrip()\n\n # Replace newlines with space\n #text = text.replace('\\n', ' ')\n\n return text", "def fs_read(file_path):\n try:\n with open(str(file_path), 'r') as f:\n return f.read()\n except UnicodeDecodeError:\n with open(str(file_path), 'r', encoding='latin-1') as f:\n return f.read()\n except IOError as e:\n raise e", "def read_file(file):\n try:\n with open(file, \"r\") as f:\n content = f.read().replace(\"\\n\", \"\")\n return content\n except:\n return f\"[ERROR]: could not open '{file}'\"", "def _read_file(self, filePath):\n with open(filePath) as f:\n fileContent = f.read()\n f.close()\n return fileContent.strip()", "def get_file_data(file_name):\r\n try:\r\n with open(file_name, 'rb') as input_file:\r\n data = input_file.read()\r\n return data\r\n except Exception as err:\r\n return str(err).encode()", "def ReadFile(path, mode='r'):\n with open(path, mode) as f:\n return f.read()", "def read_as_text(filename: str) -> str:\n with open(filename) as file_handle:\n txt = file_handle.read()\n return txt", "def read(filename):\n with open(os.path.join(os.path.dirname(__file__), filename)) as f:\n return f.read()", "def _read(fname):\n fpath = os.path.dirname(__file__)\n fpath = os.path.join(fpath, fname)\n with open(fpath, 'r') as file_:\n return file_.read()", "def read_from_file(filename):\n\twith open(filename, 'r') as myfile:\n\t\ttext=myfile.read()\n\treturn text", "def read_file(self, file_name):\n\n with open(file_name, 'r') as file_input:\n file_content = file_input.read()\n return file_content", "def read(path, encoding=\"utf-8\"):\n with open(path, \"rb\") as f:\n return f.read().decode(encoding)", "def open_and_read_file(file_path):\n text_data = open(file_path).read()\n # print text_data\n return text_data", "def _file_read(self, file: str) -> str:\n with open(f\"tests/resources/{file}\", \"r\") as fs:\n result = \"\\n\".join(fs.read().splitlines())\n return result", "def _read_file(file_name):\n file_handle = file(file_name)\n try:\n return file_handle.read()\n finally:\n file_handle.close()", "def read(self, name: str) -> str:\n path = self.get_path(name)\n if not os.path.exists(path):\n return \"\"\n\n with open(path, \"r\") as fh:\n return fh.read()", "def readfile(fname, mode='rb'):\n f = open(fname, mode)\n raw = f.read()\n f.close()\n return raw", "def read_file(file_path: str) -> str:\n try:\n with open(file=file_path, mode='r', encoding=\"utf8\") as f:\n return f.read()\n\n except FileNotFoundError:\n raise FileNotFoundError(f'No text file was found at location {file_path}')", "def read_file(file_path):\n try:\n input_file = open(file_path)\n text_content = input_file.read()\n input_file.close()\n return text_content\n except IOError:\n print (\"Can not read from file\")", "def 
open_and_read_file(file_path):\n\n # Read the file, return text as a string titled \"contents\"\n contents = open(file_path).read()\n\n # Return contents of your file as one long string\n return contents", "def _read_data(self, txtfile):\n data_string = open(txtfile,'r').read()\n return data_string", "def read(path):\n with open(path) as f:\n contents = f.read()\n return contents", "def readFromFile(self, path):\n log(logging.DEBUG, \"Read from file: \" + path)\n with open(path, \"r\") as f:\n return f.read()", "def read(path):", "def txt2str(file: str) -> str:\n return get_first_line(file)" ]
[ "0.8227773", "0.81447333", "0.8022711", "0.79404026", "0.7933046", "0.78936315", "0.78294694", "0.7784561", "0.7775774", "0.7751609", "0.77187586", "0.7716427", "0.77023", "0.76969075", "0.76942647", "0.7690147", "0.7679615", "0.7668749", "0.76600176", "0.7641418", "0.7638525", "0.7632242", "0.7614777", "0.7610765", "0.7597486", "0.75866437", "0.7578475", "0.75502867", "0.7550041", "0.7523464", "0.7523464", "0.752149", "0.7518519", "0.7495588", "0.7476836", "0.7439948", "0.7427164", "0.742537", "0.742303", "0.739871", "0.73865175", "0.7362231", "0.73570377", "0.7350546", "0.73490566", "0.73405576", "0.73226994", "0.7321811", "0.73178446", "0.731702", "0.72981405", "0.7297321", "0.72419715", "0.724192", "0.7241335", "0.7234792", "0.7226412", "0.7217687", "0.7213066", "0.7195827", "0.7189282", "0.7189028", "0.71810734", "0.7178534", "0.7165595", "0.7162301", "0.7162046", "0.71527106", "0.7151157", "0.71501297", "0.714834", "0.71458733", "0.7141725", "0.7131727", "0.7131503", "0.7124653", "0.7122981", "0.7104181", "0.7100375", "0.70903486", "0.7084091", "0.70788556", "0.7078762", "0.7070451", "0.7066599", "0.7062914", "0.7048186", "0.704805", "0.7047627", "0.7033052", "0.7026601", "0.70155746", "0.70153254", "0.70139396", "0.7001586", "0.6988611", "0.6968907", "0.69659126", "0.69327646", "0.69279563" ]
0.7827757
7
Create a new websocket and connect its input and output to the subprocess with the specified PID.
async def websocket_handler(self, request, ws): if self.repl_mgr is None: return sanic.response.HTTPResponse(status=404) log.info('initiating websocket') await self.repl_mgr.process_websocket(ws) log.info('terminating websocket')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def launch_web_socket(vnc_port, web_socket_port, server):\n\n path = os.path.abspath(os.path.dirname(__file__))\n ws = os.path.join(path, \"../../webConsole/bin/websockify.py\")\n\n web_socket_path = os.path.abspath(ws)\n\n cmd = \"%s %s:%s %s:%s --idle-timeout=120 &\" % (web_socket_path, server, vnc_port, server, web_socket_port)\n\n logger.debug(cmd)\n\n proc = subprocess.Popen(cmd, shell=True, close_fds=True)\n time.sleep(1)\n return proc.pid", "def setup_websocket(ws_url, service_account_file, audience, router_password, source_port, dest_ip, dest_port):\n def on_message(ws, message):\n \"\"\"Handle a message\"\"\"\n handle_message(ws, message, router_password, source_port, dest_ip, dest_port)\n\n def on_error(ws, error):\n \"\"\"Handle an error by exiting or closing if it is a KeyboardInterrupt (Ctrl+C)\"\"\"\n if type(error) is KeyboardInterrupt:\n logger.info('Cancel requested (Ctrl+C), closing connection.')\n ws.close()\n else:\n logger.error(\"The following error occurred:\\n{error}\".format(error=error))\n sys.exit(1)\n\n def on_close(ws):\n \"\"\"Handle the WebSocket close\"\"\"\n logger.info('WebSocket closed.')\n\n def on_open(ws):\n \"\"\"Handle the WebSocket opening\"\"\"\n logger.info('WebSocket open, sending authentication.')\n authenticate(ws, service_account_file, audience)\n ws.send(STATUS_COMMAND_FORMAT.format(status_payload=json.dumps(get_status(router_password, source_port, dest_ip, dest_port))))\n\n return websocket.WebSocketApp(ws_url,\n on_open=on_open,\n on_message=on_message,\n on_error=on_error,\n on_close=on_close)", "def main():\n global APP\n APP = make_app()\n APP.clients = [] # global list of all connected websocket clients\n APP.printer = Serial('/dev/ttyUSB0', baudrate=19200)\n APP.listen('1337', '0.0.0.0')\n log('Listening on http://0.0.0.0:1337')\n tornado.ioloop.IOLoop.current().start()", "async def main():\n await serve_websocket(handle_server, SERVER, PORT, ssl_context=None)", "def add_ws(self):\n def on_message(ws, message):\n print(message)\n\n def on_error(ws, error):\n pass\n\n def on_close(ws):\n pass\n\n def on_open(ws):\n thread.start_new_thread(self.run, ())\n\n ws = websocket.WebSocketApp(self.url + '/',\n on_message = on_message,\n on_error = on_error,\n on_close = on_close)\n\n ws.on_open = on_open\n self.ws = ws", "async def create_websocket_server(sock, filter=None): # pylint: disable=W0622\n ws = Websocket()\n await ws.start_server(sock, filter=filter)\n return ws", "def start_server():\n server = WebsocketServer(9001, host='0.0.0.0')\n server.set_fn_message_received(message_received)\n server.set_fn_client_left(client_left)\n print(\"Started\")\n server.run_forever()", "def __init__(self, websocket_ip, port=9090):\n print(\"Connecting to websocket: {}:{}\".format(websocket_ip, port))\n self.ws = websocket.create_connection(\n 'ws://' + websocket_ip + ':' + str(port))\n self._advertise_dict = {}", "def initSocket(self):\n \n # Check WebSocket support\n if self.nodejs:\n try:\n WebSocket = require('ws')\n except Exception:\n # Better error message\n raise \"FAIL: you need to 'npm install -g ws' (or 'websocket').\"\n else:\n WebSocket = window.WebSocket\n if (WebSocket is undefined):\n window.document.body.innerHTML = 'Browser does not support WebSockets'\n raise \"FAIL: need websocket\"\n # Open web socket in binary mode\n self.ws = ws = WebSocket(window.flexx.ws_url)\n #ws.binaryType = \"arraybuffer\" # would need utf-decoding -> slow\n \n def on_ws_open(evt):\n window.console.info('Socket connected')\n ws.send('hiflexx ' + 
flexx_session_id)\n def on_ws_message(evt):\n window.flexx.last_msg = msg = evt.data or evt\n #msg = window.flexx.decodeUtf8(msg)\n window.flexx.command(msg)\n def on_ws_close(evt):\n self.ws = None\n msg = 'Lost connection with server'\n if evt and evt.reason: # nodejs-ws does not have it?\n msg += ': %s (%i)' % (evt.reason, evt.code)\n if (not window.flexx.is_notebook) and (not self.nodejs):\n window.document.body.innerHTML = msg\n else:\n window.console.info(msg)\n def on_ws_error(self, evt):\n self.ws = None\n window.console.error('Socket error')\n \n # Connect\n if self.nodejs:\n ws.on('open', on_ws_open)\n ws.on('message', on_ws_message)\n ws.on('close', on_ws_close)\n ws.on('error', on_ws_error)\n else:\n ws.onopen = on_ws_open\n ws.onmessage = on_ws_message\n ws.onclose = on_ws_close\n ws.onerror = on_ws_error", "def start(turn_handler):\n\n if os.environ.get('BOTBOX_SECRET'):\n print('Using env secret:', os.environ['BOTBOX_SECRET'])\n headers = {'Authorization': os.environ['BOTBOX_SECRET']}\n elif len(sys.argv) > 1:\n print('Using cli secret:', sys.argv[1])\n headers = {'Authorization': sys.argv[1]}\n else:\n print('Using no authentication')\n headers = []\n\n # get the URL for the server from an environment variable if it is set,\n # otherwise use the default localhost\n if os.environ.get('BOTBOX_SERVER'):\n url = (WS_SERVER_SCHEME + '://'\n + os.environ['BOTBOX_SERVER'] + ':' + WS_SERVER_PORT)\n else:\n url = WS_SERVER_SCHEME + '://' + WS_SERVER_URL + ':' + WS_SERVER_PORT\n\n print(\"Connecting to:\", url)\n\n ws = websocket.WebSocketApp(\n url,\n on_open = _on_open,\n on_message = lambda ws, msg: _on_message(ws, msg, turn_handler),\n on_error = _on_error,\n on_close = _on_close,\n header = headers\n )\n\n ws.run_forever()", "def _launch_process_watcher(self, parent_pid, child_pid, child_host, child_port, minecraft_dir):\n\n multiprocessing.freeze_support()\n parent_conn, child_conn = multiprocessing.Pipe()\n self._logger.info(\"Starting process watcher for process {} @ {}:{}\".format(child_pid, child_host, child_port))\n p = multiprocessing.Process(\n target=InstanceManager._process_watcher, args=(\n parent_pid, child_pid, \n child_host, child_port, \n minecraft_dir, child_conn))\n \n def update_port(port):\n parent_conn.send([port])\n # p.daemon = True\n\n p.start()\n return p, update_port", "def connect_subproc(args, service=VoidService, config={}):\n from subprocess import Popen, PIPE\n proc = Popen(args, stdin=PIPE, stdout=PIPE)\n conn = connect_pipes(proc.stdout, proc.stdin, service=service, config=config)\n conn.proc = proc # just so you can have control over the process\n return conn", "def websocket(self) -> Websocket:\n self.__http_client.data_snapshot()\n host_uri = f'ws://{self.__http_client.host_ip}/api/v1/data/stream'\n subprotocols = [f'SessionToken_{self.__http_client.session_token}', \"object\"]\n return Websocket(host_uri, subprotocols, timeout=self.__http_client.request_timeout)", "def initSocket(self):\n \n # Check WebSocket support\n if self.nodejs:\n try:\n WebSocket = require('ws') # does not work on Windows?\n #WebSocket = require('websocket').client\n except Exception:\n # Better error message\n raise \"FAIL: you need to 'npm install -g ws'.\"\n else:\n WebSocket = window.WebSocket\n if (window.WebSocket is undefined):\n document.body.innerHTML = 'This browser does not support WebSockets'\n raise \"FAIL: need websocket\"\n # Open web socket in binary mode\n self.ws = ws = WebSocket(flexx.ws_url)\n ws.binaryType = \"arraybuffer\"\n \n def 
on_ws_open(evt):\n console.info('Socket connected')\n def on_ws_message(evt):\n flexx.last_msg = evt.data or evt\n msg = flexx.decodeUtf8(flexx.last_msg)\n flexx.command(msg)\n def on_ws_close(evt):\n self.ws = None\n msg = 'Lost connection with server'\n if evt and evt.reason: # nodejs-ws does not have it?\n msg += ': %s (%i)' % (evt.reason, evt.code)\n if (not flexx.is_notebook) and (not self.nodejs):\n document.body.innerHTML = msg\n else:\n console.info(msg)\n def on_ws_error(self, evt):\n self.ws = None\n if flexx.is_notebook:\n console.error('Socket error: re-run flexx.app.init_socket() to connect.')\n else:\n console.error('Socket error')\n \n # Connect\n if self.nodejs:\n ws.on('open', on_ws_open)\n ws.on('message', on_ws_message)\n ws.on('close', on_ws_close)\n ws.on('error', on_ws_error)\n else:\n ws.onopen = on_ws_open\n ws.onmessage = on_ws_message\n ws.onclose = on_ws_close\n ws.onerror = on_ws_error", "def start_server(self):\n self.logger.info(\"Starting WebSocket server on port %d\" % self.port)\n http_server = Thread(target=tornado.ioloop.IOLoop.instance().start)\n http_server.start()", "def _spawn_stream_process(self, process_id, name, module, cls, config, proc_attr):\n process_instance = self._create_process_instance(process_id, name, module, cls, config, proc_attr)\n\n listen_name = get_safe(config, \"process.listen_name\") or name\n log.debug(\"Stream Process (%s) listen_name: %s\", name, listen_name)\n process_instance._proc_listen_name = listen_name\n\n process_instance.stream_subscriber = StreamSubscriber(process=process_instance, exchange_name=listen_name, callback=process_instance.call_process)\n\n # Add publishers if any...\n publish_streams = get_safe(config, \"process.publish_streams\")\n pub_names = self._set_publisher_endpoints(process_instance, publish_streams)\n\n rsvc = self._create_listening_endpoint(node=self.container.node,\n from_name=process_instance.id,\n process=process_instance)\n\n # cleanup method to delete process queue (@TODO: leaks a bit here - should use XOs)\n def cleanup(*args):\n self._cleanup_method(process_instance.id, rsvc)\n for name in pub_names:\n p = getattr(process_instance, name)\n p.close()\n\n proc = self.proc_sup.spawn(name=process_instance.id,\n service=process_instance,\n listeners=[rsvc, process_instance.stream_subscriber],\n proc_name=process_instance._proc_name,\n cleanup_method=cleanup)\n proc.proc._glname = \"ION Proc %s\" % process_instance._proc_name\n self.proc_sup.ensure_ready(proc, \"_spawn_stream_process for %s\" % process_instance._proc_name)\n\n # map gproc to process_instance\n self._spawned_proc_to_process[proc.proc] = process_instance\n\n # set service's reference to process\n process_instance._process = proc\n\n self._process_init(process_instance)\n self._process_start(process_instance)\n\n try:\n proc.start_listeners()\n except IonProcessError:\n self._process_quit(process_instance)\n self._call_proc_state_changed(process_instance, ProcessStateEnum.FAILED)\n raise\n\n return process_instance", "def start(self) -> None:\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n self.wserver = websockets.serve(self.__producer_handler, port=self.port, loop=loop)\n try:\n # run server forever\n self.server = asyncio.get_event_loop()\n self.server.run_until_complete(self.wserver)\n self.server.run_forever()\n except Exception:\n self.close()\n\n loop.run_forever()", "def start(self, websocketport: Optional[int] = None) -> None:\n if self.config:\n websocketport = self.config.websocketport\n\n if not 
websocketport:\n LOGGER.error(\"No websocket port specified\")\n return\n\n self.websocket = WSClient(\n self.session, self.host, websocketport, self.session_handler\n )\n self.websocket.start()", "def run(on_create):\n from twisted.internet import reactor\n\n # multiple, configurable transports, either via dict-like config, or\n # from native Twisted endpoints\n transports = [\n {\n \"type\": \"websocket\",\n \"url\": \"ws://127.0.0.1:8080/ws\"\n }\n ]\n\n # a connection connects and automatically reconnects WAMP client\n # transports to a WAMP router. A connection has a listener system\n # where user code can hook into different events : on_join\n connection = Connection(on_create, realm='public',\n transports=transports, reactor=reactor)\n\n # the following returns a deferred that fires when the connection is\n # finally done: either by explicit close by user code, or by error or\n # when stop reconnecting\n done = connection.connect()\n\n def finish(res):\n print(res)\n reactor.stop()\n\n done.addBoth(finish)\n\n reactor.run()", "def open(self, pysession_id):\n self.id = id(self)\n self.funcserver = self.application.funcserver\n self.pysession_id = pysession_id\n\n # register this connection with node\n self.state = self.funcserver.websocks[self.id] = {\"id\": self.id, \"sock\": self}", "def createConnectionToCli(self):\n connected = False\n # loop until connected\n while not connected:\n try:\n self.dataClient = Client(\n ('localhost', 5000), authkey=b'secret password')\n connected = True\n except ConnectionRefusedError:\n pass\n\n self.logger.debug('Connected to Process!')", "def start(self) -> None:\n if self.config:\n self.websocket = self.ws_client(\n self.loop, self.session, self.host,\n self.config.websocketport, self.async_session_handler)\n self.websocket.start()\n else:\n _LOGGER.error('No deCONZ config available')", "def open(self):\n self._lock.acquire()\n try:\n self._relaypid, self._portoffset = self._check_tcprelay()\n logger.debug(\n \"PIGGYBACK TCPRELAY\"\n \"PID: {0} PORT: {1}\".format(self._relaypid,\n self._portoffset))\n except AttributeError:\n # TODO: tcprelays might want to close when test is over???\n self._portoffset = get_available_portoffset()\n command = \"/usr/local/bin/tcprelay --portoffset {0} \" \\\n \"--locationid {1} rsync telnet \" \\\n \"ssh > /tmp/tcprelay.{1}.log 2>&1\" \\\n \" &\".format(self._portoffset, self.locationid_param)\n logger.debug(\"SPAWNING TCPRELAY - {0}\".format(command))\n child = subprocess.Popen([\"bash\", \"-c\", command], close_fds=True)\n time.sleep(0.5)\n try:\n self._relaypid, self._portoffset = self._check_tcprelay()\n except AttributeError:\n logger.error(\n \"FAILED to SPAWN TCPRELAY - CMD {0} \"\n \"OUTPUT: {1} ERROR: {2} RC: {3}\".format(command,\n child.stdout,\n child.stderr,\n child.returncode))\n finally:\n self._lock.release()", "def connect_to_worker():\n socket = context.socket(zmq.REQ)\n socket.connect(\"tcp://localhost:5555\")\n return socket", "async def _outgoing_ws(self, pid, websocket):\n character = self.players[pid]\n\n while not websocket.closed:\n msg = await character.msgs.get()\n\n # TODO: try to get more messages and buffer writes?\n try:\n await websocket.send(msg + \"\\n\\r\")\n except websockets.exceptions.ConnectionClosed:\n break\n\n logging.debug(\"_outgoing_ws closed for %s\", pid)", "def startNode(klass):\n try:\n ws = klass('ws://localhost:8080/ws')\n ws.daemon = False\n ws.connect()\n except:\n ws.close()", "def __init__(self, port: int, max_worker_threads: int = 4):\n 
super().__init__(max_worker_threads=max_worker_threads)\n self.do_stop = False\n self.port: int = port\n self._is_running: bool = False\n self._server: WebSocketServer = websockets.serve(\n ws_handler=self._handler,\n host='127.0.0.1',\n port=self.port,\n loop=self._loop)", "async def gw_start(test_cli):\n gw_url = await get_gw(test_cli)\n return await websockets.connect(gw_url)", "def _ws_connect(self):\n\n return websocket.websocket_connect(\n 'ws://localhost:{}{}'.format(self.get_http_port(), self.request)\n )", "def open(self):\n broker = os.path.join(getsitepackages()[0], 'pynq_networking', 'rsmb',\n 'rsmb', 'src', 'broker_mqtts')\n\n self.close()\n os.system(f\"nohup {broker} > {self.log} &\")\n\n for t in MQTT_PACKET_TYPES:\n bind_layers(MQTT, t, {'type': t.type})\n\n bind_layers(TCP, MQTT_Stream, {'dport': self.mqtt_port})\n bind_layers(TCP, MQTT_Stream, {'sport': self.mqtt_port})\n\n for t in MQTTSN_PACKET_TYPES:\n bind_layers(MQTTSN, t, {'type': t.type})\n\n bind_layers(UDP, MQTTSN, {'dport': self.mqttsn_port})\n bind_layers(UDP, MQTTSN, {'sport': self.mqttsn_port})", "def websocket_init(self, payload, *args, **kwargs):\n data = json.loads(str(payload, \"utf-8\"))\n self.is_connecting = False\n if url := data.get(\"url\"):\n self.gateway = f\"{url}/?v={DISCORD_API_VERSION}&encoding=json\".encode(\"utf-8\")\n useragent = kwargs.pop(\"useragent\", DISCORD_USER_AGENT)\n headers = kwargs.pop(\n \"headers\",\n {\n \"Authorization\": [f\"Bot {DISCORD_BOT_TOKEN}\"],\n \"Content-Type\": [\"application/json\"],\n },\n )\n\n logger.log_info(\"Connecting to Discord Gateway...\")\n WebSocketClientFactory.__init__(\n self, url, *args, headers=headers, useragent=useragent, **kwargs\n )\n self.start()\n else:\n logger.log_err(\"Discord did not return a websocket URL; connection cancelled.\")", "async def test_websocket_application():\n application = URLRouter([path(\"testws/<str:message>/\", KwargsWebSocketApp())])\n communicator = WebsocketCommunicator(application, \"/testws/test/\")\n connected, subprotocol = await communicator.connect()\n # Test connection\n assert connected\n assert subprotocol is None\n message = await communicator.receive_from()\n assert message == \"test\"\n await communicator.disconnect()", "def on_websocket_open(self) -> None:\n raise NotImplementedError() # pragma: no cover", "def main():\n\n symbols = constants.SYMBOL_LIST\n if not symbols:\n logging.info(\"No symbol entered to continue.\")\n sys.exit()\n\n try:\n input_price = float(constants.PRICE_INPUT)\n except ValueError:\n logging.error(\"That wasn't a number!\")\n\n data_feeder = DataFeeder(symbols, input_price)\n\n def on_message(ws, message, data_feeder):\n \"\"\"\n This function is called when a message is received by the web-socket.\n \"\"\"\n data_feeder.store_trading_data_into_db(data=data_feeder.process_received_message(message))\n\n def on_error(ws, error):\n \"\"\"\n This function is called when there is an error in the web-socket connection.\n \"\"\"\n logging.error(\"Error:-\", error)\n\n def on_close(ws):\n \"\"\"\n This function is called when the web-socket connection is closed.\n \"\"\"\n logging.info(\"### Web-socket Connection Closed ###\")\n\n try:\n websocket.enableTrace(True)\n ws = websocket.WebSocketApp(data_feeder.url,\n on_message = lambda ws, message: on_message(ws, message, data_feeder),\n on_error = on_error,\n on_close = on_close)\n ws.run_forever()\n except Exception as e:\n logging.error('Could not connect to web-socket',str(e))", "async def ws_cmd(args):\n url = 
\"ws://{}:{}/ws/device/\".format(\n args.server, args.port)\n headers = {'devicetoken': args.token}\n while True:\n try:\n async with websockets.connect(\n url, extra_headers=headers) as websocket:\n logger.info(\"ws server connected...\")\n try:\n while True:\n data = await websocket.recv()\n data = json.loads(data)\n\n if data['type'] == 'cmd':\n status, msg = await run_cmd(data['cmd'])\n logging.info(\"result: {}\".format(msg))\n await websocket.send(json.dumps({\n \"type\": \"cmd\",\n \"msg\": msg,\n }))\n except Exception:\n logger.exception(\"{} error\".format(data))\n except Exception:\n await asyncio.sleep(args.retval)\n logger.info(\"retry connected...\")", "def start_websocket_server(self, addr, port):\n app = SLSApplication(self, default_host=addr)\n app.listen(port)\n log.info(f\"Serving SLS/Websocket on ({addr}, {port})\")\n tornado.ioloop.IOLoop.current().start()", "async def test_websocket_communicator():\n communicator = WebsocketCommunicator(SimpleWebsocketApp(), \"/testws/\")\n # Test connection\n connected, subprotocol = await communicator.connect()\n assert connected\n assert subprotocol is None\n # Test sending text\n await communicator.send_to(text_data=\"hello\")\n response = await communicator.receive_from()\n assert response == \"hello\"\n # Test sending bytes\n await communicator.send_to(bytes_data=b\"w\\0\\0\\0\")\n response = await communicator.receive_from()\n assert response == b\"w\\0\\0\\0\"\n # Test sending JSON\n await communicator.send_json_to({\"hello\": \"world\"})\n response = await communicator.receive_json_from()\n assert response == {\"hello\": \"world\"}\n # Close out\n await communicator.disconnect()", "def connect_subproc():\n return factory.connect_subproc([sys.executable, \"-u\", SERVER_FILE, \"-q\", \"-m\", \"stdio\"], \n SlaveService)", "async def on_connect(websocket, path):\n charge_point_id = path.strip('/')\n cp = ChargePoint(charge_point_id, websocket)\n\n await cp.start()", "def websocket_servient():\n\n ws_port = find_free_port()\n ws_server = WebsocketServer(port=ws_port)\n\n servient = Servient(catalogue_port=None)\n servient.add_server(ws_server)\n\n @tornado.gen.coroutine\n def start():\n raise tornado.gen.Return((yield servient.start()))\n\n wot = tornado.ioloop.IOLoop.current().run_sync(start)\n\n property_name_01 = uuid.uuid4().hex\n property_name_02 = uuid.uuid4().hex\n action_name_01 = uuid.uuid4().hex\n event_name_01 = uuid.uuid4().hex\n\n td_dict = {\n \"id\": uuid.uuid4().urn,\n \"name\": uuid.uuid4().hex,\n \"properties\": {\n property_name_01: {\n \"observable\": True,\n \"type\": \"string\"\n },\n property_name_02: {\n \"observable\": True,\n \"type\": \"string\"\n }\n },\n \"actions\": {\n action_name_01: {\n \"input\": {\n \"type\": \"object\"\n },\n \"output\": {\n \"type\": \"string\"\n },\n }\n },\n \"events\": {\n event_name_01: {\n \"type\": \"string\"\n }\n },\n }\n\n td = ThingDescription(td_dict)\n\n exposed_thing = wot.produce(td.to_str())\n exposed_thing.expose()\n\n @tornado.gen.coroutine\n def action_handler(parameters):\n input_value = parameters.get(\"input\")\n arg_b = input_value.get(\"arg_b\") or uuid.uuid4().hex\n raise tornado.gen.Return(input_value.get(\"arg_a\") + arg_b)\n\n exposed_thing.set_action_handler(action_name_01, action_handler)\n\n yield servient\n\n @tornado.gen.coroutine\n def shutdown():\n yield servient.shutdown()\n\n tornado.ioloop.IOLoop.current().run_sync(shutdown)", "def start(self):\n if config['port'] or config['host']:\n port = config['port'] or 5222\n host = config['host'] or 
sleekxmpp.JID(config['jid']).host\n addr = (host, port)\n else:\n addr = tuple()\n self.connect(addr)\n self.process(threaded=True)", "async def register(websocket):\n app['websockets'].add(websocket)\n await notify_users()", "def get_websocket(host, port, route='/', ssl=False):\n client = MessageBusClient(host, port, route, ssl)\n client.run_in_thread()\n return client", "def push_message(target_binary, target_platform, target_type, target_port, message):\n stderr = \"\"\n stdout = \"\"\n if target_type == APP_TYPE_SERVER:\n try:\n # start the server\n log(\"starting the server\")\n if target_platform == PLATFORM_WINDOWS:\n log(\"using wine\")\n server_instance = subprocess.Popen([\"wine\", target_binary], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n else:\n log(\"running binary\")\n server_instance = subprocess.Popen([target_binary], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n \n # give it time to start up\n log(\"allowing time to start\")\n time.sleep(LOAD_TIME)\n \n # warn the user of potential error message\n log(\"expect some kind of error message, just close it if it pops up\")\n\n # encode message\n encoded_message = str.encode(message) \n\n # send message\n send_message_tcp(\"localhost\", target_port, encoded_message)\n\n # record error message\n stderr = server_instance.stderr.read().decode()\n stdout = server_instance.stdout.read().decode()\n except:\n pass\n finally:\n server_instance.kill()\n else:\n try:\n if target_platform == PLATFORM_WINDOWS:\n log(\"using wine\")\n process_instance = subprocess.Popen([\"wine\", target_binary], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n else:\n log(\"running binary\")\n process_instance = subprocess.Popen([target_binary], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n \n # push map message to stdin\n process_instance.stdin.write(message)\n\n # record error message\n stderr = process_instance.stderr.read().decode()\n stdout = process_instance.stdout.read().decode()\n except:\n pass \n finally:\n process_instance.kill()\n return stdout, stderr", "def test_ducts_with_subprocess(self):\n assert_that(SUBPROCESS_TEST_SCRIPT).exists()\n proc = None\n parent = None\n try:\n parent = MessageDuctParent.psuedo_anonymous_parent_duct()\n parent.bind()\n proc = subprocess.Popen(\n [sys.executable, SUBPROCESS_TEST_SCRIPT, parent.listener_address], env={'PYTHONPATH': ROOT_DIR}\n )\n assert_that(parent.listen()).is_true()\n for _ in range(100):\n parent.send(\"pingpong\")\n parent.poll(1)\n assert_that(parent.recv()).is_equal_to(\"pingpong\")\n parent.send(None)\n time.sleep(1)\n finally:\n if parent:\n parent.close()\n if proc:\n proc.terminate()", "def start(self):\n #url = '{}://{}:{}/'.format('http',\n # self.ip,\n # self.port)\n #self.service_info = ServiceInfo(\n # '_webthing._sub._http._tcp.local.',\n # '{}._http._tcp.local.'.format(self.name),\n # address=socket.inet_aton(self.ip),\n # port=self.port,\n # properties={\n # 'url': url,\n # },\n # server='{}.local.'.format(socket.gethostname()))\n #self.zeroconf = Zeroconf()\n #self.zeroconf.register_service(self.service_info)\n\n # If WebSocketS used and NOT running in thread, and WebServer IS\n # running in thread make shure WebServer has enough stack size to\n # handle also the WebSocket requests.\n log.info('Starting Web Server')\n self.server.Start(threaded=srv_run_in_thread, stackSize=8192)", "def start_mqtt_auth_watcher(run_event):\n print('START MQTT WATCHER')\n cmd = ['/app/src/mosquitto_watcher.sh']\n # , cwd=os.path.join(os.path.dirname(__file__))\n process = 
subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)\n while run_event.is_set():\n time.sleep(1)\n process.terminate()\n process.wait()", "def __init__(self, parent=None):\n super(robotTwoTerminal, self).__init__(parent)\n self.process = QtCore.QProcess(self)\n self.urxvtTerminal = QtWidgets.QWidget(self)\n layout = QtWidgets.QVBoxLayout(self)\n layout.addWidget(self.urxvtTerminal)\n # Works also with urxvt:\n self.process.start('urxvt', ['-embed', str(int(self.winId()))])\n self.setGeometry(90, 460, 1160, 125)", "def _process_watcher(parent_pid, child_pid, child_host, child_port, minecraft_dir, conn):\n port = child_port\n def port_watcher():\n nonlocal port \n \n port = conn.recv()[0]\n\n port_thread = threading.Thread(target=port_watcher)\n port_thread.start()\n\n # Wait for processes to be launched\n time.sleep(1)\n try:\n child = psutil.Process(child_pid)\n except psutil.NoSuchProcess:\n child = None\n try:\n parent = psutil.Process(parent_pid)\n except psutil.NoSuchProcess:\n parent = None\n \n while True:\n try:\n time.sleep(0.1) # Sleep for a short time, and check if subprocesses needed to be killed.\n\n if not parent.is_running() or parent is None:\n if not (child is None):\n try:\n Instance._kill_minecraft_via_malmoenv(child_host,port)\n time.sleep(2)\n except:\n pass\n \n InstanceManager._reap_process_and_children(child)\n try:\n shutil.rmtree(minecraft_dir)\n except:\n logger.warning(\"Failed to delete temporary minecraft directory. It may have already been removed.\")\n pass\n return\n # Kill the watcher if the child is no longer running.\n # If you want to attempt to restart the child on failure, this\n # would be the location to do so.\n if not child.is_running():\n return\n except KeyboardInterrupt:\n pass", "def main():\n my_painting_mqtt_client = MyPaintingMQTTClient()\n my_painting_mqtt_client.run_app()", "def open(self):\n APP.clients.append(self)\n # self.send_status()\n log(\"WebSocket opened. 
{0} child(s) connected\".\n format(len(APP.clients)))", "async def _incoming_ws(self, pid, websocket):\n # websockets have a convenient __aiter__ interface, allowing\n # us to just iterate over the messages forever.\n # Under the hood, if there are no messages available from the\n # WebSocket, this code will yield and until another message is\n # received.\n\n # If the WebSocket is disconnected unexpectedly, the for loop\n # will produce an exception.\n try:\n async for msg in websocket:\n # Trim whitespace\n msg = msg.strip()\n # Make sure the message isn't an empty string\n if msg:\n # Pass the message onto the server's handler.\n self.on_player_msg(pid, msg)\n # If we get this error, then player probably just logged off.\n except websockets.exceptions.ConnectionClosed:\n pass\n finally:\n logging.debug(\"_incoming_ws closed for %s\", pid)", "def __init__(self, parent=None):\n super(embeddedTerminal, self).__init__(parent)\n self.process = QtCore.QProcess(self)\n self.urxvtTerminal = QtWidgets.QWidget(self)\n layout = QtWidgets.QVBoxLayout(self)\n layout.addWidget(self.urxvtTerminal)\n # Works also with urxvt:\n self.process.start('urxvt', ['-embed', str(int(self.winId()))])\n self.setGeometry(90, 460, 1160, 125)", "def open_rtcp_port(self):\n self.rtcp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)", "def connectionMade(self):\n self._pid = self.transport.pid\n if self._pid:\n self.logger(\"Process has pid %d\" % self._pid)\n self.transport.closeStdin() # close stdin", "def start_process():\n global command, process\n\n def on_data(data):\n data = data.decode().strip()\n print('{}'.format(data))\n\n cmd = command.split(' ')\n\n if process:\n process.terminate()\n\n process = MySubprocess(cmd, -1, functools.partial(on_data), None, None)", "def run():\n\n assert SSH_HOST is not None, 'SSH_HOST not set. Please configure.'\n\n\n def connect():\n port = find_open_port(SSH_HOST)\n if init_tunnel(SSH_HOST, port):\n print 'Tunnel initialized, pid:', PID\n return {'ssh tunnel entry': 'ssh://{}:{}'.format(SSH_HOST, port)}\n return {}\n\n def is_pid_alive(pid):\n processes = subprocess.check_output(['ps', '-fx'])\n for line in processes.splitlines():\n lpid = line.split()[0]\n if lpid == pid:\n return True\n return False\n\n def find_open_port(host, start_port=22222):\n i = 0\n while i < 1000:\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n result = sock.connect_ex((host, start_port + i))\n if result == 0:\n print \"Port is already used: \", start_port + i\n i += 1\n else:\n return start_port + i\n \n\n \n\n if PID is None:\n return connect()\n else:\n # check if process is still alive\n if is_pid_alive(PID):\n print 'Tunnel still active. Not doing anything.'\n else:\n return connect()", "def wpListenerStart(outboundMessageQueue):\n print \"Starting http POST server for wp updates\"\n wpPush.writeOut = outboundMessageQueue", "def __init__(self, parent=None):\n super(robotFourTerminal, self).__init__(parent)\n self.process = QtCore.QProcess(self)\n self.urxvtTerminal = QtWidgets.QWidget(self)\n layout = QtWidgets.QVBoxLayout(self)\n layout.addWidget(self.urxvtTerminal)\n # Works also with urxvt:\n self.process.start('urxvt', ['-embed', str(int(self.winId()))])\n self.setGeometry(90, 460, 1160, 125)", "def open(self):\n try:\n self.error_count = 0\n self.conn_thread = Thread(target=self.connect, name='Websocket Connection')\n self.conn_thread.start()\n except Exception as e:\n self.conn_thread.join()\n self.on_error(self.ws, \"Error from openning connection. 
Error -> {}\".format(e))", "def __init__(self, parent=None):\n super(robotThreeTerminal, self).__init__(parent)\n self.process = QtCore.QProcess(self)\n self.urxvtTerminal = QtWidgets.QWidget(self)\n layout = QtWidgets.QVBoxLayout(self)\n layout.addWidget(self.urxvtTerminal)\n # Works also with urxvt:\n self.process.start('urxvt', ['-embed', str(int(self.winId()))])\n self.setGeometry(90, 460, 1160, 125)", "def __init__(self, process_name=sys.argv[0], transport_factory=transport.TransportUnixFactory()):\n self.factory = transport_factory\n self.server = self.factory.serve()\n self.server.addEndpoint(general.EndpointIntrospect())\n processinfo = general.EndpointProcessInfo()\n processinfo.setProcessName(process_name)\n self.server.addEndpoint(processinfo)\n self.server.addEndpoint(tracing.EndpointTraceMapping())\n self.server.addEndpoint(tracing.EndpointNativeTraceSender())", "def __init__(self, parent=None):\n super(robotOneTerminal, self).__init__(parent)\n self.process = QtCore.QProcess(self)\n self.urxvtTerminal = QtWidgets.QWidget(self)\n layout = QtWidgets.QVBoxLayout(self)\n layout.addWidget(self.urxvtTerminal)\n # Works also with urxvt:\n self.process.start('urxvt', ['-embed', str(int(self.winId()))])\n self.setGeometry(90, 460, 1160, 125)", "async def test_run_without_launching(self):\n\n port = get_first_available_port(7860, 7870)\n\n io = gr.Interface(lambda s: s, gr.Textbox(), gr.Textbox()).queue()\n\n config = uvicorn.Config(app=io.app, port=port, log_level=\"warning\")\n\n server = Server(config=config)\n server.run_in_thread()\n\n try:\n async with websockets.connect(f\"ws://localhost:{port}/queue/join\") as ws:\n completed = False\n while not completed:\n msg = json.loads(await ws.recv())\n if msg[\"msg\"] == \"send_data\":\n await ws.send(json.dumps({\"data\": [\"Victor\"], \"fn_index\": 0}))\n if msg[\"msg\"] == \"send_hash\":\n await ws.send(\n json.dumps({\"fn_index\": 0, \"session_hash\": \"shdce\"})\n )\n if msg[\"msg\"] == \"process_completed\":\n completed = True\n assert msg[\"output\"][\"data\"][0] == \"Victor\"\n finally:\n server.close()", "def ws_request(self, ws_url):\n url = \"wss://stream.binance.com:9443/ws/%s\" % (ws_url)\n\n websocket.enableTrace(True)\n ws = websocket.WebSocketApp(url,\n on_error=self.ws_on_error,\n on_close=self.ws_on_close)\n\n return ws", "def _spawn(self, protocol, args, env=None):\n return reactor.spawnProcess(protocol, self.cmd, args, env=env)", "def spawn_new_player(self):\n tmux_invocation = [\n \"tmux\",\n \"new-session\",\n \"-s\" + TMUX_SESSION,\n \"-d\",\n \"mpv\",\n \"--no-video\",\n \"--term-playing-msg='${media-title}'\",\n \"--idle\",\n \"--input-ipc-server=\" + SOCKET_NAME,\n ]\n # start tmux with mpv and wait until ready\n os.spawnvp(os.P_WAIT, tmux_invocation[0], tmux_invocation)\n while not self.connect_player():\n sleep(0.1)", "def spawn(self, pcls, args):\n\n childp, ownp = multiprocessing.Pipe()\n p = pcls(self._id, childp)\n p._loglevel = self._loglevel\n p.start()\n\n childp.close()\n cid = ownp.recv()\n ownp.send((\"setup\", args))\n ownp.send(\"start\")\n\n self._child_procs.append((p.pid, cid))\n\n return cid", "async def _connect(self, subsystem=None, exec_command=None):\n ip, port, user, passwd = await self.dest_info()\n self._extra_info[\"peer\"] = PeerInfo(ip, port)\n\n if self._devinfo.proxy_required(ip):\n host = self.service.get_http_proxy_url(ip)\n elif self._devinfo.should_nat(ip):\n host = await self._devinfo.translate_address(ip)\n else:\n host = ip\n\n self.logger.info(\"Connecting to: %s: %d\", 
host, port)\n\n # known_hosts is set to None to disable the host verifications. Without\n # this the connection setup fails for some devices\n conn, _ = await asyncssh.create_connection(\n self._client_factory,\n host=host,\n port=port,\n username=user,\n password=passwd,\n client_keys=None,\n known_hosts=None,\n )\n\n chan, cmd_stream = await self._conn.create_session(\n lambda: CommandStream(self, self._loop),\n encoding=None,\n term_type=self.TERM_TYPE,\n subsystem=subsystem,\n command=exec_command,\n )\n self._chan = chan\n return cmd_stream", "async def start(self):\n # Avoid being rate limited by Twitter when restarting the stream with the same follow list.\n if self.sub_process and not set(self.sub_process.follows) != set(self.get_follows()):\n return\n\n # Kill the current stream before starting a new one\n self.stop()\n\n # No need to start a stream if we're not following anyone\n if not self.conf.follows:\n return\n\n # Create a new multi-processes queue, a new stream object and a new Process\n log.info('Creating new sub-process.')\n self.mp_queue = multiprocessing.Queue()\n self.mp_queue.cancel_join_thread()\n self.sub_process = SubProcessStream(self.mp_queue, self.conf.credentials, self.get_follows())\n log.info('Created new sub-process.')\n\n # Schedule the polling daemon (it will take care of starting the child process)\n self.daemon = asyncio.ensure_future(self._run())", "def test_create_websocket_url(self):\n\n self.assertEqual(\n 'ws://host:8888/',\n typhoonae.websocket.create_websocket_url())\n\n self.assertEqual(\n 'ws://host:8888/foo',\n typhoonae.websocket.create_websocket_url('/foo'))", "async def handler(websocket, path):\n\n print(\"Connected\")\n # print(vars(websocket))\n \n # global connected\n # # Register.\n # connected.add(websocket)\n # try:\n # # Implement logic here.\n # await asyncio.wait([ws.send(\"Hello!\") for ws in connected])\n # await asyncio.sleep(10)\n # finally:\n # # Unregister.\n # connected.remove(websocket)\n\n while True:\n listener_task = asyncio.ensure_future(websocket.recv())\n producer_task = asyncio.ensure_future(producer())\n done, pending = await asyncio.wait(\n [listener_task, producer_task],\n return_when=asyncio.FIRST_COMPLETED)\n\n if listener_task in done:\n message = listener_task.result()\n await consumer(message)\n else:\n listener_task.cancel()\n\n if producer_task in done:\n message = producer_task.result()\n await websocket.send(message)\n else:\n producer_task.cancel()", "def launch():\n\n core.openflow.addListenerByName(\"ConnectionUp\", _handle_ConnectionUp)\n log.info(\"Hub running\")", "async def start_sockets(data: dict) -> tuple:\n\n # create Websockets connections\n bnc_websocket = await websockets.connect(data['binance']['url'], max_queue=None, ping_interval=None)\n ftx_websocket = await websockets.connect(data['ftx']['url'], max_queue=None, ping_interval=None)\n\n # subscribing to updates\n await bnc_websocket.send(json.dumps(data['binance']['subscribe_request']))\n await ftx_websocket.send(json.dumps(data['ftx']['subscribe_request']))\n\n return bnc_websocket, ftx_websocket", "def main():\n\tports = glob.glob(\"/dev/tty.wchusbserial*\") + glob.glob(\"/dev/tty.usbserial*\") + glob.glob(\"COM3\") + glob.glob(\"COM4\")\n\tBAUDRATE = 9600\n\tchoice = int(input((str(ports) + \" enter numerical index for port: \")))\n\tportname = ports[choice]\n\tport = None\n\tsending_queue = None\n\treceiving_process_on = None\n\treceiving_process = None\n\ttry:\n\t\tsending_queue = multiprocessing.Queue()\n\t\treceiving_process_on = 
multiprocessing.Value(c_bool,False)\n\t\treceiving_process = multiprocessing.Process(target = communication, args = (portname,BAUDRATE,sending_queue,receiving_process_on))\n\t\treceiving_process.start()\n\t\twhile True:\n\t\t\tword = input(\"Enter a message: \")\n\t\t\tsending_queue.put(create_chunk(word)) #sending 32 bytes to the process queue\n\t\t\t\n\texcept Exception as e:\n\t\tprint(\"ERROR:\", e)\n\tfinally:\n\t\treceiving_process_on.value = False\n\t\tfor i in range(10): #wait for the process to stop\n\t\t\tpass\n\t\tif receiving_process != None:\n\t\t\treceiving_process.join()\n\t\t\n\t\tif sending_queue != None:\n\t\t\tsending_queue.close()", "def open(self, *args, **kwargs):\n self._open = True\n self._stat.websocket_stream_open += 1\n # Create subscription for the stream\n url = self.request.uri\n self._logger.info(\"Websocket connection %s %s\", url, self)\n\n async_future = asyncio.async(\n self.netconf_subscribe(\n self.request.uri,\n self.request.headers.get(\"Authorization\")), \n loop=self._asyncio_loop)\n yield tornado.platform.asyncio.to_tornado_future(async_future)", "def websocket_server():\n\n servient = Servient()\n\n thing_01_id = uuid.uuid4().urn\n thing_02_id = uuid.uuid4().urn\n\n exposed_thing_01 = ExposedThing(servient=servient, thing=Thing(id=thing_01_id))\n exposed_thing_02 = ExposedThing(servient=servient, thing=Thing(id=thing_02_id))\n\n prop_name_01 = uuid.uuid4().hex\n prop_name_02 = uuid.uuid4().hex\n prop_name_03 = uuid.uuid4().hex\n event_name_01 = uuid.uuid4().hex\n action_name_01 = uuid.uuid4().hex\n\n prop_value_01 = Faker().sentence()\n prop_value_02 = Faker().sentence()\n prop_value_03 = Faker().sentence()\n\n prop_init_01 = PropertyFragmentDict({\n \"type\": \"string\",\n \"observable\": True\n })\n\n prop_init_02 = PropertyFragmentDict({\n \"type\": \"string\",\n \"observable\": True\n })\n\n prop_init_03 = PropertyFragmentDict({\n \"type\": \"string\",\n \"observable\": True\n })\n\n event_init_01 = EventFragmentDict({\n \"type\": \"object\"\n })\n\n action_init_01 = ActionFragmentDict({\n \"input\": {\"type\": \"string\"},\n \"output\": {\"type\": \"string\"}\n })\n\n def async_lower(parameters):\n loop = tornado.ioloop.IOLoop.current()\n input_value = parameters.get(\"input\")\n return loop.run_in_executor(None, lambda x: time.sleep(0.1) or x.lower(), input_value)\n\n exposed_thing_01.add_property(prop_name_01, prop_init_01, value=prop_value_01)\n exposed_thing_01.add_property(prop_name_02, prop_init_02, value=prop_value_02)\n exposed_thing_01.add_event(event_name_01, event_init_01)\n exposed_thing_01.add_action(action_name_01, action_init_01, async_lower)\n\n exposed_thing_02.add_property(prop_name_03, prop_init_03, value=prop_value_03)\n\n ws_port = find_free_port()\n\n ws_server = WebsocketServer(port=ws_port)\n ws_server.add_exposed_thing(exposed_thing_01)\n ws_server.add_exposed_thing(exposed_thing_02)\n\n @tornado.gen.coroutine\n def start():\n yield ws_server.start()\n\n tornado.ioloop.IOLoop.current().run_sync(start)\n\n url_thing_01 = build_websocket_url(exposed_thing_01, ws_server, ws_port)\n url_thing_02 = build_websocket_url(exposed_thing_02, ws_server, ws_port)\n\n yield {\n \"exposed_thing_01\": exposed_thing_01,\n \"exposed_thing_02\": exposed_thing_02,\n \"prop_name_01\": prop_name_01,\n \"prop_init_01\": prop_init_01,\n \"prop_value_01\": prop_value_01,\n \"prop_name_02\": prop_name_02,\n \"prop_init_02\": prop_init_02,\n \"prop_value_02\": prop_value_02,\n \"prop_name_03\": prop_name_03,\n \"prop_init_03\": prop_init_03,\n 
\"prop_value_03\": prop_value_03,\n \"event_name_01\": event_name_01,\n \"event_init_01\": event_init_01,\n \"action_name_01\": action_name_01,\n \"action_init_01\": action_init_01,\n \"ws_server\": ws_server,\n \"url_thing_01\": url_thing_01,\n \"url_thing_02\": url_thing_02,\n \"ws_port\": ws_port\n }\n\n @tornado.gen.coroutine\n def stop():\n yield ws_server.stop()\n\n tornado.ioloop.IOLoop.current().run_sync(stop)", "def add(self, websocket):\n if websocket in self:\n return\n\n logger.info(\"Managing websocket %s\" % format_addresses(websocket))\n websocket.opened()\n with self.lock:\n fd = websocket.sock.fileno()\n self.websockets[fd] = websocket\n self.poller.register(fd)", "async def relay(websocket, path):\n # register(websocket) sends user_event() to websocket\n await register(websocket)\n try:\n while True:\n try:\n message = await websocket.recv()\n except ConnectionClosed:\n break\n else:\n await relay_message(message, current_user=websocket)\n finally:\n await unregister(websocket)", "async def open_websocket_server(sock, filter=None): # pylint: disable=W0622\n ws = await create_websocket_server(sock, filter=filter)\n try:\n yield ws\n finally:\n await ws.close()", "def openRtpPort(self):\r\n\t\twhile True:\r\n\t\t\ttry:\r\n\t\t\t\tself.rtpSocket_client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n\t\t\t\tself.rtpSocket_client.bind(('', self.rtpPort))\r\n\t\t\t\tself.rtpSocket_client.settimeout(0.5)\r\n\t\t\t\tself.listenRtp()\r\n\t\t\texcept Exception as err:\r\n\t\t\t\tif (str(err) == \"[Errno 9] Bad file descriptor\"):\r\n\t\t\t\t\tbreak", "def launch_proxy(local_port, remote_port, remote_ip):\n\n path = os.path.abspath(os.path.dirname(__file__))\n ws = os.path.join(path, \"../../proxy/bin/wistar_proxy.py\")\n\n wistar_proxy_path = os.path.abspath(ws)\n\n cmd = \"/usr/bin/env python %s --local-port=%s --remote-ip=%s --remote-port=%s &\" % (wistar_proxy_path,\n local_port,\n remote_ip,\n remote_port)\n\n logger.debug(cmd)\n\n proc = subprocess.Popen(cmd, shell=True, close_fds=True)\n time.sleep(1)\n return proc.pid", "async def websocket_client(self):\n return await websocket(CLIENT, \"/websocket\")", "def spawnChild(self, protocolName):\n from twisted.internet import reactor\n\n inheritedSocket = self.dispatcher.addSocket()\n inheritedFD = inheritedSocket.childSocket().fileno()\n\n processProtocol = ChildProcessProtocol(self, inheritedSocket)\n\n # Annoyingly, twistd *has* to make a pid file.\n pidFileFD, pidFileName = mkstemp()\n close(pidFileFD)\n unlink(pidFileName)\n\n arguments = (\n sys.executable, b\"-c\",\n b\"from twisted.scripts.twistd import run; run()\",\n b\"--pidfile\", pidFileName,\n b\"--nodaemon\", b\"--logfile\", b\"-\",\n self.pluginName,\n b\"--inherited-fd=3\",\n b\"--protocol\", protocolName,\n )\n\n self.log.debug(\n u\"Spawning child process for protocol {protocol!r} \"\n u\"with arguments: {arguments}\",\n protocol=protocolName, arguments=arguments,\n )\n\n transport = reactor.spawnProcess(\n processProtocol,\n sys.executable, arguments, env={\n b\"PYTHONPATH\": b\":\".join(sys.path),\n },\n childFDs={0: b\"w\", 1: b\"r\", 2: b\"r\", 3: inheritedFD}\n )\n\n child = ChildProcess(transport, processProtocol)\n\n self.log.info(\n u\"Spawned child process #{child.transport.pid} \"\n u\"for protocol {protocol!r}\",\n child=child, protocol=protocolName, arguments=arguments,\n )\n\n self.children.add(child)", "def main():\n if len(sys.argv) != 3:\n sys.exit(\"Usage: python receiver.py [Receiver Port] [Window Size]\")\n receiver_port = 
int(sys.argv[1])\n window_size = int(sys.argv[2])\n receiver(receiver_port, window_size)", "def connect(self):\n assert self.listening\n assert not self.connected\n ctx = zmq.Context.instance()\n port = NODE_INFOS[self.ID].port\n self._send_socket = ctx.socket(zmq.PUB)\n self._send_socket.bind(f\"tcp://*:{port}\")\n self.connected = True", "def start(self):\n zmq_uri = (\n \"{protocol}://{address}:{port}\".format(\n protocol=self.protocol, address=self.address, port=self.port\n )\n if self.port\n else \"{protocol}://{address}\".format( # noqa\n protocol=self.protocol, address=self.address\n )\n )\n log.debug(\"ZMQ URI: %s\", zmq_uri)\n self.ctx = zmq.Context()\n if hasattr(zmq, self.type):\n skt_type = getattr(zmq, self.type)\n else:\n skt_type = zmq.PULL\n self.sub = self.ctx.socket(skt_type)\n self.sub.connect(zmq_uri)\n if self.hwm is not None:\n self.sub.setsockopt(zmq.RCVHWM, self.hwm)\n if self.recvtimeout is not None:\n log.debug(\"Setting RCVTIMEO to %d\", self.recvtimeout)\n self.sub.setsockopt(zmq.RCVTIMEO, self.recvtimeout)\n if self.keepalive is not None:\n log.debug(\"Setting TCP_KEEPALIVE to %d\", self.keepalive)\n self.sub.setsockopt(zmq.TCP_KEEPALIVE, self.keepalive)\n if self.keepalive_idle is not None:\n log.debug(\"Setting TCP_KEEPALIVE_IDLE to %d\", self.keepalive_idle)\n self.sub.setsockopt(zmq.TCP_KEEPALIVE_IDLE, self.keepalive_idle)\n if self.keepalive_interval is not None:\n log.debug(\"Setting TCP_KEEPALIVE_INTVL to %d\", self.keepalive_interval)\n self.sub.setsockopt(zmq.TCP_KEEPALIVE_INTVL, self.keepalive_interval)", "def subscribe(self):\n fd = libplasma.subscribe(self.conn)\n self.notification_sock = socket.fromfd(fd, socket.AF_UNIX, socket.SOCK_STREAM)\n # Make the socket non-blocking.\n self.notification_sock.setblocking(0)", "def create_connection(url, timeout=None, **options):\r\n websock = WebSocket()\r\n websock.settimeout(timeout != None and timeout or default_timeout)\r\n websock.connect(url, **options)\r\n return websock", "def make_communicator(token):\n\n return WebsocketCommunicator(TokenAuthMiddlewareStack(\n URLRouter(\n websocket_urlpatterns\n )\n ), '/ws/chat/?token=' + token)", "def __init__(self,\n *,\n qrcode: bool = False,\n host: str = \"0.0.0.0\",\n port: int = 8000,\n logger: logging.Logger = logging.getLogger(\n 'mvt.phone_sensor'),\n log_level: int = logging.WARN,\n proxy_client_from: Optional[str] = None):\n\n self._ws: Optional[websockets.WebSocketServerProtocol] = None\n self._out: Queue[Union[websockets.Data, ClientDisconnect]] = Queue()\n self._waiting = False\n self._qrcode = qrcode\n self._proxy_client_from = proxy_client_from\n self.logger = logger\n self.logger.setLevel(log_level)\n self.client_connected = False\n self.loop = asyncio.new_event_loop()\n self._in: asyncio.Queue[str] = asyncio.Queue(loop=self.loop)\n self.stop_flag = self.loop.create_future()\n\n self.server_thread = Thread(target=self._start_server,\n kwargs={'host': host, 'port': port},\n daemon=True)\n self.server_thread.start()\n assert self._out.get() == 'ready', \"server failed to start\"", "async def _listen(self,sub_params): \n async with websockets.connect(self.url) as websocket:\n await websocket.send(json.dumps(sub_params))\n # self.keepalive.start()\n start_time = time.time()\n while not self.shutdown_event.is_set():\n try:\n now = time.time()\n if((now - start_time) > 0.5):\n self.calculate_order_depth()\n start_time = now \n data = await websocket.recv()\n msg = json.loads(data)\n except ValueError as e:\n self.on_error(e)\n except Exception as e:\n 
self.on_error(e)\n else:\n self.on_message(msg)", "def __init__(self, callback, parent):\n IPC.__init__(self, UPSTREAM_CHANNEL, DOWNSTREAM_CHANNEL, callback)\n self.parent = parent\n self.controller = parent.controller\n self.logger = get_logger(\"ipc_server\")", "def __init__(self, target_addr: str, target_port: int, max_worker_threads: int = 2):\n super().__init__(max_worker_threads=max_worker_threads)\n self._addr: str = target_addr\n self._port: int = target_port\n self._websocket: WebSocketClientProtocol = None\n self._is_running: bool = False", "def make_server(connect_handler=None, message_handler=None, disconnect_handler=None):\n class Server(tornado.websocket.WebSocketHandler):\n def open(self):\n print('new connection')\n if connect_handler:\n return connect_handler(self)\n\n def on_message(self, message):\n if message_handler:\n return message_handler(json.loads(message), self)\n\n def on_close(self):\n print('connection closed')\n if disconnect_handler:\n return disconnect_handler(self)\n\n def check_origin(self, origin):\n return True\n return Server", "def _sendMessageToWeb(self, msg):\n if self.ioLoopInst is not None:\n msg = npToPy(msg)\n json_msg = json.dumps(msg)\n self.ioLoopInst.add_callback(sendWebSocketMessage, wsName='wsUser', msg=json_msg)\n else:\n print(f'WebDisplayMsg {msg}')", "def websocket_proxy_server(url, key=\"\"):\n def create_on_message(conn):\n def _fsend(data):\n data = bytes(data)\n conn.write_message(data, binary=True)\n return len(data)\n on_message = rpc._CreateEventDrivenServer(_fsend, \"WebSocketProxyServer\")\n return on_message\n\n @gen.coroutine\n def _connect(key):\n conn = yield websocket.websocket_connect(url)\n on_message = create_on_message(conn)\n temp = _server_env()\n # Start connecton\n conn.write_message(struct.pack('@i', RPC_MAGIC), binary=True)\n key = \"server:\" + key\n conn.write_message(struct.pack('@i', len(key)), binary=True)\n conn.write_message(key.encode(\"utf-8\"), binary=True)\n msg = yield conn.read_message()\n assert len(msg) >= 4\n magic = struct.unpack('@i', msg[:4])[0]\n if magic == RPC_MAGIC + 1:\n raise RuntimeError(\"key: %s has already been used in proxy\" % key)\n elif magic == RPC_MAGIC + 2:\n logging.info(\"RPCProxy do not have matching client key %s\", key)\n elif magic != RPC_MAGIC:\n raise RuntimeError(\"%s is not RPC Proxy\" % url)\n logging.info(\"Connection established\")\n msg = msg[4:]\n if msg:\n on_message(bytearray(msg), 3)\n\n while True:\n try:\n msg = yield conn.read_message()\n if msg is None:\n break\n on_message(bytearray(msg), 3)\n except websocket.WebSocketClosedError as err:\n break\n logging.info(\"WebSocketProxyServer closed...\")\n temp.remove()\n ioloop.IOLoop.current().stop()\n ioloop.IOLoop.current().spawn_callback(_connect, key)\n ioloop.IOLoop.current().start()", "def create_process(self, args=[], *popenargs, **kwargs):\n try:\n startupinfo = subprocess.STARTUPINFO()\n startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW\n kwargs.setdefault('startupinfo', startupinfo)\n except:\n pass\n kwargs.setdefault('universal_newlines', True)\n kwargs.setdefault('stdin', sys.stdin)\n return subprocess.Popen(self.build_args(args), *popenargs, **kwargs)", "def connect_to_websocket(self):\n conn = yield websocket_connect(\"wss://api.bitfinex.com/ws\")\n\n req = {\n \"event\": \"subscribe\",\n \"channel\": \"book\",\n \"pair\": \"BTCUSD\",\n \"freq\": \"F0\",\n }\n conn.write_message(json.dumps(req))\n while True:\n msg = yield conn.read_message()\n response = json.loads(msg)\n if response:\n if 
self.snapshot_received:\n # Perform update in database\n # and emit update to client\n self.perform_update(response)\n\n if isinstance(response, list) and not self.snapshot_received: # If true, store snapshot in database\n\n for data in response[1]: # here data is of form [price, count, amount]\n item_type = \"bid\" if data[2] > 0 else \"ask\" # bid if amt > 0, else ask\n item = self.add_new_bitfinex_item(item_type, data[0], data[1])\n self.session.add(item)\n self.session.commit()\n print(\"Bitfinex Snapshot Received\")\n self.snapshot_received = True # Set flag\n else:\n break", "def connect(self):\n if not self.jid or self.jid.node or self.jid.resource:\n raise ValueError,\"Cannot connect: no or bad JID given\"\n if not self.secret:\n raise ValueError,\"Cannot connect: no secret given\"\n if not self.server:\n raise ValueError,\"Cannot connect: no server given\"\n if not self.port:\n raise ValueError,\"Cannot connect: no port given\"\n\n self.lock.acquire()\n try:\n stream=self.stream\n self.stream=None\n if stream:\n stream.close()\n\n self.__logger.debug(\"Creating component stream: %r\" % (self.stream_class,))\n stream=self.stream_class(jid = self.jid,\n secret = self.secret,\n server = self.server,\n port = self.port,\n keepalive = self.keepalive,\n owner = self)\n stream.process_stream_error=self.stream_error\n self.stream_created(stream)\n stream.state_change=self.__stream_state_change\n stream.connect()\n self.stream=stream\n self.state_changed.notify()\n self.state_changed.release()\n except:\n self.stream=None\n self.state_changed.release()\n raise", "def main():\n global discuss_bot_id, discussion_chat_id\n r = requests.get('https://slack.com/api/rtm.connect', {'token': bot_token})\n discuss_bot_id = r.json()['self']['id']\n url = r.json()['url']\n r = requests.get('https://slack.com/api/conversations.list',\n {'token': bot_token})\n for channel in r.json()['channels']:\n if channel['name'] == 'discussion':\n discussion_chat_id = channel['id']\n print(discussion_chat_id)\n ws = websocket.WebSocketApp(\n url=url, on_message=on_message, on_error=on_error, on_close=on_close)\n ws.on_open = on_open\n ws.run_forever()" ]
[ "0.61489266", "0.6004585", "0.58952093", "0.5711386", "0.56981546", "0.5681631", "0.5521888", "0.55072397", "0.5495588", "0.54655325", "0.5456767", "0.5442702", "0.539571", "0.53936225", "0.5373037", "0.5360796", "0.53407484", "0.53166866", "0.52801496", "0.5262081", "0.52579105", "0.5254402", "0.52409124", "0.52344596", "0.5223999", "0.5215537", "0.51427954", "0.5129504", "0.5125885", "0.5121839", "0.5110913", "0.5101862", "0.50816673", "0.50723195", "0.50673586", "0.50544673", "0.50504315", "0.5034717", "0.50247204", "0.5011962", "0.4986483", "0.49851272", "0.4978769", "0.49636152", "0.49586925", "0.49535283", "0.4952887", "0.49380895", "0.49203682", "0.4912716", "0.49126905", "0.4911071", "0.49037394", "0.48782855", "0.48770666", "0.48755172", "0.48737508", "0.4859915", "0.48508734", "0.48465252", "0.4846103", "0.48368907", "0.48361948", "0.4834171", "0.48221582", "0.48208755", "0.4820002", "0.4815585", "0.4811578", "0.4810217", "0.48060796", "0.4803102", "0.48006904", "0.47973946", "0.47897273", "0.4778226", "0.47742003", "0.47741905", "0.47518063", "0.4738176", "0.47373068", "0.47342604", "0.47334433", "0.47236767", "0.4719852", "0.4718756", "0.4712782", "0.47094053", "0.47079933", "0.47060767", "0.46925116", "0.46925095", "0.46892974", "0.46827915", "0.46697548", "0.46693593", "0.46633467", "0.46618983", "0.46604016", "0.4656903", "0.46534008" ]
0.0
-1
opener for opening sheets for a client. stock: company stock symbol (e.g. AAPL for Apple Inc.). name: name of the sheet (e.g. 'income' / 'balance'); use sheets_names() to see all sheet names. Returns the requested sheet of the company, read from its CSV file.
def open_file(stock, name, setup=False):
    if not isinstance(stock, str):
        raise TypeError("Parameter 'stock' should be a string, not a " + type(stock).__name__)

    if setup is True:
        # when setup, name is "AAPL_income.csv", not "income"
        # path = _os.path.join(datapath(setup=False), stock, name)
        path = datapath(True, stock, name)
        df = _pd.read_csv(path)
        _gc.collect()
        return df

    # not setup, normal open_file
    names = ['major_holders', 'top_institutional_holders', 'top_mutual_fund_holders', 'Trading_Information',
             'Financial_Highlights', 'Valuation_Measures', 'Executives', 'Description', 'Earnings_Estimate',
             'Revenue_Estimate', 'Earnings_History', 'EPS_Trend', 'EPS_Revisions', 'Growth_Estimates', 'stats',
             'statements', 'reports', 'Executives', 'Description', 'analysis', 'Summary', 'balance', 'cash_flow',
             'income']

    if name not in names:
        try:
            name = _path(name)  # when client mistakenly input factor instead of sheet name
        except ValueError:
            raise ValueError(
                'Parameter "name" should be the name of the financial sheets, not a factor name...Use path method to '
                'find the location of a factor')

    path = datapath(True, stock, stock)
    try:
        df = _pd.read_csv(path + '_' + name + '.csv')
        _gc.collect()
    except FileNotFoundError:
        _gc.collect()
        if _os.path.exists(datapath(True, stock)):
            raise ValueError("There is no sheet - {} - for company {}. Use main_get to retrieve the sheet".format(name, stock))
        else:
            raise ValueError("There is no record of '" + stock + "' in database")
    return df
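A minimal usage sketch of the open_file helper above, not part of the dataset record: the module name fstock and the presence of previously downloaded CSVs under datapath() are assumptions, so treat this as an illustration of the call pattern rather than the package's documented API.

# Hedged example: `fstock` is an assumed module name for the package that
# defines open_file(); the AAPL data is assumed to have been fetched already.
import fstock

# Read one financial sheet for a ticker as a pandas DataFrame.
income = fstock.open_file('AAPL', 'income')
print(income.head())

# A non-string ticker raises TypeError; an unknown sheet name raises ValueError.
try:
    fstock.open_file('AAPL', 'no_such_sheet')
except ValueError as err:
    print(err)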
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def excel(df_ccl, df_arg_stocks, df_bonds, df_arg_stocks_ccl):\n if os.path.exists('CCL.xlsx'):\n wb = xw.Book('CCL.xlsx')\n # SHEET CEDEARS\n ws = wb.sheets('CCL CEDEARs')\n ws.range('A1').expand().value = df_ccl\n # SHEET MERVAL\n ws_merval = wb.sheets('Merval')\n ws_merval.range('A1').expand().value = df_arg_stocks\n # SHEET BONOS\n ws_bonds = wb.sheets('Bonos')\n ws_bonds.range('A1').expand().value = df_bonds\n # SHEET CCL MERVAL\n ws_ccl = wb.sheets('CCL ADRs')\n ws_ccl.range('A1').expand().value = df_arg_stocks_ccl\n\n tiempo = time.asctime()\n print('Carga exitosa de datos. Ultima ejecución: ',tiempo)", "def google_sheets_connector():\n scope = ['https://www.googleapis.com/auth/drive']\n credentials = ServiceAccountCredentials.from_json_keyfile_name('key.json', scope)\n client = gspread.authorize(credentials)\n sheet = client.open('backend').sheet1\n return sheet", "def set_user_defined_sheet_name():\n global sheet\n probable_sheets = []\n\n machine_nr = input('Veuillez entrer un numéro de machine (Vide si recherche par C.) : ')\n invoice_nr = input('Veuillez entrer un numéro de C. : ')\n workbook = load_workbook(filename='./temp_excel.xlsm')\n sheets = workbook.sheetnames\n\n # Research amongs all sheets (There can be a lot)\n for ii in sheets:\n if machine_nr in ii and invoice_nr in ii:\n sheet = workbook[ii]\n break\n elif machine_nr in ii:\n probable_sheets.append(ii)\n elif invoice_nr in ii:\n probable_sheets.append(ii)\n\t \n\n # If no exact corresponding sheet is found\n if not sheet and probable_sheets != []:\n print('Aucune feuille ne correspond totalement à votre recherche, mais certaines s\\'en rapprochent :')\n i = 0\n for ii in probable_sheets:\n print(f'{i} : {ii}')\n i+=1\n print('99 pour quitter')\n choice = input('Faites un choix :')\n\n # Let the user exit the script\n if choice == 'q':\n sys.exit()\n else:\n sheet = workbook[probable_sheets[int(choice)]]", "def write_to_xls_file(self,xls_filename,sheet_name):\r\n rb = xlrd.open_workbook(xls_filename,formatting_info=True)\r\n workbook = copy(rb) #a writable copy (I can't read values out of this, only write to it)\r\n\r\n ''' get all sheetnames '''\r\n list_of_sheetnames = []\r\n list_of_sheetnames = rb.sheet_names()\r\n ''' make a set of sheetnames without duplication '''\r\n sheet_names = set(list_of_sheetnames)\r\n ''' verify if a given ticker existed or not '''\r\n if (sheet_name in sheetnames) == True:\r\n flag = True\r\n else:\r\n flag = False\r\n\r\n if flag == True:\r\n print \"The data sheet named \" + ticker_name + \" existed.\"\r\n else:\r\n print \"No data sheet named \" + ticker_name + \", created new\"\r\n w_sheet = workbook.add_sheet(ticker_name)\r\n w_sheet.write(0,0,'Eod_C_Action')\r\n w_sheet.write(0,1,'Eod_I_Version')\r\n w_sheet.write(0,2,'UsrId')\r\n w_sheet.write(0,3,'Eod_D_Creation')\r\n w_sheet.write(0,4,'Eod_D_Quote')\r\n w_sheet.write(0,5,'InsId')\r\n w_sheet.write(0,6,'Eod_I_ProviderId')\r\n w_sheet.write(0,7,'Eod_N_Open')\r\n w_sheet.write(0,8,'Eod_N_High')\r\n w_sheet.write(0,9,'Eod_N_Low')\r\n w_sheet.write(0,10,'Eod_N_Close')\r\n w_sheet.write(0,11,'Eod_I_Volume')\r\n \r\n for row_index in range(1,len(self.close)+1):\r\n w_sheet.write(row_index,0,'A')\r\n w_sheet.write(row_index,1,0)\r\n w_sheet.write(row_index,2,8)\r\n w_sheet.write(row_index,3,datetime.datetime.now().strftime('%Y-%m-%d'))\r\n w_sheet.write(row_index,4,self.date[row_index-1].strftime('%Y-%m-%d'))\r\n w_sheet.write(row_index,5,1)\r\n w_sheet.write(row_index,6,1)\r\n 
w_sheet.write(row_index,7,self.open_[row_index-1])\r\n w_sheet.write(row_index,8,self.high[row_index-1])\r\n w_sheet.write(row_index,9,self.low[row_index-1])\r\n w_sheet.write(row_index,10,self.close[row_index-1])\r\n w_sheet.write(row_index,11,self.volume[row_index-1])\r\n\r\n workbook.save(xls_filename)", "def google_sheets_connector():\n print(\"Connecting to Google Sheets\")\n scope = ['https://www.googleapis.com/auth/drive']\n credentials = ServiceAccountCredentials.from_json_keyfile_name('key.json', scope)\n client = gspread.authorize(credentials)\n sheet = client.open('backend').sheet1\n return sheet", "def retrieve_company_data(self):\n self.set_stock_sym_append_str('')\n self.set_stock_retrieval_type('all') #'all', watcher\n self.load_stock_symbol_fr_file()", "def getSheet(self, sheet_name):\r\n return self.workbook.Sheets(sheet_name)", "def getAllStocks():\n return pandas.read_excel('D:\\The Fastlane Project\\Coding Projects\\Stock Analysis\\stocks\\stock_data_2.xlsx')", "def _get_spreadsheet(i):\n path = io_mgr.get_parties_spreadsheet(i)\n if not os.path.exists(path):\n raise IOError()\n\n return openpyxl.load_workbook(path, read_only=True)", "def get_excel(exceldocument):\r\n\r\n sheet = xlrd.open_workbook(exceldocument).sheet_by_index(0)\r\n return sheet", "def get_sheet_name(self):\n\n xl = pd.ExcelFile(self.excel_file)\n sheet_names = xl.sheet_names\n for item in sheet_names:\n if re.match('(.*)Current primers', item, re.IGNORECASE): # Only extracts most recent primers.\n sheet_name = item\n return sheet_name", "def multi_sheet(self):\n # Initialize #\n all_sheets = []\n # Loop #\n for name in self.handle.sheet_names:\n sheet = self.handle.parse(name)\n sheet.insert(0, \"nace\", name)\n all_sheets.append(sheet)\n # Write #\n df = pandas.concat(all_sheets)\n df.to_csv(str(self.dest), **self.kwargs)", "def get_sheet_name(self, has_verbal_autopsy):\n source_to_sheet = {\n \"India_MCCD_Orissa_ICD10\": \"India_MCCD_states_ICD10\",\n \"India_MCCD_Delhi_ICD10\": \"India_MCCD_states_ICD10\",\n \"Thailand_Public_Health_Statistics\": \"ICD10_tabulated\",\n \"India_SRS_states_report\": \"India_SRS_states_report\",\n \"UKR_databank_ICD10_tab\": \"ICD10_tabulated\",\n \"Russia_FMD_ICD9\": \"Russia_FMD_1989_1998\",\n \"Iran_Mohsen_special_ICD10\": \"Iran_Mohsen_special_ICD10\"\n }\n if has_verbal_autopsy and (self.source != 'India_SRS_states_report'):\n sheet_name = 'INDEPTH_ICD10_VA'\n elif self.source in source_to_sheet.keys():\n sheet_name = source_to_sheet[self.source]\n else:\n sheet_name = self.code_system\n return sheet_name", "def __call__(self):\n if len(self.handle.sheet_names) > 1: self.multi_sheet()\n else: self.mono_sheet()", "def login_open_sheet(oauth_key_file, spreadsheet):\n try:\n scope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']\n credentials = ServiceAccountCredentials.from_json_keyfile_name(oauth_key_file, scope)\n gc = gspread.authorize(credentials)\n worksheet = gc.open(spreadsheet).sheet1 # pylint: disable=redefined-outer-name\n return worksheet\n except Exception as ex: # pylint: disable=bare-except, broad-except\n print('Unable to login and get spreadsheet. 
Check OAuth credentials, spreadsheet name, \\\n and make sure spreadsheet is shared to the client_email address in the OAuth .json file!')\n print('Google sheet login failed with error:', ex)\n sys.exit(1)", "def open_stock_list(exchange='ALL'):\n if exchange not in ['NYSE', 'NASDAQ'] and exchange != 'ALL':\n raise ValueError(\"Parameter 'exchange' should either NYSE or NASDAQ\")\n\n if exchange == 'ALL': # all tickets\n c1 = open_general('NASDAQ')\n c2 = open_general('NYSE')\n df = _pd.concat([c1, c2], ignore_index=True).drop('Unnamed: 9', axis=1) # drop duplicated column\n else:\n _csv = open_general(exchange)\n df = _csv.drop('Unnamed: 9', axis=1)\n return df", "def save_company_names(self,reload = False):\n #this is a security measure such that the companies can not be reloaded by fault.\n if not reload:\n return\n\n # Get the html of the Wikipedia site to extract the table\n website_url = requests.get(\"https://en.wikipedia.org/w/index.php?title=List_of_S%26P_500_companies&oldid=895655255\").text\n html_site = BeautifulSoup(website_url, 'lxml')\n\n # Extract the table\n SP_Table = html_site.find('table',{'class':'wikitable sortable'})\n \n # Extract the rows of the table\n rows = SP_Table.findAll('tr')\n \n # Extract for each row in rows the second value as this is the wanted symbol\n df = pd.DataFrame(columns=['Symbol', 'FullName', 'CSVName', 'Sector'])\n for row in rows[1:]:\n # Extract the company names\n companyFullName = row.findAll('td')[1].text\n # Extract the company csv names\n companyCSVName = companyFullName.replace('*', ' ')\n # Extract the company symbols\n companySymbol = row.findAll('td')[0].text\n companySymbol = ''.join(companySymbol.split())\n sector = row.findAll('td')[3].text\n df1 = pd.DataFrame([[companySymbol, companyFullName, companyCSVName, sector]], columns=df.columns)\n df = df.append(df1, ignore_index=True)\n \n df['Sector'] = df['Sector'].apply(lambda x: x.replace('\\n', ''))\n df.to_csv(self.PATH_TO_COMPANY_FILES + '/Companies.csv', index=False)\n\n return", "def login_open_sheet(oauth_key_file, spreadsheet):\r\n try:\r\n scope = ['https://spreadsheets.google.com/feeds','https://www.googleapis.com/auth/drive']\r\n credentials = ServiceAccountCredentials.from_json_keyfile_name(oauth_key_file, scope)\r\n gc = gspread.authorize(credentials)\r\n worksheet = gc.open(spreadsheet).sheet1\r\n return worksheet\r\n except Exception as ex:\r\n print('Unable to login and get spreadsheet. Check OAuth credentials, spreadsheet name, and make sure spreadsheet is shared to the client_email address in the OAuth .json file!')\r\n print('Google sheet login failed with error:', ex)\r\n print(datetime.datetime.now())\r\n sys.exit(1)", "def load_menu_options():\n workbook_path = get_workbook_path()\n wb = openpyxl.load_workbook(workbook_path)\n sheet_names = wb.sheetnames\n return sheet_names", "def login_open_sheet(oauth_key_file, spreadsheet):\r\n try:\r\n scope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']\r\n credentials = ServiceAccountCredentials.from_json_keyfile_name(oauth_key_file, scope)\r\n gc = gspread.authorize(credentials)\r\n worksheet = gc.open(spreadsheet).sheet1\r\n return worksheet\r\n\r\n except Exception as ex:\r\n print('Unable to login and get spreadsheet. 
Check OAuth credentials, spreadsheet name, and make sure spreadsheet is shared to the client_email address in the OAuth .json file!')\r\n print('Google sheet login failed with error:', ex)\r\n sys.exit(1)", "def get_stock_data(name):\n from ._ids import _stock_parts\n\n if name in _stock_parts:\n return read_drive_data(_stock_parts[name])\n else:\n raise IndexError(f\"Uknown company name. {name} was given.\")", "def getAllSheetNames(self):\n\n\t\t\treturn self.thing.get_sheet_names()", "def getActiveSheetName():\n document = Context.getDocument()\n activeSheet = document.getCurrentController().getActiveSheet()\n return activeSheet.getName()", "def login_open_sheet(oauth_key_file, spreadsheet):\n\ttry:\n\t\tjson_key = json.load(open(oauth_key_file))\n\t\tcredentials = SignedJwtAssertionCredentials(json_key['client_email'],\n\t\t\t\t\t\t\t\t\t\t\t\t\tjson_key['private_key'],\n\t\t\t\t\t\t\t\t\t\t\t\t\t['https://spreadsheets.google.com/feeds'])\n\t\tgc = gspread.authorize(credentials)\n\t\tworksheet = gc.open(spreadsheet).sheet1\n\t\treturn worksheet\n\texcept Exception as ex:\n\t\tprint 'Unable to login and get spreadsheet. Check OAuth credentials, spreadsheet name, and make sure spreadsheet is shared to the client_email address in the OAuth .json file!'\n\t\tprint 'Google sheet login failed with error:', ex\n\t\tsys.exit(1)", "def sheet(self, name, encoding=None, order_by=None):\n return _ExcelSheet(self, name, encoding, order_by)", "def _get_sheet(self, ws_name):\n return self._spreadsheet.sheet_by_name(ws_name)", "def get_xlsx_report(url, sheet_name):\n r = requests.get(url, verify=False)\n data = pyexcel_xlsx.get_data(io.BytesIO(r.content))\n return data[sheet_name]", "def get_sheet():\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n service = build('sheets', 'v4', credentials=creds)\n\n # Call the Sheets API\n sheet = service.spreadsheets()\n return sheet", "def get_worksheet(self, workbook):\n for worksheet_name in workbook.sheet_names():\n return workbook.sheet_by_name(worksheet_name)", "def login_open_sheet(oauth_key_file, spreadsheet):\r\n\ttry:\r\n\t\tscope = ['https://spreadsheets.google.com/feeds','https://www.googleapis.com/auth/drive','https://www.googleapis.com/auth/spreadsheets','https://www.googleapis.com/auth/drive']\r\n\r\n\t\tjson_key = json.load(open(oauth_key_file))\r\n\t\tcredentials = SignedJwtAssertionCredentials(json_key['client_email'],json_key['private_key'],scope)\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\r\n\t\t#below line doesn't work anymore so commented, and use the other way to authorize\t\t\t\t\t\t\t\t\t\t\t\r\n\t\t#gc = gspread.authorize(credentials)\r\n\t\t#instead, use one of the 2 below lines\r\n\t\t#gc = gspread.service_account(filename='D:\\Online Classes\\edureka\\Edureka materials\\S3\\code\\iotsheets-276804-a20f837deb72.json')\r\n\t\tgc = 
gspread.service_account(GDOCS_OAUTH_JSON)\r\n\t\t\r\n\t\tworksheet = gc.open(spreadsheet).sheet1\r\n\t\treturn worksheet\r\n\texcept Exception as ex:\r\n\t\tprint('Unable to login and get spreadsheet. Check OAuth credentials, spreadsheet name, and make sure spreadsheet is shared to the client_email address in the OAuth .json file!')\r\n\t\tprint('Google sheet login failed with error:', ex)\r\n\t\tsys.exit(1)", "def get_share_list():\n url = \"https://www1.nseindia.com/content/equities/EQUITY_L.csv\"\n resp = requests.get(url)\n resp = csv_to_list(resp)[1:-1]\n return create_stock(resp)", "def company_share_owned(self, ticker):\n self.__validate_google_credentials()\n sheet = self.service.spreadsheets()\n result = sheet.values().get(spreadsheetId=self.google_spreadsheet_id,\n range=SAMPLE_RANGE_NAME).execute()\n values = result.get('values', [])\n for row in values:\n if ticker == row[1]:\n return True\n return False", "def get_xls(xls_name, sheet_name):\n cls = []\n # get xls file's path\n xlsPath = os.path.join(proDir, \"testFile\", 'case', xls_name)\n # open xls file\n file = open_workbook(xlsPath)\n # get sheet by name\n sheet = file.sheet_by_name(sheet_name)\n # get one sheet's rows\n nrows = sheet.nrows\n for i in range(nrows):\n if sheet.row_values(i)[0] != u'case_name':\n cls.append(sheet.row_values(i))\n return cls", "def getStockList(storeExcel=False, path=None):\n import time\n\n start = time.time()\n stockList = Custom().ScreenerView(columns=[0,1,2,3,4,5,6,7,8,25,30,65,66,67])\n end = time.time()\n\n print('Took {0} Min and {1} Seconds to Query'.format((end - start)//60, (end-start)%60))\n\n if storeExcel:\n stockList.to_excel(path)\n\n return stockList", "def get_sheet(excel_fname, sheet_name=None):\r\n book = xlrd.open_workbook(excel_fname)\r\n\r\n if sheet_name:\r\n\r\n if sheet_name in book.sheet_names():\r\n sheet = book.sheet_by_name(sheet_name)\r\n return sheet\r\n else:\r\n print(\"ERROR: Sheet '{0}' cannot be found in workbook '{1}'\".format(\r\n sheet_name, excel_fname))\r\n sys.exit(1)\r\n\r\n else:\r\n # Get the first worksheet.\r\n sheet = book.sheet_by_index(0)\r\n return sheet", "def main():\n store = file.Storage('token.json')\n creds = store.get()\n if not creds or creds.invalid:\n flow = client.flow_from_clientsecrets('credentials.json', SCOPES)\n creds = tools.run_flow(flow, store)\n service = build('sheets', 'v4', http=creds.authorize(Http()))\n\n # Call the Sheets API\n SPREADSHEET_ID = '1whfqnqc3TM8ui4hjLqCQq9ZVN5kMuTQrRodXvFreZxM'\n result = service.spreadsheets().get(spreadsheetId = SPREADSHEET_ID).execute()\n spreadsheetUrl = result['spreadsheetUrl']\n\n exportUrl = re.sub(\"\\/edit$\", '/export', spreadsheetUrl)\n headers = { 'Authorization': 'Bearer ' + creds.access_token }\n params = { 'format': 'csv',\n 'gid': 0 } \n queryParams = urllib.urlencode(params)\n url = exportUrl + '?' 
+ queryParams\n response = requests.get(url, headers = headers)\n with open(sys.argv[1], 'wb') as csvFile:\n csvFile.write(response.content)", "def obtenerHoja(libro, nombre):\r\n return libro.sheet_by_name(nombre)", "def load_inventory_sheet(workbook):\n sheets = workbook.get_sheet_names()\n return workbook[sheets[-1]]", "def get_drive_worksheet(spreadsheet_key, worksheet_name):\n gspread = get_authenticated_gspread()\n spreadsheet = gspread.open_by_key(spreadsheet_key)\n return spreadsheet.worksheet(worksheet_name)", "def obtain_stock_names():\n\n url = 'https://en.wikipedia.org/wiki/List_of_S%26P_400_companies'\n stock_names = []\n response = requests.get(url, timeout=5)\n content = BeautifulSoup(response.content, \"html.parser\")\n\n # We get stock_names from the web page\n for stock in content.findAll('a', attrs={\"class\": \"external text\"}):\n if(len(stock.text)<=5):\n stock_names.append(stock.text)\n\n # We persist the Stock Names\n save_dir = Path(__file__).parent.parent\n filename = (save_dir / \"../data/stock_names.joblib\").resolve()\n PersistenceAPI.persist_stock_data(stock_names, filename)\n\n return stock_names", "def get_sheetsclient(config, project=\"cscap\"):\n return get_googleapiclient(config, project, \"sheets\", \"v4\")", "def driver():\n\n directory = r\"C:/Users/Aftab Alam/Documents/GitHub\"\n directory = directory + r\"/SRM-placement-analyser/data/\"\n fileList = [directory+\"InfosysResult.xlsx\",directory+\"TCSResult.xlsx\",directory+\"CognizantResult.xlsx\",directory+\"WiproResult.xlsx\"]\n \n listOfPlaced = extractCommonData.extractCommonData(fileList)\n createNewExcelSheet(directory,listOfPlaced)", "def sheets(self):\n result = []\n recordset = self.connection.OpenSchema(20)\n while not recordset.EOF:\n result.append(recordset.Fields[2].Value)\n recordset.MoveNext()\n recordset.Close()\n del recordset\n return result", "def get_stocks():\n print(\"fetching remote...\")\n code_dataframes = pd.read_html(\n 'http://kind.krx.co.kr/corpgeneral/corpList.do?method=download&searchType=13', header=0)[0]\n # 우리가 필요한 것은 회사명과 종목코드이기 때문에 필요없는 column들은 제외해준다.\n print(\"parsing and filtering data...\")\n code_dataframes.종목코드 = code_dataframes.종목코드.map('{:06d}'.format)\n # 한글로된 컬럼명을 영어로 바꿔준다.\n code_dataframes = code_dataframes[['회사명', '종목코드']]\n code_dataframes = code_dataframes.rename(\n columns={'회사명': 'name', '종목코드': 'code'})\n codes = code_dataframes['code']\n names = code_dataframes['name']\n stocks = []\n for i in range(len(names)):\n stocks.append({\n 'name': names[i],\n 'code': codes[i]\n })\n return stocks", "def openExcelSheet(outputFileName):\n workbook = Workbook()\n worksheet = workbook.add_sheet(\"Sheet 1\")\n return workbook, worksheet", "def connect_book(self):\n \n print('Connecting to excel workbook... 
',end=''),\n self.book = xw.Book(self.file_path)\n \n try: self.sheet = self.book.sheets[self.sheet]\n except:\n while True:\n sheet_name = input(\"\"\"\\tSheet name: \"\"\") or 'Sheet1'\n try:\n self.sheet = self.book.sheets[sheet_name]\n break\n except: #forgive me, Guido, for I have sinned\n print('\\tNo matching sheet name, please try again.')\n continue\n \n print('Connected')", "def get_company_cik(stock_ticker: str):\n company_cik = sec_finance_functions.get_company_data_by_ticker(stock_ticker).company_cik\n return company_cik", "def get_worksheet(spreadsheet, name, create_if_non_existant=True, creation_func=None):\n for worksheet in spreadsheet.worksheets():\n if worksheet.title == name:\n return worksheet\n if create_if_non_existant:\n worksheet = spreadsheet.add_worksheet(title=name, rows=\"300\", cols=\"10\")\n if creation_func:\n creation_func(worksheet)\n return worksheet\n return None", "def spreadsheets(self):\r\n return resource.Spreadsheets(self)", "def spreadsheet(self, key):\r\n return resource.Spreadsheet(self, key)", "def get_info():\r\n\r\n path = \"data.xlsx\" # change path depending on the name and location of the file\r\n xl_book = xlrd.open_workbook(path)\r\n xl_sheet = xl_book.sheet_by_index(0) # selects the first sheet in the spreadsheet\r\n emails = xl_sheet.col_values(1, 1) # emails are in second column\r\n names = xl_sheet.col_values(0, 1) # client names are in first column\r\n return emails, names", "def test_open_order_sheet(self):\n order_processor = OrderProcessor()\n order_processor.open_order_sheet('COMP_3522_A4_orders.xlsx')\n self.assertTrue(self, isinstance(order_processor.orders_data_frame,\n DataFrame))", "def setUp(self):\n wb = open_workbook(filename=self.filename)\n\n self.port_values = {}\n\n # find sheets that contain cash\n sheet_names = wb.sheet_names()\n for sn in sheet_names:\n if len(sn) > 4 and sn[-4:] == '-BOC':\n # print('read from sheet {0}'.format(sn))\n ws = wb.sheet_by_name(sn)\n read_cash(ws, self.port_values)", "def add_headings(self,from_date,to_date,warehouses,locations,workbook,header_bold,body_style,qty_cell_style,value_style,blank_cell_style):\n sheet_data={}\n row_data={}\n if warehouses:\n for warehouse in warehouses:\n warehouse.name_worksheet = workbook.add_sheet(warehouse.name,cell_overwrite_ok=True)\n warehouse.name_worksheet.row(0).height = 400\n warehouse.name_worksheet.row(1).height = 400\n warehouse.name_worksheet.row(3).height = 400\n warehouse.name_worksheet.col(0).width = 8000\n warehouse.name_worksheet.col(1).width = 10000\n warehouse.name_worksheet.col(2).width = 3000\n warehouse.name_worksheet.col(3).width = 3000\n warehouse.name_worksheet.col(4).width = 1200\n warehouse.name_worksheet.col(5).width = 5000\n warehouse.name_worksheet.col(6).width = 5000\n warehouse.name_worksheet.col(7).width = 5000\n warehouse.name_worksheet.col(8).width = 7000\n warehouse.name_worksheet.col(9).width = 7000\n warehouse.name_worksheet.col(10).width = 5000\n warehouse.name_worksheet.col(11).width = 1200\n warehouse.name_worksheet.col(12).width = 7000\n warehouse.name_worksheet.col(13).width = 7000\n warehouse.name_worksheet.col(14).width = 1200\n warehouse.name_worksheet.col(15).width = 6000\n warehouse.name_worksheet.col(16).width = 6000\n warehouse.name_worksheet.write(0,0,'From Date',header_bold)\n warehouse.name_worksheet.write(0,1,from_date,body_style)\n warehouse.name_worksheet.write(1,0,'To Date',header_bold)\n warehouse.name_worksheet.write(1,1,to_date,body_style)\n warehouse.name_worksheet.write(3,0,'Internal Reference 
',header_bold)\n warehouse.name_worksheet.write(3,1,'Name',header_bold)\n warehouse.name_worksheet.write(3,2,'Cost',header_bold)\n warehouse.name_worksheet.write(3,3,'Sales Price',header_bold)\n warehouse.name_worksheet.write(3,4,None,blank_cell_style)\n warehouse.name_worksheet.write(3,5,'Opening Stock',header_bold)\n warehouse.name_worksheet.write(3,6,'Purchase in period',header_bold)\n warehouse.name_worksheet.write(3,7,'Sales in Period',header_bold)\n warehouse.name_worksheet.write(3,8,'Discarded in Period(OUT)',header_bold)\n warehouse.name_worksheet.write(3,9,'Adjusted in Period(IN)',header_bold)\n warehouse.name_worksheet.write(3,10,'Closing Stock',header_bold)\n warehouse.name_worksheet.write(3,11,None,blank_cell_style)\n warehouse.name_worksheet.write(3,12,'Warehouse Transfer(IN)',header_bold)\n warehouse.name_worksheet.write(3,13,'Warehouse Transfer(OUT)',header_bold)\n warehouse.name_worksheet.write(3,14,None,blank_cell_style)\n warehouse.name_worksheet.write(3,15,'Last purchase',header_bold)\n warehouse.name_worksheet.write(3,16,'Last sale',header_bold)\n warehouse.name_worksheet.set_panes_frozen(True)\n warehouse.name_worksheet.set_horz_split_pos(4) \n warehouse.name_worksheet.set_vert_split_pos(2)\n sheet_data.update({warehouse.id: warehouse.name_worksheet})\n row_data.update({warehouse.name_worksheet: 4})\n if locations:\n for location in locations:\n location.name_worksheet = workbook.add_sheet(location.name,cell_overwrite_ok=True)\n location.name_worksheet.row(0).height = 400\n location.name_worksheet.row(1).height = 400\n location.name_worksheet.row(3).height = 400\n location.name_worksheet.col(0).width = 8000\n location.name_worksheet.col(1).width = 10000\n location.name_worksheet.col(2).width = 3000\n location.name_worksheet.col(3).width = 3000\n location.name_worksheet.col(4).width = 1200\n location.name_worksheet.col(5).width = 5000\n location.name_worksheet.col(6).width = 5000\n location.name_worksheet.col(7).width = 5000\n location.name_worksheet.col(8).width = 7000\n location.name_worksheet.col(9).width = 7000\n location.name_worksheet.col(10).width = 5000\n location.name_worksheet.col(11).width = 1200\n location.name_worksheet.col(12).width = 7000\n location.name_worksheet.col(13).width = 7000\n location.name_worksheet.col(14).width = 1200\n location.name_worksheet.col(15).width = 6000\n location.name_worksheet.col(16).width = 6000\n location.name_worksheet.write(0,0,'From Date',header_bold)\n location.name_worksheet.write(1,0,'To Date',header_bold)\n location.name_worksheet.write(0,1,from_date,body_style)\n location.name_worksheet.write(1,1,to_date,body_style)\n location.name_worksheet.write(3,0,'Internal Reference ',header_bold)\n location.name_worksheet.write(3,1,'Name',header_bold)\n location.name_worksheet.write(3,2,'Cost',header_bold)\n location.name_worksheet.write(3,3,'Sales Price',header_bold)\n location.name_worksheet.write(3,4,None,blank_cell_style)\n location.name_worksheet.write(3,5,'Opening Stock',header_bold)\n location.name_worksheet.write(3,6,'Purchase in period',header_bold)\n location.name_worksheet.write(3,7,'Sales in Period',header_bold)\n location.name_worksheet.write(3,8,'Discarded in Period(OUT)',header_bold)\n location.name_worksheet.write(3,9,'Adjusted in Period(IN)',header_bold)\n location.name_worksheet.write(3,10,'Closing Stock',header_bold)\n location.name_worksheet.write(3,11,None,blank_cell_style)\n location.name_worksheet.write(3,12,'Warehouse Transfer(IN)',header_bold)\n location.name_worksheet.write(3,13,'Warehouse 
Transfer(OUT)',header_bold)\n location.name_worksheet.write(3,14,None,blank_cell_style)\n location.name_worksheet.write(3,15,'Last purchase',header_bold)\n location.name_worksheet.write(3,16,'Last sale',header_bold)\n location.name_worksheet.set_panes_frozen(True)\n location.name_worksheet.set_horz_split_pos(4) \n location.name_worksheet.set_vert_split_pos(2)\n sheet_data.update({location.id: location.name_worksheet})\n row_data.update({location.name_worksheet: 4})\n return workbook,sheet_data,row_data", "def load_sheet(sheet_name):\n workbook_path = get_workbook_path()\n wb = openpyxl.load_workbook(workbook_path)\n sheet_obj = wb[sheet_name]\n return sheet_obj, wb", "def __init__(self):\n # Param\n ## self.target_stocks use mainly for a few stocks.\n ## it also use when setting the 45 or 50 stocks at a time to url\n self.target_stocks = ['S58.SI','S68.SI'] ##special character need to be converted\n self.full_stocklist_to_retrieve = [] #full range fo stocks\n \n # for difffernt retrieval, based on the dict available to select the file type\n # currently have \"watcher\", \"all\" where watcher is the selected stocks to watch.\n self.stock_retrieval_type = 'watcher' \n\n ## current data .csv file url formation\n #header to match the sequence of the formed url\n self.cur_quotes_parm_headers = ['NAME', 'SYMBOL', 'LATEST_PRICE', 'OPEN', 'CLOSE','VOL',\n 'YEAR_HIGH','YEAR_LOW'] #label to be use when downloading.\n \n # URL forming for price details\n self.cur_quotes_start_url = \"http://download.finance.yahoo.com/d/quotes.csv?s=\"\n self.cur_quotes_stock_portion_url = ''\n self.cur_quotes_stock_portion_additional_url = '.SI'# for adding additonal str to the stock url.\n self.cur_quotes_property_portion_url = ''\n self.cur_quotes_property_str = 'nsl1opvkj' #default list of properties to copy.\n self.cur_quotes_end_url = \"&e=.csv\"\n self.cur_quotes_full_url = ''\n\n # Properties from excel\n self.enable_form_properties_fr_exceltable = 1\n self.properties_excel_table = r'C:\\pythonuserfiles\\yahoo_finance_data_extract\\Individual_stock_query_property.xls'\n\n # Output storage\n self.cur_quotes_csvfile = r'c:\\data\\temp\\stock_data.csv'\n self.cur_quotes_df = object()\n\n ## !!!\n self.cur_quotes_url_list = [] # store of all the url list being query. 
For debug.\n\n # for debug/printing\n self.store_individual_set_df = []\n self.__print_url = 0 # for printing the url string\n\n # input file path\n # dict based on the file for different type of retrieval\n self.retrieval_type_input_file_dict = {\n \"all\" : r'C:\\pythonuserfiles\\yahoo_finance_data_extract\\stocklist.csv',\n \"watcher\": r'c:\\data\\google_stock_screener.csv'\n }", "def create_all_inventory_sheet(self,from_date,to_date,workbook,sheet_data,row_data,body_style,qty_cell_style,value_style,blank_cell_style,header_bold):\n worksheet_all_stock_rotation = workbook.add_sheet('All Stock Rotation',cell_overwrite_ok=True)\n worksheet_all_stock_rotation.row(0).height = 400\n worksheet_all_stock_rotation.row(1).height = 400\n worksheet_all_stock_rotation.row(3).height = 400\n worksheet_all_stock_rotation.col(0).width = 8000\n worksheet_all_stock_rotation.col(1).width = 10000\n worksheet_all_stock_rotation.col(2).width = 3000\n worksheet_all_stock_rotation.col(3).width = 3000\n worksheet_all_stock_rotation.col(4).width = 1200\n worksheet_all_stock_rotation.col(5).width = 5000\n worksheet_all_stock_rotation.col(6).width = 5000\n worksheet_all_stock_rotation.col(7).width = 5000\n worksheet_all_stock_rotation.col(8).width = 7000\n worksheet_all_stock_rotation.col(9).width = 7000\n worksheet_all_stock_rotation.col(10).width = 5000\n worksheet_all_stock_rotation.col(11).width = 1200\n worksheet_all_stock_rotation.col(12).width = 7000\n worksheet_all_stock_rotation.col(13).width = 7000\n worksheet_all_stock_rotation.col(14).width = 1200\n worksheet_all_stock_rotation.col(15).width = 6000\n worksheet_all_stock_rotation.col(16).width = 6000\n worksheet_all_stock_rotation.write(0,0,'From Date',header_bold)\n worksheet_all_stock_rotation.write(0,1,from_date,body_style)\n worksheet_all_stock_rotation.write(1,0,'To Date',header_bold)\n worksheet_all_stock_rotation.write(1,1,to_date,body_style)\n worksheet_all_stock_rotation.write(3,0,'Internal Reference ',header_bold)\n worksheet_all_stock_rotation.write(3,1,'Name',header_bold)\n worksheet_all_stock_rotation.write(3,2,'Cost',header_bold)\n worksheet_all_stock_rotation.write(3,3,'Sales Price',header_bold)\n worksheet_all_stock_rotation.write(3,4,None,blank_cell_style)\n worksheet_all_stock_rotation.write(3,5,'Opening Stock',header_bold)\n worksheet_all_stock_rotation.write(3,6,'Purchase in period',header_bold)\n worksheet_all_stock_rotation.write(3,7,'Sales in Period',header_bold)\n worksheet_all_stock_rotation.write(3,8,'Discarded in Period(OUT)',header_bold)\n worksheet_all_stock_rotation.write(3,9,'Adjusted in Period(IN)',header_bold)\n worksheet_all_stock_rotation.write(3,10,'Closing Stock',header_bold)\n worksheet_all_stock_rotation.write(3,11,None,blank_cell_style)\n worksheet_all_stock_rotation.write(3,12,'Warehouse Transfer(IN)',header_bold)\n worksheet_all_stock_rotation.write(3,13,'Warehouse Transfer(OUT)',header_bold)\n worksheet_all_stock_rotation.write(3,14,None,blank_cell_style)\n worksheet_all_stock_rotation.write(3,15,'Last purchase',header_bold)\n worksheet_all_stock_rotation.write(3,16,'Last sale',header_bold)\n worksheet_all_stock_rotation.set_panes_frozen(True)\n worksheet_all_stock_rotation.set_horz_split_pos(4) \n worksheet_all_stock_rotation.set_vert_split_pos(2)\n return workbook,worksheet_all_stock_rotation", "def getClient(self):\r\n client = SpreadsheetsService()\r\n\r\n try:\r\n client.GetWorksheetsFeed(self.spreadsheet_key, visibility='public',\r\n projection='basic')\r\n except gaierror:\r\n client = None\r\n\r\n 
return client", "def show_book_content(workbook = None):\n if not workbook:\n return\n \n print \"work sheet info:\"\n print \"#, name, #rows, #cols\"\n for casename in workbook.sheet_names():\n sheet = workbook.sheet_by_name(casename)\n print sheet.number, sheet.name, sheet.nrows, sheet.ncols", "def FetchSpreadsheetFeeds(client, key, sheets, cols):\n worksheets_feed = client.GetWorksheetsFeed(key)\n print 'Fetching data from the worksheet: %s' % worksheets_feed.title.text\n worksheets_data = {}\n titles = []\n for entry in worksheets_feed.entry:\n worksheet_id = entry.id.text.split('/')[-1]\n list_feed = client.GetListFeed(key, worksheet_id)\n list_data = []\n # Hack to deal with sheet names like 'sv (Copy of fl)'\n title = list_feed.title.text.split('(')[0].strip()\n titles.append(title)\n if title not in sheets:\n continue\n print 'Reading data from the sheet: %s' % list_feed.title.text\n for i, entry in enumerate(list_feed.entry):\n line_data = {}\n for k in entry.custom:\n if (k not in cols) or (not entry.custom[k].text):\n continue\n line_data[k] = entry.custom[k].text\n list_data.append(line_data)\n worksheets_data[title] = list_data\n PrintDiffs('Exist only on the spreadsheet: ', titles, sheets)\n PrintDiffs('Specified but do not exist on the spreadsheet: ', sheets, titles)\n return worksheets_data", "def _initialize_quotes_spreadsheet(self, spreadsheet_name):\n gc = gspread.authorize(self.credentials)\n sheet = gc.open(spreadsheet_name)\n sheet.worksheets() # Necessary to remind gspread that Sheet1 exists, otherwise gpsread forgets about it\n\n try:\n qs = sheet.worksheet('Quotes')\n except gspread.exceptions.WorksheetNotFound:\n qs = sheet.add_worksheet('Quotes', 1000, 2)\n sheet1 = sheet.worksheet('Sheet1')\n sheet.del_worksheet(sheet1)\n\n qs.update_acell('A1', 'Quote Index')\n qs.update_acell('B1', 'Quote')\n\n # self.update_quote_spreadsheet()", "def excel_out(employees_dict, path):\n # Create workbook and worksheet\n try:\n workbook = xlsxwriter.Workbook(path)\n except:\n return False\n worksheet = workbook.add_worksheet(name='Прокуратура')\n # Add format to workbook\n format_headers_po = workbook.add_format( {'align': 'center',\n 'valign': 'vcenter',\n 'bold': True,\n 'font_size': 14,\n 'font_name': 'Times New Roman',\n 'bg_color': '#FFCA28',\n 'border': 2})\n format_headers_department = workbook.add_format( {'align': 'center',\n 'valign': 'vcenter',\n 'bold': True,\n 'font_size': 13,\n 'font_name': 'Times New Roman',\n 'bg_color': '#FFD54F',\n 'border': 2})\n format_headers_division = workbook.add_format( {'align': 'center',\n 'valign': 'vcenter',\n 'bold': True,\n 'font_size': 12,\n 'font_name': 'Times New Roman',\n 'bg_color': '#FFE082',\n 'border': 2})\n format_header = workbook.add_format( {'align': 'center',\n 'valign': 'vcenter',\n 'bold': True,\n 'font_size': 12,\n 'font_name': 'Times New Roman',\n 'bg_color': '#FFF59D',\n 'border': 2})\n employee_format_b = workbook.add_format( {'align': 'left',\n 'valign': 'vcenter',\n 'text_wrap': True,\n 'bold': True,\n 'font_size': 12,\n 'font_name': 'Times New Roman',\n 'border': 2})\n employee_format = workbook.add_format( {'align': 'center',\n 'valign': 'vcenter',\n 'text_wrap': True,\n 'font_size': 12,\n 'font_name': 'Times New Roman',\n 'border': 2})\n format_attribute = workbook.add_format( {'align': 'center',\n 'valign': 'vcenter',\n 'text_wrap': True,\n 'font_size': 10,\n 'font_name': 'Times New Roman',\n 'border': 1})\n\n # Set width of columns and height of rows\n worksheet.set_default_row(40, False)\n 
worksheet.set_column(0, 0, 5)\n worksheet.set_column(1, 1, 25)\n worksheet.set_column(2, 2, 21)\n worksheet.set_column(3, 3, 21)\n worksheet.set_column(4, 4, 21)\n\n # Begin from row\n row = 0\n\n # Parser for employees dictionary\n for po in employees_dict:\n # Прокуратура\n worksheet.merge_range(row, 0, row, 4, data=po.name, cell_format=format_headers_po)\n row += 1\n # Атрибуты Прокуратуры\n row = add_attribute(po, worksheet, row, format_attribute)\n # Header\n row = add_header(worksheet, row, format_header)\n # Работники Прокуратуры\n if 'employees' in employees_dict[po]:\n for num, employee in enumerate(employees_dict[po]['employees'], 1):\n row = add_employee(worksheet, row, employee, num, employee_format, employee_format_b)\n\n # Управление\n if 'departments' in employees_dict[po]:\n for department in employees_dict[po]['departments']:\n worksheet.merge_range(row, 0, row, 4, data=department.name, cell_format=format_headers_department)\n row += 1\n # Атрибуты Управления\n row = add_attribute(department, worksheet, row, format_attribute)\n # Работники Управления\n if 'employees' in employees_dict[po]['departments'][department]:\n for num, employee in enumerate(employees_dict[po]['departments'][department]['employees'], 1):\n row = add_employee(worksheet, row, employee, num, employee_format, employee_format_b)\n # Отдел Управления\n if 'divisions' in employees_dict[po]['departments'][department]:\n for division in employees_dict[po]['departments'][department]['divisions']:\n worksheet.merge_range(row, 0, row, 4, data=division.name, cell_format=format_headers_division)\n row += 1\n # Атрибуты Отдела\n row = add_attribute(division, worksheet, row, format_attribute)\n # Работники Отдела\n for num, employee in enumerate(employees_dict[po]['departments'][department]['divisions'][division], 1):\n row = add_employee(worksheet, row, employee, num, employee_format, employee_format_b)\n\n # Отдел Прокуратуры\n if 'divisions' in employees_dict[po]:\n for division in employees_dict[po]['divisions']:\n worksheet.merge_range(row, 0, row, 4, data=division.name, cell_format=format_headers_division)\n row += 1\n # Атрибуты Отдела\n row = add_attribute(division, worksheet, row, format_attribute)\n # Работники Отдела\n for num, employee in enumerate(employees_dict[po]['divisions'][division], 1):\n row += add_employee(worksheet, row, employee, num, employee_format, employee_format_b)\n try:\n workbook.close()\n except:\n return False\n return True", "def get_worksheets(self) -> list or None:\n if self.connected:\n cursor = self.workbook.cursor()\n if cursor:\n worksheet_names = []\n for table in cursor.tables():\n worksheet_names.append(table['table_name'])\n cursor.close()\n return worksheet_names\n return None", "def get_sheet_by_name(book, name):\n i = 0\n for sheetname in book.sheetnames:\n if sheetname == name:\n return book.worksheets[i]\n i += 1\n raise ValidationError(_(\"'%s' sheet not found\") % (name,))", "def read_companies():\n list_of_companies = data_manager.get_data_from_file(filename=\"company/company_data.csv\")\n return list_of_companies", "def front_sheet(entry, village=False, nurses=False):\n printing_resident_sheets(entry, rf'{constants.OUTPUTS_DIR}\\front_sheet.xlsx')\n printing_documents.create_front_sheet(village=village, nurses=nurses)", "def export(self):\n\n rpt_date = datetime.now()\n filename = 'bushfire_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; 
filename=' + filename\n\n book = Workbook()\n self.ministerial.get_excel_sheet(rpt_date, book)\n self.ministerial_auth.get_excel_sheet(rpt_date, book)\n self.ministerial_268.get_excel_sheet(rpt_date, book)\n self.quarterly.get_excel_sheet(rpt_date, book)\n self.by_tenure.get_excel_sheet(rpt_date, book)\n self.by_cause.get_excel_sheet(rpt_date, book)\n self.region_by_tenure.get_excel_sheet(rpt_date, book)\n self.indicator.get_excel_sheet(rpt_date, book)\n self.by_cause_10YrAverage.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 1')\n book.save(response)\n\n return response", "def sheet_name(self, name):\n if self.sheet:\n self._newline\n self._cell('')\n\n self._cell(name)\n self.sheet = name", "def readSpreadsheet(url: str, cres_loc: str, alias:str):\n # gc = gspread.oauth() # oauth config, TODO (northdpole): make this configurable\n changes_present = False\n try:\n gc = gspread.service_account()\n sh = gc.open_by_url(url)\n logger.debug(\"accessing spreadsheet \\\"%s\\\" : \\\"%s\\\"\"%(alias,url))\n for wsh in sh.worksheets():\n if wsh.title[0].isdigit():\n logger.debug(\n \"handling worksheet %s (remember, only numbered worksheets will be processed by convention)\" % wsh.title)\n records = wsh.get_all_records()\n toyaml = yaml.safe_load(yaml.dump(records))\n try:\n validateYaml(yamldoc=toyaml, schema=CRE_LINK_schema)\n logger.debug(\"Worksheet is valid, saving to disk\")\n with open(os.path.join(cres_loc, wsh.title + \".yaml\"), \"wb\") as fp:\n fp.write(yaml.dump(toyaml, encoding='utf-8'))\n changes_present = True\n except jsonschema.exceptions.ValidationError as ex:\n logger.error(wsh.title + \" failed validation\")\n logger.error(ex)\n except gspread.exceptions.APIError as ae:\n logger.error(\"Error opening spreadsheet \\\"%s\\\" : \\\"%s\\\"\"%(alias,url))\n logger.error(ae)\n return changes_present", "def home_office(ctx, year=CURRENT_YEAR):\n ss = open_spreadsheet('Home Office %s' % year)\n\n worksheet = ss.worksheet('Monthly fees')\n categories = defaultdict(Decimal)\n\n for row in worksheet.get_all_records():\n categories['hoa assessments'] += get_decimal(row['hoa assessments'])\n categories['homeowners insurance'] += get_decimal(row['homeowners insurance'])\n categories['mortgage'] += get_decimal(row['mortgage'])\n categories['utilities (gas & electric)'] += \\\n get_decimal(row['electric']) + get_decimal(row['gas'])\n\n data = [(k.capitalize(), v) for k, v in categories.items()]\n\n data += [\n (f'Total for {year}', sum(categories.values())),\n (f'Office rent for {year}', sum(categories.values()) / 4),\n ('Repairs & maintenance', get_rm_total(ss)),\n ]\n table = AsciiTable(data, 'Home office')\n table.inner_heading_row_border = False\n print(table.table)", "def get_sheet():\n\n with open(\"survey.csv\", \"r\") as f:\n reader = csv.reader(f)\n users = list(reader)\n return render_template(\"userslist.html\", users=users)", "def set_amiSheetNames(self):\n\n self.pres_sheetname = None\n self.edit_sheetname = None\n self.notransfer_sheetname = None\n\n for sheet in self.wb.sheet_names():\n sheet_lower = sheet.lower()\n #Check if two sheets get identfied by regex below?\n if re.match(\"(original|preservation|file|full|archive)\",\n sheet_lower):\n self.pres_sheetname = sheet\n elif re.match(\"edit\", sheet_lower):\n self.edit_sheetname = sheet\n elif re.match(\"not transferred\", sheet_lower):\n self.notransfer_sheetname = sheet", "def get_balance_sheet(api_key, ticker, period, ftype):\n \n settings.set_apikey(api_key)\n df = cv.balance_sheet(ticker = ticker, period = 
period, ftype = ftype)\n return df", "def get_repo_names(sheet, json_key):\n # Use creds to create a client to interact with the Google Drive API\n scope = ['https://spreadsheets.google.com/feeds']\n creds = ServiceAccountCredentials.from_json_keyfile_name(json_key, scope)\n client = gspread.authorize(creds)\n \n \n # Load repos from the spreadsheet\n try:\n records = client.open(sheet).get_worksheet(0).get_all_records()\n rtrn = list({rec[\"repo_name\"] for rec in records if rec[\"use_repo\"] == 1})\n rtrn.sort()\n print(\"Got %s repos.\" %(len(rtrn)))\n return rtrn\n except gspread.exceptions.SpreadsheetNotFound as e:\n print(\"\\nSpreadsheet not found. Did you share the sheet with the client email in your JSON oauth file?\")\n all_sheets = client.openall()\n for s in all_sheets:\n print(s.title)\n print(\"\\n\")\n raise e", "def write_one_sheet(self, key):\n # Get sheet #\n sheet = self.writer.sheets[key]\n # Get dataframes #\n all_dfs = self.sheet_to_dfs[key]\n # Initialize #\n row = 0\n # Loop #\n for info in all_dfs:\n # Get dataframe #\n df = info['dataframe']\n # Write custom title #\n sheet.write_string(row, 0, info.get('title', ''))\n row += 2\n # Add extras #\n df.index.name = info.get('y_extra', '')\n df.columns.name = info.get('x_extra', '')\n # Add Y labels #\n title, label = info.get('y_title', ''), info.get('y_label', '')\n df = pandas.concat({title: df}, names=[label])\n # Add X labels #\n title, label = info.get('x_title', ''), info.get('x_label', '')\n df = pandas.concat({title: df}, names=[label], axis=1)\n # Write dataframe #\n df.to_excel(self.writer,\n sheet_name = key,\n startrow = row,\n startcol = self.indentation)\n # Increment #\n row += len(df.index) + self.spacing", "def gsheet_handler(spread_workbook:str, sheet_name:str,path_to_credentials:str('super_user.json'), method='Read',action = 'append_rows',is_add_sheet=False, df=None,row_cutoff=0,col_cutoff=0,keep_headers=False):\n \n scopes = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive',\n 'https://www.googleapis.com/auth/spreadsheets']\n \n credentials = ServiceAccountCredentials.from_json_keyfile_name(path_to_credentials, scopes=scopes)\n gsc = gspread.authorize(credentials)\n ws = gsc.open(spread_workbook)\n \n if is_add_sheet ==False:\n \n # Get existing sheet data\n wb = ws.worksheet(sheet_name)\n wb_df = wb.get_all_records()\n wb_df = pd.DataFrame.from_dict(wb_df)\n \n if wb_df is not None:\n n_row = wb_df.shape[0] \n n_col = wb_df.shape[1]\n \n if method == 'Read':\n \n return wb_df\n \n elif (method =='write') & (is_add_sheet):\n wb = ws.add_worksheet(rows=10000,cols=100,title=sheet_name)\n gd.set_with_dataframe(wb,df,include_column_header= keep_headers)\n \n elif (method =='write') & (action == 'refresh_sheet'):\n wb.clear()\n gd.set_with_dataframe(wb,df,row=1+row_cutoff,include_column_header=keep_headers) \n \n elif (method =='write') & (action == 'append_rows'):\n gd.set_with_dataframe(wb,df,row=n_row+1+row_cutoff,include_column_header=keep_headers) \n \n elif (method =='write') & (action == 'append_columns'):\n gd.set_with_dataframe(wb,df,col=n_col+1+col_cutoff,include_column_header=keep_headers) \n \n else:\n print(\"None action are performed\")\n \n return wb", "def get_worksheet(sheet_id, sheet_name):\n if (sheet_id, sheet_name) in WORKSHEET_CACHE:\n return WORKSHEET_CACHE[(sheet_id, sheet_name)]\n\n sheet = get_spreadsheet(sheet_id)\n worksheet = sheet.worksheet(sheet_name)\n\n WORKSHEET_CACHE[(sheet_id, sheet_name)] = worksheet\n return worksheet", "def 
getCompanyName(self, stockSymbol):\n return self.db.select_company_name(stockSymbol)", "def mono_sheet(self):\n xls = pandas.read_excel(str(self.source))\n xls.to_csv(str(self.dest), **self.kwargs)", "def init_worksheet(SPREADSHEET_ID, ws_name):\n\n creds = oauth_file.Storage(f\"{os.environ['HOME']}/token.json\").get()\n gc = gspread.authorize(creds)\n wb = gc.open_by_key(SPREADSHEET_ID)\n\n try:\n ws = wb.worksheet(ws_name)\n except WorksheetNotFound:\n ws = wb.add_worksheet(ws_name, rows=1, cols=1)\n return ws", "def get_company_info(company_name):\n\n # Fix formatting of name\n co = company_name.replace(\".\", \"\").replace(\" \", \"%20\")\n\n query = f\"http://d.yimg.com/autoc.finance.yahoo.com/autoc?query={co}\\\n &region=1&lang=en&callback=YAHOO.Finance.SymbolSuggest.ssCallback\"\n\n response = requests.get(query)\n\n fdata = response.text.split(\"(\", 1)[1]\n fdata = fdata.rsplit(\")\", 1)[0]\n data = json.loads(fdata)\n yahoo_json = data[\"ResultSet\"][\"Result\"]\n\n return yahoo_json", "def read_xls_csv(self):\n filename = str(self.filename)\n location_stock_id = self.location\n vals = []\n inventory_create = self.env['stock.inventory']\n\n if (filename.endswith('xls') or filename.endswith('xlsx')):\n wb = xlrd.open_workbook(\n file_contents=base64.decodestring(self.xls_file))\n sheet = wb.sheet_by_index(0)\n\n for i in range(1, sheet.nrows):\n row = sheet.row_values(i)\n firstrow = sheet.row_values(0)\n firstrow = [str(item).lower() for item in firstrow]\n pid = row[firstrow.index('id')]\n quantity = row[firstrow.index('quantity')]\n product_obj = self.env['product.product'].search(\n [('id', '=', pid)])\n vals.append({\n 'product_code': product_obj.default_code,\n 'product_qty': quantity,\n 'location_id': location_stock_id.id,\n 'product_id': product_obj.id\n })\n inv = inventory_create.create({'name': self.inventory_name,\n 'location_id': location_stock_id.id,\n 'filter': 'partial'})\n stock_inventory_line = self.env['stock.inventory.line']\n # inv.prepare_inventory()\n for record in vals:\n record.update({'inventory_id': inv.id})\n stock_inventory_line.create(record)\n inv.action_done()\n\n else:\n xls_file = base64.b64decode(self.xls_file)\n file_input = cStringIO.StringIO(xls_file)\n file_input.seek(0)\n rows = []\n delimeter = ','\n reader = csv.reader(file_input, delimiter=delimeter,\n lineterminator='\\r\\n')\n for row in reader:\n rows.append(row)\n for row in rows[1:]:\n rows[0] = [str(item).lower() for item in rows[0]]\n product_obj = self.env['product.product'].search(\n [('id', '=', row[rows[0].index('id')])])\n vals.append({\n 'product_code': row[rows[0].index('id')],\n 'product_qty': row[rows[0].index('quantity')],\n 'location_id': location_stock_id.id,\n 'product_id': product_obj.id\n })\n inv = inventory_create.create({'name': self.inventory_name,\n 'location_id': location_stock_id.id,\n 'filter': 'partial'})\n stock_inventory_line = self.env['stock.inventory.line']\n # inv.prepare_inventory()\n for record in vals:\n record.update({'inventory_id': inv.id})\n stock_inventory_line.create(record)\n inv.action_done()\n return {\n 'name': 'Stock import',\n 'type': 'ir.actions.act_window',\n 'view_type': 'form',\n 'res_id': self.id,\n 'view_mode': 'tree,form',\n 'res_model': 'stock.inventory',\n 'target': 'current',\n }", "def lectxl(NOM):\n #NOM=input(\"nom du fichier:\")#interactif\n #NOM=str(NOM +\".xlsx\")\n workbook = xlrd.open_workbook(NOM)\n SheetNameList = workbook.sheet_names()\n worksheet = workbook.sheet_by_name(SheetNameList[0])\n num_rows = 
worksheet.nrows \n f=[NOM]\n for curr_row in range(0,num_rows):\n row = worksheet.row(curr_row)\n f.append(row)\n return f", "def get_stock_data(company, start_date_inc, stop_date_inc):\n\n api_key = 'Bo9P_cJnmf5EsQPp1Bdp'\n desired_cols = 'date,close'\n\n# ticker = 'FB'\n# start_date_inc = '20170801'\n# end_date_inc = '20170831'\n\n # format and send the request\n payload = {\n 'date.gte': start_date_inc,\n 'date.lte': stop_date_inc,\n 'ticker': company,\n 'qopts.columns': desired_cols,\n 'api_key': api_key\n }\n meta_url = r'https://www.quandl.com/api/v3/datatables/WIKI/PRICES'\n r = requests.get(meta_url, params=payload)\n\n # convert to a pandas dataframe\n df = pd.DataFrame(r.json()['datatable']['data'])\n if not df.empty:\n df.columns = ['date', 'price']\n df['date'] = pd.to_datetime(df['date'])\n\n return df", "def sheets_service() -> object:\n g_sheets_service = build('sheets', 'v4', credentials=google_creds())\n\n return g_sheets_service", "def getCruXl(session: Session, url: str) -> None:\n \n with session as s:\n try:\n res = s.get(url)\n assert res.status_code == 200\n \n month = datetime.now().strftime(\"%B\").lower()\n year = datetime.now().strftime(\"%Y\")\n yyyy_mm_dd = datetime.now().strftime(\"%Y-%m-%d\")\n\n found = False \n # Check if the current month's Excel file exists:\n for link in res.html.absolute_links:\n if f\"steel-monitor-{month}-{year}-prices.xlsx\" in link:\n found = True\n excel_url = link\n else:\n pass\n print(f\"Downloading Excel file from: {excel_url}\")\n\n # If current month's Excel file is not found, then get the url for previous month\n if not found:\n prev_month = (datetime.now() + relativedelta(months=-1)).strftime(\"%B\").lower()\n prev_year = (datetime.now() + relativedelta(months=-1)).strftime(\"%Y\")\n for link in res.html.absolute_liks:\n if f\"steel-monitor-{prev_month}-{prev_year}-prices.xlsx\" in link:\n excel_url = link\n print(f\"Downloading Excel file from: {excel_url}\")\n\n # Make a REQUEST for the Excel file\n res_xl = s.get(excel_url, stream=True)\n assert res_xl.status_code == 200\n\n print('Checking if storage location exists...')\n save_directory = Path('data')\n\n # Check if \"data\" folder exists: if not, do nothing, else save Excel file\n if not save_directory.exists():\n print('Storage location does not exist. Excel file will not be downloaded.')\n else:\n print('Storage location found. 
Downloading Excel file...')\n # Stream the Excel file 1MB at a time (\"lazy loading\") to the file system\n with open(save_directory / f\"cru_steel_prices-{yyyy_mm_dd}.xlsx\",\"wb\") as excel:\n for chunk in res_xl.iter_content(chunk_size=1024):\n # writing one chunk at a time to excel file\n if chunk:\n excel.write(chunk)\n print(\"Finished downloading Excel file\")\n except Exception as e:\n print(\"Error - Could not download the Excel file: \", e)\n finally:\n # Log out of the site\n r = session.get('https://cruonline.crugroup.com/logout')\n assert r.status_code == 200", "def one_row_worksheet(scope=\"class\"):\n\n return worksheet.Worksheet(\n [{\"Date\": \"May 2 ,2021\", \"Activity\": \"Play Games!\", \"Leader\": \"RandomPerson1\"}],\n 0)", "def get_sheet_names(file_path):\n with open_xlsb(file_path) as wb:\n return wb.sheets", "def get_company_name(self):\n\t\treturn call_sdk_function('PrlLic_GetCompanyName', self.handle)", "def _open_data_source(self, *args):\n if len(args) != 0:\n # For first call to open (open())\n self.ds_filename = args[0]\n self.ds_tablename = args[1]\n self.ds_file = load_workbook(filename = args[0], use_iterators = True)\n self.ds_table = self.ds_file.get_sheet_by_name(name = args[1])\n else:\n # For reopening the file (reset())\n self.ds_file = load_workbook(filename = self.ds_filename, use_iterators = True)\n self.ds_table = self.ds_file.get_sheet_by_name(name = self.ds_tablename)\n # In any case we need a reader object to iterate over the table content \n self.ds_reader = self.ds_table.iter_rows()", "def create_sheet(self):\n workbook = xlwt.Workbook()\n borders = Borders()\n header_border = Borders()\n header_border.left,header_border.right,header_border.top,header_border.bottom = Borders.THIN,Borders.THIN,Borders.THIN,Borders.THICK\n borders.left,borders.right,borders.top,borders.bottom = Borders.THIN,Borders.THIN,Borders.THIN,Borders.THIN\n header_bold = xlwt.easyxf(\"font: bold on, height 200; pattern: pattern solid, fore_colour gray25;alignment: horizontal center ,vertical center\")\n header_bold.borders=header_border\n body_style = xlwt.easyxf(\"font: height 200; alignment: horizontal left\")\n body_style.borders=borders\n \n ## style for different colors in columns\n xlwt.add_palette_colour(\"light_blue_21\", 0x21)\n workbook.set_colour_RGB(0x21, 153, 255, 255) \n qty_cell_style = xlwt.easyxf(\"font: height 200,bold on, name Arial; align: horiz right, vert center; pattern: pattern solid, fore_colour light_blue_21; borders: top thin,right thin,bottom thin,left thin\")\n \n xlwt.add_palette_colour(\"custom_orange\", 0x22)\n workbook.set_colour_RGB(0x22, 255, 204, 153)\n value_style = xlwt.easyxf(\"font: height 200,bold on, name Arial; align: horiz right, vert center; pattern: pattern solid, fore_colour custom_orange; borders: top thin,right thin,bottom thin,left thin\")\n \n xlwt.add_palette_colour(\"custom_mandys_pink\", 0x20)\n workbook.set_colour_RGB(0x20, 246, 228, 204)\n value_style2 = xlwt.easyxf(\"font: height 200,bold on, name Arial; align: horiz right, vert center; pattern: pattern solid, fore_colour custom_mandys_pink; borders: top thin,right thin,bottom thin,left thin\")\n \n \n xlwt.add_palette_colour(\"custom_yellow\", 0x25)\n workbook.set_colour_RGB(0x25, 255, 255, 179)\n blank_cell_style = xlwt.easyxf(\"font: height 200,bold on, name Arial; align: horiz center, vert center; pattern: pattern solid, fore_colour custom_yellow; borders: top thin,right thin,bottom thin,left thin\")\n return 
workbook,header_bold,body_style,qty_cell_style,value_style,blank_cell_style,value_style2", "def read_from_xlsx(filename='Infosys.xlsx', sheet='Profit & Loss', cell_range=OrderedDict([('A14','J14'), ('A15','J15')])):\n xl = win32com.client.Dispatch('Excel.Application')\n try:\n filename = env.DOWNLOAD_DIR + '\\\\' + filename\n wb = xl.Workbooks.Open(Filename=filename, ReadOnly=1, Editable=True)\n ws = wb.Worksheets(sheet)\n for k, val in cell_range.items():\n print (ws.Range(k + ':' + val).Value)\n\n except Exception as e:\n logger.exception(e)\n\n else:\n wb.Close(True)", "def create_company_df(companies):\n\n companies = list(set(companies)) # removes duplicates\n\n symbols = []\n exchanges = []\n ynames = []\n is_us = []\n\n for company in companies:\n sym, exch, yco, usa = check_usa_mkts(get_company_info(company))\n symbols.append(sym)\n exchanges.append(exch)\n ynames.append(yco)\n is_us.append(usa)\n\n marketcaps = []\n sizes = []\n urls = []\n urls_pr = []\n\n for sym, co in zip(symbols, companies):\n if sym == \"n/a\":\n print(f\"Skipping {co}\\n\")\n marketcaps.append(\"n/a\")\n sizes.append(\"n/a\")\n urls.append(\"n/a\")\n urls_pr.append(\"n/a\")\n continue\n\n print(f\"Checking {co} [{sym}]\")\n marketcap = get_market_cap(sym)\n size = id_company_size(marketcap)\n url = get_company_url(sym)\n url_pr = get_press_release_page(url)\n\n marketcaps.append(marketcap)\n sizes.append(size)\n urls.append(url)\n urls_pr.append(url_pr[0])\n\n print(\"Search complete\")\n\n df = pd.DataFrame(\n {\n \"Company\": companies,\n \"Yahoo Listed Co.\": ynames,\n \"Symbol\": symbols,\n \"Exchange\": exchanges,\n \"Market Cap\": marketcaps,\n \"Company Size\": sizes,\n \"Is American\": is_us,\n \"Home URL\": urls,\n \"Press Release URL\": urls_pr,\n }\n )\n\n return df", "def main():\n data = get_sales_data()\n sales_data = [int(num) for num in data]\n update_worksheet(sales_data, 'sales')\n new_surplus_data = calculate_surplus_sandwiches(sales_data)\n update_worksheet(new_surplus_data, 'surplus')\n list_of_last_five_sales = get_last_five_sales_entries()\n stock_data = get_average_sales(list_of_last_five_sales)\n update_worksheet(stock_data, 'stock')\n return stock_data", "def print_xlsx(self):\n if self.date_from and self.date_to:\n if self.date_from > self.date_to:\n raise ValidationError(\"Date From must be less than Date To\")\n\n # active_record = self._context['id']\n # record = self.env['room.accommodation'].browse(active_record)\n data = {\n 'date_from': self.date_from,\n 'date_to': self.date_to,\n 'guest_id': self.guest_id.id,\n 'model_id': self.id,\n 'check_out': self.check_out,\n 'date_today': fields.Datetime.now()\n }\n\n print(\"XLSX Wizard data : \", data)\n\n return {\n 'type': 'ir.actions.report',\n 'data': {\n 'model': 'accommodation.reporting',\n 'options': json.dumps(data, default=date_utils.json_default),\n 'output_format': 'xlsx',\n 'report_name': 'Accommodation Report'\n },\n 'report_type': 'xlsx'\n }", "def printing_resident_sheets(entry, file):\n nhi = entry.get()\n if re.match(\"^[A-Za-z]{3}[0-9]{4}$\", nhi):\n pass\n else:\n popup_error(\"Incorrect NHI format entered, please try again\")\n\n if file_available(file):\n ecase_driver = ecase_downloader.ecase_login()\n ecase_downloader.resident_contacts(ecase_driver, nhi)\n ecase_downloader.preferred_name_and_image(ecase_driver, nhi)\n ecase_driver.quit()", "def OpenFileExcel(self, *args, **kwargs):\n directory = None\n if kwargs is not None:\n for key, value in kwargs.items():\n if key == 'directory':\n directory = 
value\n\n\n\n with wx.FileDialog(self, \"Open report file\", directory,\n wildcard=\"excel files (*.xlsx)|*.xlsx|(*.xls)|*.xlsx|(*.csv)|*.csv\",\n style=wx.FD_OPEN) as fileDialog:\n \n if fileDialog.ShowModal() == wx.ID_CANCEL:\n return \n\n\n else:\n\n pathname = fileDialog.GetPath()\n print('the file to be opened is :'+ pathname)\n\n def openWorkbook(xlapp, xlfile):\n try:\n xlwb = xlapp.Workbooks(xlfile)\n except Exception as e:\n try:\n xlwb = xlapp.Workbooks.Open(xlfile)\n except Exception as e:\n print(e)\n xlwb = None\n return (xlwb)\n\n pathname = os.path.normcase(pathname)\n\n\n try:\n excel = win32.gencache.EnsureDispatch('Excel.Application')\n wb = openWorkbook(excel, pathname)\n #ws = wb.Worksheets('Sheet1')\n excel.Visible = True\n except Exception as e:\n print(e)\n\n finally:\n # RELEASES RESOURCES\n ws = None\n wb = None\n excel = None", "def doLink(self):\n self.log.info('Starting TabLinker for all sheets in workbook')\n \n for n in range(self.rb.nsheets) :\n self.log.info('Starting with sheet {0}'.format(n))\n self.r_sheet = self.rb.sheet_by_index(n)\n self.w_sheet = self.wb.get_sheet(n)\n \n self.rowns, self.colns = self.getValidRowsCols()\n \n self.sheet_qname = urllib.quote(re.sub('\\s','_',self.r_sheet.name))\n self.log.info('Base for QName generator set to: {0}'.format(self.sheet_qname))\n \n self.log.debug('Starting parser')\n self.parseSheet()", "def get_sales_data():\n print(\"Retrieving all the sales information...\")\n data = SHEET.worksheet('sales')\n print(\"Compilation complete!\\n\")\n return data", "def company(self):\n\n x = 0\n my_company = self.data[\"Company Name\"]\n my_account = self.data[\"Account\"]\n result = []\n for i in my_company:\n my_string = i + \" -- \" + my_account[x]\n x += 1\n result.append(my_string)\n\n return result" ]
[ "0.5797708", "0.5627527", "0.55049616", "0.547899", "0.5430646", "0.53921485", "0.53507286", "0.531778", "0.5266958", "0.52605325", "0.5259202", "0.52366793", "0.52347517", "0.5210146", "0.5195938", "0.51869226", "0.51795375", "0.5152984", "0.51428926", "0.51320624", "0.5120968", "0.51051915", "0.5090538", "0.508532", "0.5063839", "0.504789", "0.503222", "0.50286025", "0.4974211", "0.49724165", "0.49562263", "0.49513632", "0.49229735", "0.4903201", "0.48883864", "0.4886683", "0.48852938", "0.4871942", "0.48410335", "0.4831249", "0.48050168", "0.48018327", "0.47849172", "0.47804156", "0.47788355", "0.47777745", "0.47732404", "0.47723225", "0.47663438", "0.47647083", "0.47522318", "0.47438845", "0.473445", "0.47340915", "0.47335702", "0.4729296", "0.47283992", "0.4718238", "0.47026744", "0.46990234", "0.46975332", "0.46875745", "0.46867755", "0.46817005", "0.46405414", "0.46397007", "0.46267968", "0.46219444", "0.4617572", "0.46141887", "0.4612966", "0.46055192", "0.45829865", "0.45793927", "0.45761168", "0.45697862", "0.4565611", "0.45567194", "0.45543432", "0.4553223", "0.4550774", "0.45483607", "0.45407912", "0.45384446", "0.45164096", "0.45063385", "0.44946805", "0.44908863", "0.4489673", "0.4487017", "0.44816154", "0.44800943", "0.44753334", "0.4473198", "0.44539747", "0.44488868", "0.44487146", "0.44482556", "0.44453734", "0.4444937" ]
0.5508073
2
Read CSV in folder "general" in database. Also used in setup.py
def open_general(file, setup=False): try: if setup is False: p = datapath(True, 'general', file) df = _pd.read_csv(p + '.csv') elif setup is True: p = datapath(True, 'general', file) df = _pd.read_csv(p + '.py') else: df = None # not tested here return df except FileNotFoundError as e: print("There is no record of {} in your database. Go to your chosen setup path to check, if not there go to " "Github and download the missing sheet".format(file)) return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_csv():", "def getFake(directory=\"../FakeRealNews/Data\"):\r\n return pd.read_csv(directory + \"/Fake.csv\")", "def read_csv_file(self):\n pass", "def _read_csv(self):\n self.function_name = '_read_csv'\n with open(os.path.join(self.task.downloads, self.csv_name)) as csv_file:\n reader = csv.reader(csv_file, dialect='excel')\n for row in reader:\n self.input_data.append(row)", "def getReal(directory=\"../FakeRealNews/Data\"):\r\n return pd.read_csv(directory + \"/True.csv\")", "def from_csv_to_database():\r\n for year, path in FileNamePath.items():\r\n # load csv files\r\n with open(path, encoding='cp1251') as dataset:\r\n print(f\"Download {year} data\")\r\n get_curr_data(dataset, year)", "def read_csv():\n global csvdata\n global CONFIG\n if type(csvdata) == type(None):\n if not os.path.exists(CONFIG[\"csvfile\"]):\n csvdata = pandas.read_csv(CONFIG[\"csvrepo\"],\n na_values=[\"-999999\",\"NOT AVAILABLE\"])\n os.makedirs(CONFIG[\"cachedir\"],exist_ok=True)\n csvdata.to_csv(CONFIG[\"csvfile\"])\n else:\n csvdata = pandas.read_csv(CONFIG[\"csvfile\"])\n return csvdata", "def get_data(self, csv_file):\n pass", "def load_csv(self):\n self.database = pd.read_csv(\n self.settings['database_path'],\n encoding='utf-8')", "def dataLoad():\n try:\n try: #Python3\n f = open(__file__ + \".csv\",\"rt\")\n except: #Python2\n f = open(__file__ + \".csv\",\"rb\")\n data = f.read().split(',')\n entryCol.entry0.delete(0,END)\n entryCol.entry0.insert(0,data[0])\n entryCol.entry1.delete(0,END)\n entryCol.entry1.insert(0,data[1])\n entryCol.entry2.delete(0,END)\n entryCol.entry2.insert(0,data[2])\n entryCol.entry3.delete(0,END)\n entryCol.entry3.insert(0,data[3])\n botWind.writeN(\"DataLoad: File\")\n except:\n botWind.writeN(\"DataLoad: Default\")", "def read_csv_file(dir_name, csv_file, collection, error_list):\n count = 0\n try:\n filename = os.path.join(dir_name, csv_file)\n with open(filename, 'r') as file:\n csv_reader = csv.DictReader(file)\n # create the document for products collection\n for row in csv_reader:\n collection.insert_one(row)\n except FileNotFoundError:\n LOGGER.info('FileNotFoundError')\n count += 1\n except Exception as error:\n count += 1\n LOGGER.info('Exception:')\n LOGGER.info(error)\n error_list.append(count)", "def loadCSV(input_file):", "def test_findCSV(self,\n filename=\"page-views.csv\",\n input_folder='../../input/raw-data/'):\n\n csv_file = retrive_csv_file(filename, input_folder)\n expected_output = input_folder + filename\n self.assertEqual(csv_file, expected_output)", "def read(self, database ='project'):\n\t\tfile = open(self.file_name, \"r\")\n\n\t\ti = 1\n\t\tseptics = []\n\t\tfor line in file:\n\t\t\tif i > 2:\n\t\t\t\tval = line.split()\n\t\t\t\tself.check_cols(val, 13, 'septic')\n\n\t\t\t\tsep = {\n\t\t\t\t\t'name': val[0].lower(),\n\t\t\t\t\t'q_rate': val[1],\n\t\t\t\t\t'bod': val[2],\n\t\t\t\t\t'tss': val[3],\n\t\t\t\t\t'nh4_n': val[4],\n\t\t\t\t\t'no3_n': val[5],\n\t\t\t\t\t'no2_n': val[6],\n\t\t\t\t\t'org_n': val[7],\n\t\t\t\t\t'min_p': val[8],\n\t\t\t\t\t'org_p': val[9],\n\t\t\t\t\t'fcoli': val[10],\n\t\t\t\t\t'description': val[12] if val[12] != 'null' else None # 12 index because extra column\n\t\t\t\t}\n\t\t\t\tseptics.append(sep)\n\t\t\ti += 1\n\n\t\tif database == 'project':\n\t\t\tdb_lib.bulk_insert(project_base.db, project_parmdb.Septic_sep, septics)\n\t\telse:\n\t\t\tdb_lib.bulk_insert(datasets_base.db, datasets_parmdb.Septic_sep, septics)", "def importAll():\n csvFile = openCsv()\n items = [] # chooseKey, count, grade, keyType, 
mainCategory, mainKey,\n # name, pricePerOne, subCategory, subKey, totalTradeCount,\n # mainLabel, subLabel, description\n\n with open(csvFile) as i:\n readItem = csv.reader(i)\n itemRow = next(readItem)\n for row in readItem:\n items.append(row)\n\n return items", "def get_raw_data():\n data_files = []\n for i, f in enumerate(os.listdir(config.RAW_DATA_DIR)):\n data_files.append(f)\n print i, \": \", f\n while True:\n try:\n index = int(raw_input(\"Type the index of the data file you'd like to import: \"))\n fn_raw_data = data_files[int(index)]\n break\n except ValueError:\n print(\"Not a valid index. Try again.\")\n except IndexError:\n print(\"Not a valid index. Try again.\")\n print \"Importing %s...\" % fn_raw_data\n with open(config.RAW_DATA_DIR + fn_raw_data) as infile:\n next(infile)\n raw_data = list(csv.DictReader(infile))\n return (fn_raw_data, raw_data)", "def update_csv():\n return os.listdir('./data')", "def import_csv_data(cr, registry):\n files = ['data/sc.info.csv']\n for file in files:\n tools.convert_file(cr, 'prospects_app', file, None,\n mode='init', noupdate=True, kind='init')", "def load_file_data_from_db(sip, base_path):\n my_entry = FSEntries(sip)\n md_object = add_collection_name(my_entry.md_info, base_path)\n return md_object", "def import_data_handler():\n\n result = ''\n try:\n current_dir = os.getcwd()\n directory_name = current_dir + '\\\\lesson5\\\\data\\\\'\n file_name_dict = {'products': 'products.csv', 'customers': 'customers.csv',\n 'rentals': 'rentals.csv'}\n for key, value in file_name_dict.items():\n tmp_file = directory_name + value\n mongo_insert = ImportUnitTestData()\n result = mongo_insert.import_data(key, tmp_file)\n print(result)\n except FileNotFoundError as e:\n logger.error('exception %s', e, exc_info=True)\n result = 'exception {}'.format(e)\n print(result)\n return result", "def load_cfda(fullpath):\n try:\n with open(fullpath, errors='backslashreplace') as csvfile:\n\n reader = csv.DictReader(csvfile, delimiter=',', quotechar='\"', skipinitialspace='true')\n for row in reader:\n cfda_program, created = CFDAProgram.objects.get_or_create(\n program_number=row['Program Number'])\n\n cfda_program.data_source = \"USA\"\n cfda_program.program_title = row['Program Title']\n cfda_program.popular_name = row['Popular Name (020)']\n cfda_program.federal_agency = row['Federal Agency (030)']\n cfda_program.authorization = row['Authorization (040)']\n cfda_program.objectives = row['Objectives (050)']\n cfda_program.types_of_assistance = row['Types of Assistance (060)']\n cfda_program.uses_and_use_restrictions = row['Uses and Use Restrictions (070)']\n cfda_program.applicant_eligibility = row['Applicant Eligibility (081)']\n cfda_program.beneficiary_eligibility = row['Beneficiary Eligibility (082)']\n cfda_program.credentials_documentation = row['Credentials/Documentation (083)']\n cfda_program.pre_application_coordination = row['Preapplication Coordination (091)']\n cfda_program.application_procedures = row['Application Procedures (092)']\n cfda_program.award_procedure = row['Award Procedure (093)']\n cfda_program.deadlines = row['Deadlines (094)']\n cfda_program.range_of_approval_disapproval_time = row['Range of Approval/Disapproval Time (095)']\n cfda_program.appeals = row['Appeals (096)']\n cfda_program.renewals = row['Renewals (097)']\n cfda_program.formula_and_matching_requirements = row['Formula and Matching Requirements (101)']\n cfda_program.length_and_time_phasing_of_assistance = row['Length and Time Phasing of Assistance (102)']\n 
cfda_program.reports = row['Reports (111)']\n cfda_program.audits = row['Audits (112)']\n cfda_program.records = row['Records (113)']\n cfda_program.account_identification = row['Account Identification (121)']\n cfda_program.obligations = row['Obligations (122)']\n cfda_program.range_and_average_of_financial_assistance = row['Range and Average of Financial Assistance (123)']\n cfda_program.program_accomplishments = row['Program Accomplishments (130)']\n cfda_program.regulations_guidelines_and_literature = row['Regulations, Guidelines, and Literature (140)']\n cfda_program.regional_or_local_office = row['Regional or Local Office (151) ']\n cfda_program.headquarters_office = row['Headquarters Office (152)']\n cfda_program.website_address = row['Website Address (153)']\n cfda_program.related_programs = row['Related Programs (160)']\n cfda_program.examples_of_funded_projects = row['Examples of Funded Projects (170)']\n cfda_program.criteria_for_selecting_proposals = row['Criteria for Selecting Proposals (180)']\n cfda_program.url = row['URL']\n cfda_program.recovery = row['Recovery']\n cfda_program.omb_agency_code = row['OMB Agency Code']\n cfda_program.omb_bureau_code = row['OMB Bureau Code']\n if row['Published Date']:\n cfda_program.published_date = datetime.strptime(row['Published Date'], '%b, %d %Y')\n if row['Archived Date']:\n cfda_program.archived_date = datetime.strptime(row['Archived Date'], '%b, %d %Y')\n\n cfda_program.save()\n\n # self.logger.log(20, \"loaded %s %s \", cfda_program.program_number, cfda_program)\n\n except IOError:\n logger = logging.getLogger('console')\n logger.log(\"Could not open file to load from\")", "def read_files(folder):\n print_header(\"READING FILES FROM FOLDER (RECURSIVE)\", \"=\")\n files = []\n for dirpath, dirnames, filenames in os.walk(folder):\n if not dirpath.endswith(\"updates\"):\n for filename in filenames:\n root, ext = os.path.splitext(filename)\n if ext.lower() == \".sql\":\n full_path = os.path.join(dirpath, filename)\n with open(full_path, \"r\") as f:\n sql = f.read()\n sql = sql.decode(\"latin-1\")\n\n files.append((filename, sql))\n return files", "def import_data_handler():\n\n result = ''\n try:\n current_dir = os.getcwd()\n directory_name = current_dir + '\\\\' + 'data' + '\\\\'\n file_name_dict = {'products': 'products.csv', 'customers': 'customers.csv',\n 'rentals': 'rentals.csv'}\n for key, value in file_name_dict.items():\n tmp_file = directory_name + value\n mongo_insert = mdb.ImportData(key, tmp_file)\n result = mongo_insert.import_data()\n print(result)\n except FileNotFoundError as e:\n logger.error('exception %s', e, exc_info=True)\n result = 'exception {}'.format(e)\n print(result)\n return result", "def _load_csv(root_path, table_meta):\n relative_path = os.path.join(root_path, table_meta['path'])\n dtypes = _read_csv_dtypes(table_meta)\n\n data = pd.read_csv(relative_path, dtype=dtypes)\n data = _parse_dtypes(data, table_meta)\n\n return data", "def test_misc_csv_read():\n r = csv_reader(\"../test/test.csv\")\n fields = r.hdr()\n data = str(fields)\n while True:\n row = r.row()\n if not row: break\n data += '\\n' + str(row)\n\n assert(data == \"\"\"\n['EVT_CODE*', 'EVT_DATE.DE', 'CODE', 'AGE', 'FRST', 'LST', 'SPEC', 'de.id']\n['tea', '2018/01/01', 'X', '35', 'PRE', 'WHO', 'BUG', '1']\n['coffee', '2018/05/05', 'X', '35', 'JAN,Z', 'WHO', 'FRG', '1']\n['water', '2018/01/01', 'Y', '35', 'TAN', 'POST', 'CAT', '2']\n \"\"\".strip())", "def import_data(directory_name, product_file, customer_file, rentals_file):\n customer = 
DATABASE['customer']\n product = DATABASE['product']\n rental = DATABASE['rental']\n\n counts = (\n import_csv_to_mongodb(product, f\"{directory_name}/{product_file}\"),\n import_csv_to_mongodb(customer, f\"{directory_name}/{customer_file}\"),\n import_csv_to_mongodb(rental, f\"{directory_name}/{rentals_file}\")\n )\n return counts", "def read_dataset_files(datasetid, clean_folder):\n fnotu = datasetid + '.otu_table.clean.feather'\n fnmeta = datasetid + '.metadata.clean.feather'\n\n df = feather.read_dataframe(os.path.join(clean_folder, fnotu))\n # Feather format does not support index names, first column has index\n df.index = df.iloc[:,0]\n df = df.iloc[:, 1:]\n\n meta = feather.read_dataframe(os.path.join(clean_folder, fnmeta))\n meta.index = meta.iloc[:, 0]\n meta = meta.iloc[:, 1:]\n\n ## Make sure sample names are strings\n if df.index.dtype != 'O':\n df.index = pd.read_csv(os.path.join(clean_folder, fnotu), sep='\\t', dtype=str).iloc[:,0]\n\n if meta.index.dtype != 'O':\n meta.index = pd.read_csv(os.path.join(clean_folder, fnmeta), sep='\\t', dtype=str).iloc[:,0]\n\n return df, meta", "def readInCSV(csvFile):\n\tprint \"Checking if helper app is installed...\"\n\tandroidCheckAndInstallHelper()\n\ttry:\n\t\tprint \"Will read in the files from %s\" % csvFile\n\t\tstatus = subprocess.call([\"adb\",\"shell\",\"am\",\"startservice\",\n\t\t\t\t\t\t\t\t \"-a\", \"com.synchronoss.androidDev.contactcreaterapp.action.IMPORT\",\n\t\t\t\t\t\t\t\t \"-e\", \"CSV\", csvFile,\n\t\t\t\t\t\t\t\t \"com.synchronoss.androidDev.contactcreaterapp/.CreateAndAddContacts\"],\n\t\t\t\t\t\t\t\t stdout=stdout,stderr=stderr)\n\t\tif (status == 1):\n\t\t\tprint \"Contacts successfully copied from csv on target device.\"\n\t\tif (status != 0):\n\t\t\tprint >>sys.stderr, \"Unable to launch contact adder app\"\n\t\t\tsys.exit()\n\texcept OSError as e:\n\t\tprint >>sys.stderr, \"Execution failed: \", e\n\t\tsys.exit()\n\twaitForHelperApp()", "def _collect(self, conll_directory) -> Iterator[Any]:\n logging.info(\"Reading .conll from %s\", conll_directory)\n return dataset_path_iterator(conll_directory, self.configs.file_ext)", "def main(csvfile, dbfile, verbose=False):\n CONN = sqlite3.connect(dbfile)\n cursor = CONN.cursor()\n create_schema(cursor)\n process_data(cursor, csvfile, verbose=verbose)\n CONN.commit()\n CONN.close()", "def read_meta_data(data_dir):\n meta_data = pd.read_csv(join(data_dir, 'index.csv'))\n\n return meta_data", "def read_file():\r\n #with nos permite manejar el archivo dentro del bloque y despues cerrarlo\r\n with open('Entries.csv') as f:\r\n reader = csv.reader(f)\r\n data = list(reader)\r\n return data", "def read_db():\n # read config file\n config = configparser.ConfigParser()\n config.read_file(open(\"options.cfg\"))\n\n return config['DEFAULT']['DatabaseFilename']", "def read_test_rf_csv():\n if os.path.exists(\"test_rf.csv\"):\n #print (\"--testing CSV imported\\n\")\n results = pd.read_csv(\"test_rf.csv\", index_col=0)\n else:\n print(\"log not found\")\n\n return results", "def readFile(filename):\n\twith open(filename, 'rU') as csvIN:\n\t\tnext(csvIN)\n\t\toutCSV=(line for line in csv.reader(csvIN, dialect='excel'))\n\t\tfor row in outCSV:\n e = Entry(row)\n e.pass_import()", "def read_csv(folder):\n csv_paths = [(f, os.path.join(folder, f)) for f in os.listdir(folder) if f.endswith('.csv') and '刑事' in f and '司法院-刑事補償_刑事' not in f and '最高' not in f]\n return csv_paths", "def csv_path(name):\n return \"./data/%s\" % name", "def managecsv(data):\n\n checkfolderdata()\n if not 
datafileexist(data[7]):\n createcsv(data[7])\n managecsv(data)\n else:\n addcsv(data, data[7])", "def readDB():\n if not os.path.exists(filenameDB):\n return { }\n \n with open(filenameDB, \"r\") as csvfile:\n rows = csv.reader(csvfile)\n if rows:\n db = { }\n for r in rows:\n if len(r)==2 and isinstance(r[0],str) and isinstance(r[1],str):\n db[r[1]] = r[0]\n return db\n return { }", "def read_csv(csv_path, fieldnames=None, restkey=None,\n restval=None, dialect='excel', *args, **kwds):\n with CSVFile(os.path.expanduser(csv_path), fieldnames=fieldnames, restkey=restkey, restval=restval,\n dialect=dialect, *args, **kwds) as csvfile:\n return csvfile", "def ingest_rental_csv(csv_path):\n # Create a CSV import generator (next yields one db row)\n import_generator = import_csv_gen(csv_path)\n # Skip over the title row\n next(import_generator)\n # Iterate over all other rows\n while True:\n try:\n data = next(import_generator)\n if len(data) != 2:\n logger.error(f'Data with incorrect item count: {len(data)}')\n continue\n # extract items from list and add document to database\n with Connection():\n rental = Rental(\n product_id=data[RENTAL_PROD_ID],\n user_id=data[RENTAL_USER_ID]\n )\n rental.save() # This will perform an insert\n except StopIteration:\n break", "def get_local_dataset(\n self, \n file_name: str\n ):\n pd.read_csv(file_name)\n #save", "def GetOptionsData(directory):\n\n op_data = pd.read_csv(directory)\n return op_data", "def process_csv(filepath):\n suburb = get_suburb(filepath)\n read_file = pd.read_csv(filepath,\n infer_datetime_format=True,\n parse_dates=[\"SALE DATE\"],\n dayfirst=True)\n read_file[\"SUBURB\"] = suburb\n separate_date(read_file)\n return read_file", "def read_csv(self, path):\n for file in os.listdir(path):\n if file[-4:] == \".csv\":\n name = file[:-4]\n table_index_header = cfg.get_list(\"table_index_header\", name)\n filename = os.path.join(path, file)\n self.input_data[name] = pd.read_csv(\n filename,\n index_col=list(range(int(table_index_header[0]))),\n header=list(range(int(table_index_header[1]))),\n squeeze=(\"series\" not in name),\n )\n self.check_input_data(warning=False)\n self.add_meta_data()\n return self", "def process_csv(csv_file=None):\n if csv_file:\n _process_csv_data(csv_file, USER_DATA_MAP)\n else:\n csv_files_list = [os.path.join(DATA_DIR, f) for f in os.listdir(DATA_DIR) if f.endswith('.csv')]\n for fl in csv_files_list:\n _process_csv_data(fl, USER_DATA_MAP)\n return USER_DATA_MAP", "def parse_csv(db: sqlite3.Connection, symbols_meta):\n\n logger.info('Parsing csv files for days.')\n\n csv_directory = settings.DATA_DIRECTORY / 'csv'\n\n # Get list of days by enumerating csv files in directory.\n csv_list = sorted(os.listdir(csv_directory), reverse=False)\n for jdx, f in enumerate(csv_list):\n csv_path = csv_directory / f\n if csv_path.is_file and csv_path.suffix == '.csv':\n\n day = csv_path.name[:-4]\n day = '-'.join((day[:4], day[4:6], day[6:8]))\n\n db.execute('INSERT INTO iex_days(date) VALUES(?);', (day,))\n db.commit()\n day_id = db.execute('SELECT last_insert_rowid();').fetchone()[0]\n\n logger.info(f'Found day {jdx+1} of {len(csv_list)} : {day} @ {f}.')\n\n with open(csv_path, 'r') as csv_file:\n reader = csv.reader(csv_file, delimiter=',')\n date_str = '-'.join((f[:4], f[4:6], f[6:8]))\n\n rows = list()\n for idx, row in enumerate(reader):\n timestamp, symbol, price, size = row\n qdl_symbol = symbol.replace('.', '_').replace('-', '_')\n if qdl_symbol in symbols_meta:\n rows.append((date_str, timestamp, qdl_symbol, price, 
size))\n\n logger.info(f'Storing {len(rows)} of {idx+1} messages to database.')\n\n db.executemany('''\nINSERT INTO iex_trade_reports(day, timestamp, symbol, price, size)\nVALUES(?, ?, ?, ?, ?);\n''', rows)\n db.commit()", "def read_csv(csv_file_path):\n res = [] #list\n # f = open(csv_file_path) #read file\n with open(csv_file_path,\"r\") as f:", "def post_init(cr, registry):\n import_csv_data(cr, registry)", "def load_daily_data():\n return pd.read_csv(os.path.join('data', 'raw', 'full_grouped.csv'))", "def import_csv(directory_name, collection_file, database):\n LOGGER.debug('Importing %s CSV file...', collection_file)\n count = 0\n errors = 0\n try:\n filename = f'{collection_file}.csv'\n collection = database[collection_file]\n with open(os.path.join(directory_name, filename)) as file:\n collection.insert_many(data_convert(csv.DictReader(file)))\n count = collection.count_documents({})\n except OSError as err:\n print(f'OS error: {err}')\n LOGGER.error('Error reading %s file: %s', collection_file, err)\n errors = 1\n\n return count, errors", "def main():\n for db_csv_export in current_dir.glob(\"template*.csv\"):\n data_projects = load_projects(db_csv_export)\n json_path = db_csv_export.with_suffix(\".json\")\n with open(json_path, \"w\") as fh:\n json.dump(data_projects, fh, indent=2)", "def sync_csv(arg):\n files = os.listdir(arg.input_dir)\n file_map = dict()\n for f in files:\n label = os.path.splitext(f)[0].split('_')\n if len(label) < 2:\n continue\n if file_map.get(label[0], None) is None:\n file_map[label[0]] = dict()\n if label[1] == 'grid':\n file_map[label[0]]['grid'] = f\n else:\n if file_map[label[0]].get('data', None) is None:\n file_map[label[0]]['data'] = dict()\n if file_map[label[0]]['data'].get(label[1], None) is None:\n file_map[label[0]]['data'][label[1]] = []\n file_map[label[0]]['data'][label[1]].append(f)\n tar_name = 't{}'.format(datetime.now().strftime('%Y%m%d'))\n tar_path = os.path.join(arg.output_dir, 'tar', tar_name)\n if not os.path.exists(tar_path):\n os.mkdir(tar_path)\n i = 0\n n = len(file_map)\n for city, v in file_map.items():\n i = i + 1\n print('------ handle city [{}/{}]: {} -------'.format(i, n, city))\n city_csv_path = os.path.join(arg.output_dir, 'csv', city)\n city_sql_path = os.path.join(arg.output_dir, 'sql', city)\n if not os.path.exists(city_csv_path):\n os.mkdir(city_csv_path)\n if not os.path.exists(city_sql_path):\n os.mkdir(city_sql_path)\n grid = v.get('grid', None)\n if grid:\n grid = os.path.splitext(grid)[0]\n print('** handling grid ...')\n tar_sql = os.path.join(tar_path, '{}.sql.gz'.format(grid))\n if os.path.exists(tar_sql):\n print('****** {} exist!'.format(tar_sql))\n else:\n shutil.copyfile(os.path.join(arg.input_dir, '{}.csv'.format(grid)),\n os.path.join(city_csv_path, '{}.csv'.format(grid)))\n trans_grid(city, city_csv_path, city_sql_path)\n print('****** GZIP grid sql')\n with open(os.path.join(city_sql_path, '{}.sql'.format(grid))) as fi, gzip.open(\n os.path.join(tar_path, '{}.sql.gz'.format(grid)), 'wb') as fo:\n fo.write(fi.read().encode())\n data = v.get('data', None)\n if data:\n print('** handling data ...')\n for week, data_files in data.items():\n print('**** week: {}'.format(week))\n tar_detail = os.path.join(tar_path, '{}_{}_detail.sql.tar.gz'.format(city, week))\n if os.path.exists(tar_detail):\n print('****** {} exist!'.format(tar_detail))\n else:\n for data_file in data_files:\n shutil.copyfile(os.path.join(arg.input_dir, data_file), os.path.join(city_csv_path, data_file))\n create_detail(city, week, 
30000, city_csv_path, city_sql_path)\n print('****** TAR detail sql')\n with tarfile.open(tar_detail, 'w:gz') as f:\n for city_week_detail in os.listdir(city_sql_path):\n if city_week_detail.startswith('{}_{}_detail'.format(city, week)):\n f.add(os.path.join(city_sql_path, city_week_detail), arcname=city_week_detail)\n print('****** remove csv and sql file...')\n for data_file in data_files:\n os.remove(os.path.join(city_csv_path, data_file))\n sql_files = os.path.join(city_sql_path, '{}_{}_detail*sql'.format(city, week))\n for sql_file in glob.glob(sql_files):\n os.remove(sql_file)", "def _load(self, config: Dict):\n return pd.read_csv(config['path'])", "def import_data(dir_name, product_file, customer_file, rentals_file):\n client = MongoDBConnection()\n with client:\n LOGGER.info('Create A MongoDB database')\n hp_norton_db = client.connection.rental\n hp_norton_db.products.drop()\n hp_norton_db.customers.drop()\n hp_norton_db.rentals.drop()\n\n # create three collections.\n LOGGER.info('Create three collections')\n products = hp_norton_db['products']\n customers = hp_norton_db['customers']\n rentals = hp_norton_db['rentals']\n error_list = []\n\n # 1. load the products collection\n #LOGGER.info('Load the products collection')\n read_csv_file(dir_name, product_file, products, error_list)\n for doc in products.find():\n LOGGER.debug(f'-- products:{doc}.')\n LOGGER.debug(f'Error_list:{error_list}')\n\n # 2. load the customers collection\n #LOGGER.info('Load the customers collection')\n read_csv_file(dir_name, customer_file, customers, error_list)\n for doc in customers.find():\n LOGGER.debug(f'-- cusotmers:{doc}.')\n LOGGER.debug(f'Error_list:{error_list}')\n\n # 3. load the rentals collection\n #LOGGER.info('Load the rentals collection')\n read_csv_file(dir_name, rentals_file, rentals, error_list)\n for doc in rentals.find():\n LOGGER.debug(f'-- rentals:{doc}.')\n LOGGER.debug(f'Error_list:{error_list}')\n for i in error_list:\n if i == 1:\n LOGGER.debug('!!! 
Error in importing csv files')\n LOGGER.info('Finish import three csv files')\n return [(products.count(), customers.count(), rentals.count()),\n tuple(error_list)]", "def read(self):\n all_files = glob.glob(os.path.join(self.path, \"*.csv\"))\n start_time = datetime.now()\n for file in all_files:\n print(\"\\nImporting file: \" + file + \"\\n\")\n command = \"mongoimport -d ci_311db -c ci_311_incident --type csv --file \" + file + \" --headerline \" \\\n \"--columnsHaveTypes --numInsertionWorkers 4\"\n os.system(command)\n end_time = datetime.now()\n print(\"All CSVs imported in collection.\\nTotal import time: \" + str(end_time - start_time))", "def _read_recs(basedir):\n for borotag in boro_tags:\n datafile = \"%s/%s.csv\" % (basedir,borotag)\n print(\"slurp '%s' ..\" % datafile)\n recs = read_recs(datafile)\n yield from (pluto.parse.normalize(r) for r in recs)", "def load_data_csv():\r\n \r\n # Load lookup table\r\n path = 'data/id_lookup.csv'\r\n lookup_table = pd.read_csv(path, index_col=0)\r\n\r\n # Load song data\r\n path2 = 'data/data_lyrics_features.csv'\r\n data = pd.read_csv(path2, index_col=0)\r\n\r\n return data, lookup_table", "def read_samples(base_path):\n samples = []\n path = os.path.join(base_path, 'driving_log.csv')\n with open(path) as csvfile:\n reader = csv.reader(csvfile)\n # Skip header line\n header = next(reader)\n for line in reader:\n samples.append(line)\n return samples", "def readRecordFromFile():\n\twith open(gbl.sourceFile, newline='') as csvfile:\n\t\trowReader = csv.reader(csvfile, delimiter=gbl.csvDiscriminator, quotechar=gbl.csvQuotechar)\n\t\tfor row in rowReader:\n\t\t\tROWData.append(row)", "def read_relations(db, openfile):\n pass", "def loadDB(self,dbfilename):\n \n db=[]\n with open(dbfilename,'r',encoding='ISO-8859-1') as dbfilename:\n dbreader= csv.reader(dbfilename,delimiter=self.sDelimiter )\n for lFields in dbreader:\n db.append(lFields)\n\n return db", "def import_db(import_file):\n import_data(import_file)", "def read_stock(db, openfile):\n pass", "def fetch_csv(review_channel):\n with open (review_channel.file_name) as file:\n return file.read()\n return \"\"", "def test_read_in_file(self):\r\n filename = \"CrimeDataSmall.csv\"\r\n\r\n lst = cds.read_in_file(filename)\r\n\r\n self.assertIsInstance(lst, list, \"Returned datatype should be a list\")\r\n self.assertEqual(len(lst), 4, \"There should be 4 rows returned from CrimeDataSmall 1 header and 3 data rows\")\r\n self.assertEqual(len(lst[0]), 23, \"Each row should have 23 columns\")\r\n self.assertEqual(lst[0][1], \"Reported_Date\", \"Column 1 was incorrect header\")\r\n self.assertEqual(lst[0][7], \"Offense\", \"Column 7 was incorrect header\")\r\n self.assertEqual(lst[0][13], \"Zip Code\", \"Column 13 header was incorrect\")\r\n self.assertEqual(lst[1][1], \"03/19/2019\", \"Column 1 was incorrect in first data row\")\r\n self.assertEqual(lst[1][7], \"Vehicular – Non-Injury\", \"Column 7 was incorrect in first data row\")\r\n self.assertEqual(lst[1][13], \"64161\", \"Column 13 in first data row was incorrect\")\r\n self.assertEqual(lst[3][1], \"03/27/2019\", \"Column 1 was incorrect in 3rd data row\")\r\n self.assertEqual(lst[3][7], \"Embezzlement\", \"Column 7 was incorrect 3rd data row\")\r\n self.assertEqual(lst[3][13], \"64112\", \"Column 13 3rd data row was incorrect\")\r\n self.assertEqual(lst[3][11], \"4600, S WORNALL RD\", \"Column 11 3rd data row was incorrect. 
Use csv module to read \")", "def readcsv(path, delimiter= ','):\n my_data = genfromtxt(path, delimiter= delimiter)\n return my_data", "def preprocess_files(file_path):\n # checking your current working directory\n cur_dir = os.getcwd()\n\n # Get your current folder and sub folder event data\n data_dir = os.path.join(cur_dir, 'event_data')\n\n # Create a for loop to create a list of files and collect each\n # file_path\n file_path_list = []\n for root, dirs, files in os.walk(data_dir):\n # join the file path and roots with the subdirectories using\n # glob\n file_path_list = glob.glob(os.path.join(root, '*'))\n\n full_data_rows_list = []\n\n # for every file_path in the file path list collect records\n for f in file_path_list:\n\n # reading csv file\n with open(f, 'r', encoding='utf8', newline='') as csvfile:\n\n # creating a csv reader object\n csvreader = csv.reader(csvfile)\n next(csvreader)\n\n # extracting each data row one by one and append it\n for line in csvreader:\n full_data_rows_list.append(line)\n\n csv.register_dialect('myDialect', quoting=csv.QUOTE_ALL,\n skipinitialspace=True)\n\n # create one file with all the records\n with open(file_path, 'w', encoding='utf8',\n newline='') as f:\n writer = csv.writer(f, dialect='myDialect')\n writer.writerow(\n ['artist', 'firstName', 'gender', 'itemInSession',\n 'lastName', 'length', 'level', 'location', 'sessionId',\n 'song', 'userId'])\n for row in full_data_rows_list:\n if row[0] == '':\n continue\n writer.writerow((row[0], row[2], row[3], row[4], row[5],\n row[6], row[7], row[8], row[12], row[13],\n row[16]))", "def load_multiple_csv(self, path, column):\n df = pd.concat([pd.read_csv(f\"{path}/{f}\") for f in tqdm(os.listdir(f\"{path}/\"))], ignore_index=True)\n return df[column]", "def main():\n langs = []\n\n with open(\"sql/07_populate.sql\", 'w', encoding='utf8') as sql:\n sql.write(\"--this file is generated from csv files in data folder\\n\\n\")\n\n langs = write_lang_city(sql)\n write_groups_diets(sql, langs)\n\n with open(\"sql/10_populate_test_data.sql\", 'w', encoding='utf8') as sql:\n sql.write(\"--this file is generated from csv files in moc_data folder\\n\\n\")\n write_test_data(sql)\n # This command sets postgis coordinates based on latitude and longitude\n sql.write(\"UPDATE restaurant SET geo_location = ST_POINT(latitude, longitude);\\n\")\n sql.close()", "def loadCSV(file: str, database: str, table: str) -> list:\n\n bd = _database(database)\n\n if bd:\n\n tb = _table(database, table)\n\n encoding = bd[\"encoding\"]\n\n if tb:\n\n mode = tb[\"modo\"]\n try:\n with open(file, 'r', encoding='utf-8-sig') as leer:\n reader = csv.reader(leer, delimiter=',')\n for x in reader:\n for y in x:\n if type(y) == str:\n y.encode(encoding, \"strict\")\n leer.close()\n except:\n return []\n val = -1\n\n if mode == \"avl\":\n val = avl.loadCSV(file, database, table)\n\n elif mode == \"b\":\n val = b.loadCSV(file, database, table)\n\n elif mode == \"bplus\":\n val = bplus.loadCSV(file, database, table)\n\n elif mode == \"hash\":\n val = hash.loadCSV(file, database, table)\n\n elif mode == \"isam\":\n val = isam.loadCSV(file, database, table)\n\n elif mode == \"json\":\n val = json.loadCSV(file, database, table)\n\n elif mode == \"dict\":\n val = dict.loadCSV(file, database, table)\n\n nombreST = str(database) + '-' + str(table)\n if 0 in val:\n if BC.EsUnaTablaSegura(nombreST, _main_path):\n BC.insertCSV(nombreST, file, _main_path, val)\n return val\n\n else:\n return []\n\n else:\n return []", "def load(db):\n r = 
db.truncate_table('calibrations')\n print \"Truncated calibrations table\"\n\n # Allowed columns\n columns = ['class','asset_uid','start_date','serial','name','value','notes']\n\n # Read in calibration data\n file_mask = \"repos/asset-management/calibration/*\"\n directory_list = glob.glob(file_mask)\n for directory in directory_list:\n file_list = glob.glob(directory + '/*.csv')\n for ifile in file_list:\n with open(ifile, 'rb') as csvfile:\n print \"Loading file: \" + ifile\n reader = csv.DictReader(csvfile)\n for row in reader:\n row['class'] = directory.split('/')[-1]\n row['asset_uid'] = ifile.split('/')[-1].split('__')[0]\n row['start_date'] = ifile.split('/')[-1].split('__')[1].split('.')[0]\n data = remove_extraneous_columns(columns, row)\n save_cal(db,data)", "def _read_csv(self) -> pd.DataFrame:\n\n return pd.concat(\n [\n pd.read_csv(f, usecols=[1, 2, 3, 4, 5])\n for f in self.temp_path.iterdir()\n if f.name.endswith(\".csv\")\n ]\n )", "def __init__(self, root_dir):\n self.paths = glob.glob(root_dir + \"/*.csv\")\n self.target = 'Default'\n # Grouping variable names", "def load_csv(filename: str, solr_url: typing.Optional[str]):\n\n solr_client = Solr(solr_url, always_commit=True) if solr_url else Solr(\"\")\n\n csv_data = { row[\"Item ARK\"]: row for row in csv.DictReader(open(filename)) }\n\n config = {\n \"collection_names\": {\n row[\"Item ARK\"]: row[\"Title\"] for row in csv_data.values() if row[\"Object Type\"] == \"Collection\"\n },\n \"controlled_fields\": load_field_config(\"./fields\"),\n \"child_works\": collate_child_works(csv_data),\n }\n\n controlled_fields = load_field_config(\"./fields\")\n\n mapped_records = []\n for row in rich.progress.track(csv_data.values(), description=f\"Importing {filename}...\"):\n if row[\"Object Type\"] not in (\"ChildWork\", \"Page\"):\n mapped_records.append(map_record(row, solr_client, config=config))\n\n if solr_url:\n solr_client.add(mapped_records)\n else:\n print(json.dumps(mapped_records))", "def _read_source_data(self) -> pd.DataFrame:\n df = None\n try:\n logger.info(\"reading csv base file under simulation folder\", class_name=self.__class__.__name__)\n df = pd.read_csv(\n f\"{Path(__file__).parents[1]}/data/simulation/{self.base_data_filename}\"\n )\n except FileNotFoundError:\n logger.warning(\"base file not processed, trying under unprocessed folder\",\n class_name=self.__class__.__name__)\n try:\n df = pd.read_csv(\n f\"{Path(__file__).parents[1]}/data/unprocessed/{self.base_data_filename}\"\n )\n except FileNotFoundError:\n logger.error(\"base file not found... 
exiting\", class_name=self.__class__.__name__)\n exit(1)\n return df", "def read_rf_csv():\n if os.path.exists(\"rf.csv\"):\n #print (\"--decision trees CSV imported\\n\")\n results = pd.read_csv(\"rf.csv\", index_col=0)\n else:\n print(\"log not found\")\n\n return results", "def read_sql(self):\n pass", "def read_file(path: str):\n try:\n return pd.read_csv(f\"{Config.DATA_RAW_DIRECTORY}/{path}\")\n except OSError as ex:\n if Config.VERBOSE:\n print(ex)\n return None", "def get_raw_data_from_csv():\n data_df = pd.read_csv(static_constants.RAW_DATA_PATH)\n return data_df", "def test_load_with_csv(self):\n\n corpus = Corpus(\n common.TEST_CORPUS_PATH,\n csv_path=common.LARGE_TEST_CORPUS_CSV,\n name='test_corpus',\n )\n assert len(corpus) == 99\n assert isinstance(corpus.documents, list)\n assert corpus.name == 'test_corpus'", "def walk_csv_data(**kwargs):\n for path, name in walk(**kwargs):\n if path.endswith('.csv'):\n with open(path, newline='') as f:\n text = f.read()\n reader = csv.DictReader(StringIO(text))\n try:\n fieldnames = reader.fieldnames\n rows = list(reader)\n yield (path, name, text, fieldnames, rows)\n except csv.Error:\n continue", "def _read_directory(self):\n self._filenames = glob.glob(self._directory + \"/*.project\")", "def read_metadata(data_dir):\n return pd.read_csv(os.path.join(data_dir, \"metadata.csv\"), index_col=0)", "def read_metadata(data_dir):\n return pd.read_csv(os.path.join(data_dir, \"metadata.csv\"), index_col=0)", "def download_global_csv(output_dir: str):\n for filename, url_path in CSVS_TO_READ:\n url = urljoin(GITHUB_BASE_URL, url_path)\n path = os.path.join(output_dir, filename)\n df = pd.read_csv(url)\n df.to_csv(path)", "def load_subjects_to_db():\n try:\n with open(configuration.get_file_location(\"materias.csv\")) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=\";\")\n line_count = 0\n for row in csv_reader:\n if line_count == 0:\n logging.info(f'Column names are {\", \".join(row)}')\n line_count += 1\n else:\n subject = MateriaClass.Materia(row[1], row[0])\n logging.info(subject.print())\n sql = connectSQLite.save_subject(subject)\n for row in sql.fetchall():\n logging.info(row)\n sql = connectSQLite.get_db().close()\n except Exception as error:\n logging.info(\"FALSE, exception ocurred\")\n print(error)\n # line_count += 1\n # print(f'Processed {line_count} lines.')", "def _importInDjango(self):\n\n with open(settings.DATA_PATH, 'r', encoding='latin-1') as csv_file:\n reader = csv.DictReader(csv_file, delimiter=';')\n for raw in reader:\n\n # Créer ou mettre à jour la division\n division, created = Division.objects.get_or_create(\n nom=raw['Division']\n )\n if created:\n self.stdout.write(\n 'Divion {} ajoutée'.format(division.nom)\n )\n\n # Créer ou mettre à jour les équipes\n equipeDom, created = Equipe.objects.get_or_create(\n nom=raw['Equipe 1'],\n division=division\n )\n if created:\n self.stdout.write(\n 'Equipe {} ajoutée'.format(equipeDom.nom)\n )\n\n equipeExt, created = Equipe.objects.get_or_create(\n nom=raw['Equipe 2'],\n division=division\n )\n if created:\n self.stdout.write(\n 'Equipe {} ajoutée'.format(equipeExt.nom)\n )\n\n # Créer ou mettre à jour la rencontre\n scoreDom = 0 if raw['Score 1'] == '' else int(raw['Score 1'])\n scoreExt = 0 if raw['Score 2'] == '' else int(raw['Score 2'])\n forfaitDom = True if raw['Forfait 1'] == 'true' else False\n forfaitExt = True if raw['Forfait 2'] == 'true' else False\n date = datetime.datetime.strptime(raw['Date de rencontre'], '%d/%m/%Y')\n heure = 
datetime.datetime.strptime(raw['Heure'], '%H:%M')\n rencontre, created = Rencontre.objects.update_or_create(\n numero=int(raw['N° de match']),\n equipeDom=equipeDom,\n equipeExt=equipeExt,\n defaults={\n 'date': date,\n 'heure': heure,\n 'scoreDom': scoreDom,\n 'scoreExt': scoreExt,\n 'forfaitDom': forfaitDom,\n 'forfaitExt': forfaitExt,\n }\n )\n if created:\n self.stdout.write(\n 'Rencontre {} / {} ajoutée'.format(\n rencontre.equipeDom,\n rencontre.equipeExt\n )\n )", "def import_directory_csv(d_in, d_out, target_column, merge_columns):\n\n INPUT_FILES = grab_files(\"*.csv\", d_in)\n\n if not INPUT_FILES:\n logger.warning(\"No matching CSV files found, exiting\")\n exit(2)\n\n for f_csv in INPUT_FILES:\n f_csv_out = os.path.join(d_out, os.path.basename(f_csv))\n vals = (f_csv, f_csv_out, target_column, merge_columns)\n import_csv(vals)", "def read_csv(\n type: CSVTypes,\n csv_file: UploadFile = File(...),\n db: Session = Depends(get_db),\n authorization: str = Header(None),\n settings: config.Settings = Depends(get_settings),\n):\n if authorization != settings.upload_secret:\n raise HTTPException(401, \"Operação inválida!\")\n\n lines = 0\n\n with csv_file.file as file:\n content = file.read()\n content = content.decode(\"utf-8\")\n content = content.split(\"\\n\")\n if type == CSVTypes.results:\n lines = len(import_results_csv(content, db))\n elif type == CSVTypes.templates_results:\n lines = len(import_templates_results_csv(content, db))\n elif type == CSVTypes.hospitals:\n lines = len(import_hospitals_csv(content, db))\n else:\n raise HTTPException(400)\n\n log(\"[CSV] CSV foi importado.\", db)\n\n return {\"lines\": lines}", "def make_up(self, base_path='./data/'):\n for csv_file_path in [f\"{base_path}{_}\" for _ in os.listdir(base_path)]:\n self.append_file(csv_file_path)", "def run_load(rootpath):\n global CSV_PATH\n CSV_PATH = rootpath+'/csv_files/'\n load_movies_details()\n load_movies_cast()\n load_movies_reviews()", "def read_csv(product_name=str, directory=DIRS['EOIR_DATA_DIR']):\n filename = ('%s.csv' % product_name)\n path = get_dir(os.path.join(directory, filename))\n with io.open(path, mode='r', encoding='utf-8-sig') as f:\n spec_dict = {}\n filtered = (line.replace(\"\\n\", '') for line in f) # Removes \\n from the created as a byproduct of encoding\n for line in filtered:\n field, value = line.split(',')\n if has_number(value) and value.find('\"') == -1:\n if value.find('x') != -1:\n if value.find('.') != -1:\n value = [float(i) for i in value.split('x')]\n else:\n value = [int(i) for i in value.split('x')]\n else:\n value = float(value)\n else:\n value = value.replace('\"', '')\n if value.find('/') != -1:\n value = [str(i) for i in value.split('/')]\n elif (value.lower()).find('true') != -1:\n value = True\n elif (value.lower()).find('false') != -1:\n value = False\n else:\n value = str(value)\n spec_dict['%s' % str(field)] = value\n f.close()\n return spec_dict", "def walk_csv(self, filepath: str):\n with open(filepath, encoding='ISO-8859-1') as f:\n reader = csv.DictReader(f)\n for row in reader:\n logger.debug('Loading map {}'.format(row.get('id', None)))\n yield row", "def read_locations(db, openfile):\n pass", "def __loaddata(filename, datatype='flightcsv', minprob=0.001, maxprob=0.20):\n if datatype is 'flightcsv':\n return extract_flight_csv(filename, minprob=minprob, maxprob=maxprob)\n else:\n raise Exception('unknown datatype %s' % datatype)", "def get_book_csv(apps, schema_editor):\n Book = apps.get_model('core', 'Book')\n Author = apps.get_model('core', 
'Author')\n Category = apps.get_model('core', 'Category')\n datapath = os.path.join(settings.BASE_DIR, 'initial_data')\n datafile = os.path.join(datapath, 'Freeshelf_CSV_3_15.csv')\n\n Book.objects.all().delete()\n Category.objects.all().delete()\n\n with open(datafile) as file:\n reader = csv.DictReader(file)\n for row in reader:\n book_title = row['title']\n # The following ensures we will not create duplicate books\n if Book.objects.filter(title=book_title).count():\n continue\n # get or create\n author, _ = Author.objects.get_or_create(\n name=row['author'],\n )\n author.save()\n\n category, _ = Category.objects.get_or_create(\n name=row['category'],\n )\n category.slug = slugify(category.name)[:49]\n category.save()\n \n\n book = Book(\n title=row['title'],\n author=author,\n description=row['description'],\n book_url=row['book_url'],\n date_added=row['date_added'],\n )\n # Needed to slugify my title here because I was getting an error:\n # 'django.db.utils.IntegrityError: UNIQUE constraint failed: core_book.slug'\n book.slug = slugify(book.title)[:49]\n book.save()\n book.categories.add(category)", "def load_database(self, fsp='Species'):\n self.df_species = pd.read_csv(fsp + '.csv', header=0,\n index_col=0)", "def main():\n\n # Ensure the output directory exists\n if not os.path.exists(OUTPUT_DIR):\n os.makedirs(OUTPUT_DIR)\n\n process_csv()", "def read_file( self ):\n return pd.read_csv( self.tools_data_path )" ]
[ "0.6300106", "0.61802393", "0.6075419", "0.60727006", "0.606177", "0.6019453", "0.5933561", "0.58593744", "0.5730161", "0.5652817", "0.56373644", "0.5618824", "0.55967456", "0.5585875", "0.55683404", "0.5567126", "0.5550129", "0.5536807", "0.55361545", "0.55278426", "0.5509099", "0.55055", "0.54797417", "0.54628044", "0.5454694", "0.5448278", "0.5446893", "0.5443432", "0.5435057", "0.5431615", "0.5427859", "0.54255456", "0.54115665", "0.5404387", "0.54016083", "0.5395689", "0.539515", "0.5385714", "0.5359664", "0.5347773", "0.5347087", "0.5345841", "0.5341072", "0.53324723", "0.53249544", "0.53236455", "0.5321167", "0.53118414", "0.5283609", "0.52815014", "0.52806836", "0.5270571", "0.5264767", "0.5261232", "0.5258631", "0.5256721", "0.5256003", "0.5250841", "0.5240356", "0.5240289", "0.52283394", "0.52274483", "0.5226044", "0.52110946", "0.52097356", "0.51973325", "0.51952964", "0.5193145", "0.5190511", "0.5187554", "0.5185836", "0.51851094", "0.5182132", "0.51786005", "0.51746017", "0.51686496", "0.5166145", "0.51653016", "0.5159428", "0.51547754", "0.5147211", "0.5145628", "0.51408684", "0.51405674", "0.51405674", "0.5136245", "0.51344734", "0.51343644", "0.5134078", "0.5131566", "0.51270807", "0.5123928", "0.51181805", "0.5110532", "0.51093453", "0.5104726", "0.51036197", "0.5102687", "0.51012516", "0.5101236" ]
0.65410346
0
Read the stock list in the database, a wrapper around open_general. Opens stock list files in the database using the open_general() function.
def open_stock_list(exchange='ALL'):
    if exchange not in ['NYSE', 'NASDAQ'] and exchange != 'ALL':
        raise ValueError("Parameter 'exchange' should either NYSE or NASDAQ")
    if exchange == 'ALL':  # all tickets
        c1 = open_general('NASDAQ')
        c2 = open_general('NYSE')
        df = _pd.concat([c1, c2], ignore_index=True).drop('Unnamed: 9', axis=1)  # drop duplicated column
    else:
        _csv = open_general(exchange)
        df = _csv.drop('Unnamed: 9', axis=1)
    return df
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_stock(db, openfile):\n pass", "def read_stock_codes_from_db():\n\n print('connecting to database...')\n Stocks = get_db()['Stocks']\n print('reading...')\n\n stocks = Stocks.find()\n return stocks", "def database_open(self):\n\t\n\t\tfilename = tkFileDialog.askopenfilename(multiple=False)\n\n\t\tif filename:\n\t\t\n\t\t\ttry:\n\t\t\t\tself.log.info(\"Trying to load file {0}\".format(filename))\n\t\t\t\tself.source = yahoo.LocalSource(filename)\n\t\t\texcept Exception, ex:\n\t\t\t\tself.log.critical(\"Cannot open file {0} as local database\".format(filename))\n\t\t\t\tself.log.error(ex.message)\n\t\t\t\n\t\t\tself.refresh_all()", "def get_stock_data():\n if not os.path.exists('./catalog/stock_data'):\n os.mkdir('./catalog/stock_data')\n \n inventory_data = {}\n inventory_file = './catalog/stock_data/inventory-bro.txt'\n \n download_data = True\n if os.path.exists(inventory_file):\n # Check that inventory file is no more than 1 day old\n filestat = os.stat(inventory_file)\n tm = datetime.datetime.fromtimestamp(filestat.st_mtime)\n today = datetime.datetime.now()\n dt = today - tm\n if dt.days < 1:\n download_data = False\n \n if download_data:\n # Get inventory data from ftp site\n from ftplib import FTP_TLS\n print 'Downloading inventory-bro.txt ....'\n ftps = FTP_TLS('ftp.appareldownload.com')\n ftps.login('Br0d3r', 'Br0d3r2oll')\n ftps.prot_p()\n #ftps.retrlines('LIST')\n ftps.retrbinary('RETR inventory-bro.txt', open(inventory_file, 'wb').write)\n ftps.quit()\n \n print \"Parse inventory-bro.txt ... \"\n first_row = None\n for row in csv.reader(open(inventory_file, 'rb')):\n itemRef = row[4].lower()\n if itemRef == 'style number':\n # save first row to be used as column header\n first_row = row\n continue\n \n source_attribs = [{'attribute_type': 'source', 'attribute_value': 'broderbros'}]\n \n inventory_data.setdefault(itemRef, [])\n \n color = row[8].lower()\n size = row[10].lower()\n \n # Warehouses starts at column 13\n for i in range(13, len(first_row)):\n wh_name = first_row[i]\n options = [\n {'option_type': 'color', 'option_value': color, 'attributes': []},\n {'option_type': 'size', 'option_value': size, 'attributes': []},\n {'option_type': 'warehouse', 'option_value': wh_name, 'attributes': source_attribs, 'shared': True},\n {'option_type': 'vendor', 'option_value': 'broderbros', 'attributes': source_attribs, 'shared': True},\n ]\n inventory_data[itemRef].append({'options': options, 'inventory': row[i]})\n \n # Pricing data\n pricing_tarfile = \"./catalog/stock_data/bro-AllStyles_R06.tar.gz\"\n download_data = True\n if os.path.exists(pricing_tarfile):\n # Check that file is no more than 1 day old\n filestat = os.stat(pricing_tarfile)\n tm = datetime.datetime.fromtimestamp(filestat.st_mtime)\n today = datetime.datetime.now()\n dt = today - tm\n if dt.days < 1:\n download_data = False\n \n if download_data:\n print 'Downloading items.csv for price data ....'\n br = utils.create_browser(1, 2)\n br.open(\"https://www.broderbros.com/cgi-bin/online/webbro/bro-index.w\")\n try:\n # Fill login form\n br.select_form(name = 'frmLogin')\n frm = br.form\n \n ctrl = frm.find_control('userName')\n ctrl.value = USERNAME\n ctrl = frm.find_control('password')\n ctrl.value = PASSWORD\n \n # Submit login form\n if TESTRUN: print 'Submit Login Form'\n \n br.select_form(name = 'frmLogin')\n br.submit()\n except:\n print \"Login form does not exist, please check URL, downloaded html or site is down\"\n return None\n try:\n tar_url = 
\"https://www.broderbros.com/cgi-bin/download/webshr/prod-info-view.w?f=bro-AllStyles_R06.tar.gz\"\n br.retrieve(tar_url, pricing_tarfile)\n except:\n print \"Error when downloading pricing file\"\n return None\n \n try:\n tar = tarfile.open(pricing_tarfile)\n for member in tar.getmembers():\n member.name = member.name.split('/')[-1] # strip directory from filename\n tar.extractall('catalog/stock_data/bro-AllStyles_R06')\n tar.close()\n except:\n print \"Error when extracting items.csv\"\n return None\n \n f_object = open('./catalog/stock_data/bro-AllStyles_R06/items_R06.csv', 'rb')\n #~ f_object = open('items_R06.csv', 'rb')\n \n print \"Parse items_R06.csv ... \"\n for row in csv.reader(f_object):\n itemRef = row[7].lower()\n if itemRef == 'style code':\n continue\n \n size = row[8].lower()\n color = row[11].lower()\n price = row[18]\n \n item_data = inventory_data.get(itemRef)\n if not item_data:\n continue\n # Find data with same size and color\n for var_dict in item_data:\n options = var_dict['options']\n opt_dict = {}\n for opt in options:\n opt_type = opt['option_type']\n opt_value = opt['option_value']\n if opt_type == 'size':\n opt_dict['size'] = opt_value\n elif opt_type == 'color':\n opt_dict['color'] = opt_value\n if opt_dict['size'] == size and opt_dict['color'] == color:\n var_dict['price'] = [{'price_type': 'retail_price', 'price': price}]\n \n f_object.close()\n \n try:\n shutil.rmtree(\"./catalog/stock_data/bro-AllStyles_R06\")\n #~ os.remove(\"./catalog/stock_data/bro-AllStyles_R06.tar.gz\")\n except:\n pass\n \n return inventory_data", "def read_database(db_path, db_file, *args):\n\n db_filepath = os.path.join(db_path, db_file)\n\n # list to store loaded data\n data_imported = []\n conn = sqlite3.connect(db_filepath)\n\n for data_name in args:\n\n\n info = f'Reading {data_name} from database................'\n print(info, end=\"\")\n data_name_in_db = conn.execute(\n f\"\"\"SELECT name FROM sqlite_master WHERE type='table' \n AND name='{data_name}'; \"\"\").fetchall()\n if data_name_in_db:\n df = pd.read_sql(f\"select * from {data_name}\", con=conn)\n substitute_names(df)\n # revert single column DataFrame to Series\n if 'index' in df.columns:\n df.set_index('index', inplace=True)\n df = df.squeeze('columns')\n data_imported.append(df)\n print('ok')\n else:\n data_imported.append(None)\n print('no data')\n conn.close()\n return data_imported #if len(data_imported)>1 else data_imported[0]", "def read_stock_list():\n print(\"Reading list of stocks.\")\n stocks = {}\n with open(STOCKS_FILE) as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n stocks[row['Symbol']] = (row['Name'], row['Sector'])\n return stocks", "def load_stock_symbol_fr_file(self):\n stock_list = pandas.read_csv(self.retrieval_type_input_file_dict[self.stock_retrieval_type])\n stock_list = list(stock_list['SYMBOL'])\n self.set_full_stocklist_to_retrieve(stock_list)", "def read_locations(db, openfile):\n pass", "def readStockFromOG(self):\n # TODO If not in stock, do not display item\n self.og_stock = pd.read_csv('data/stock.csv')\n self.og_stock.to_csv('data/menu.csv', index = False)", "def read_database():\n file = tables.open_file(glob.datafile)\n table_d = file.root.VelibData.dynamic\n table_s = file.root.VelibData.static\n n_rows = len(table_d)\n print \"Nrows in dynamic table:\", n_rows\n print \"N stations:\", len(table_d[0][\"last_update\"])\n print \"Time of most recent sampling:\", \\\n time.asctime(time.localtime(recover_time(table_d[-1][\"sample_time\"])))\n print \"Nbikes 
available at most recent sampling:\", \\\n table_d[n_rows-1][\"available_bikes\"]\n print \"Time of last_update at most recent sampling:\", \\\n time.asctime(\n time.localtime(recover_time(table_d[n_rows-1][\"last_update\"][0])))\n print \"Number arr\", table_s[0][\"number\"]\n file.close()", "def get_com_data_fr_all_stocks(self):\n full_list = self.replace_special_characters_in_list(self.full_stocklist_to_retrieve)\n chunk_of_list = self.break_list_to_sub_list(self.full_stocklist_to_retrieve)\n \n self.temp_full_data_df = None\n for n in chunk_of_list:\n # print the progress\n sys.stdout.write('.')\n\n # set the small chunk of list\n self.set_target_stocks_list(n)\n self.get_com_data()\n\n # convert to dataframe\n self.com_data_allstock_df = pandas.DataFrame(self.com_data_allstock_list)\n self.com_data_allstock_df.rename(columns ={'symbol':'SYMBOL'}, inplace=True)\n \n print 'Done\\n'", "def read(self):\n self.connect()\n get_books = f\"select * from {self.book_table}\"\n try:\n self.cur.execute(get_books)\n self.con.commit()\n for i in self.cur:\n yield i\n except MySQLError as err:\n messagebox.showinfo(\"Failed to fetch files from database\")\n print(err)", "def load_stock(self):\n lines = []\n with Transaction().start(DBNAME, 1):\n stock_lines = self.Inventory.search([('state', '=', 'done'), ('location', '=', self.location.id)])\n if stock_lines:\n for i in stock_lines:\n batch = i.batch_number\n for j in i.lines:\n if j.quantity <= 0:\n continue\n dictionary = {}\n dictionary['code'] = j.product.code\n dictionary['item'] = j.product.template.name\n dictionary[\n 'category'] = j.product.template.category.name if j.product.template.category else None\n dictionary['quantity'] = Decimal(j.quantity).quantize(Decimal('0.11')).to_eng()\n dictionary['batch_number'] = batch\n dictionary['supplier'] = j.supplier.name if j.supplier else None\n dictionary['expiry_date'] = j.expiry_date.strftime('%d-%m-%Y') if j.expiry_date else None\n lines.append(dictionary)\n return lines", "def open_file(stock, name, setup=False):\n if not isinstance(stock, str):\n raise TypeError(\"Parameter 'stock' should be a string, not a \"\n + type(stock).__name__)\n if setup is True: # when setup, name is \"AAPL_income.csv\", not \"income\"\n # path = _os.path.join(datapath(setup=False), stock, name)\n path = datapath(True, stock, name)\n df = _pd.read_csv(path)\n _gc.collect()\n return df\n # not setup, normal open_file\n names = ['major_holders', 'top_institutional_holders', 'top_mutual_fund_holders',\n 'Trading_Information', 'Financial_Highlights', 'Valuation_Measures',\n 'Executives', 'Description',\n 'Earnings_Estimate', 'Revenue_Estimate', 'Earnings_History',\n 'EPS_Trend', 'EPS_Revisions', 'Growth_Estimates',\n 'stats', 'statements', 'reports',\n 'Executives', 'Description', 'analysis', 'Summary',\n 'balance', 'cash_flow', 'income']\n if name not in names:\n try:\n name = _path(name) # when client mistakenly input factor instead of sheet name\n except ValueError:\n raise ValueError(\n 'Parameter \"name\" should be the name of the financial sheets, not a factor name...Use path method to '\n 'find the location of a factor')\n path = datapath(True, stock, stock)\n try:\n df = _pd.read_csv(path + '_' + name + '.csv')\n _gc.collect()\n except FileNotFoundError:\n _gc.collect()\n if _os.path.exists(datapath(True, stock)):\n raise ValueError(\"There is no sheet - {} - for company {}. 
Use main_get to retrieve the sheet\".format\n (name, stock))\n else:\n raise ValueError(\"There is no record of '\" + stock + \"' in database\")\n return df", "def read_opsim_db(opsim_db_path='/global/projecta/projectdirs/lsst/groups/SSim/DC2/minion_1016_desc_dithered_v4.db'):\n conn = sqlite3.connect(opsim_db_path)\n # Check the table names\n cursor = conn.cursor()\n cursor.execute(\"SELECT name FROM sqlite_master WHERE type='table';\")\n table_names = cursor.fetchall()\n print(table_names)\n # Turn table into Pandas df\n obs_history = pd.read_sql(sql=\"SELECT * from ObsHistory\", con=conn, index_col=None)\n field = pd.read_sql(sql=\"SELECT * from Field\", con=conn, index_col=None)\n return obs_history, field", "def read_relations(db, openfile):\n pass", "def load_file():\n global list_of_table, data_base, new_data\n open_name = askopenfilename()\n\n if Path(open_name).suffix == '.db':\n data_base = open_name\n data_base = str(data_base)\n new_data_base = parse(data_base)\n new_data = update_list_tables(new_data_base)\n new_data.clear()\n\n else:\n mistake_db_file()", "def __init__(self):\n # Param\n ## self.target_stocks use mainly for a few stocks.\n ## it also use when setting the 45 or 50 stocks at a time to url\n self.target_stocks = ['S58.SI','S68.SI'] ##special character need to be converted\n self.full_stocklist_to_retrieve = [] #full range fo stocks\n \n # for difffernt retrieval, based on the dict available to select the file type\n # currently have \"watcher\", \"all\" where watcher is the selected stocks to watch.\n self.stock_retrieval_type = 'watcher' \n\n ## current data .csv file url formation\n #header to match the sequence of the formed url\n self.cur_quotes_parm_headers = ['NAME', 'SYMBOL', 'LATEST_PRICE', 'OPEN', 'CLOSE','VOL',\n 'YEAR_HIGH','YEAR_LOW'] #label to be use when downloading.\n \n # URL forming for price details\n self.cur_quotes_start_url = \"http://download.finance.yahoo.com/d/quotes.csv?s=\"\n self.cur_quotes_stock_portion_url = ''\n self.cur_quotes_stock_portion_additional_url = '.SI'# for adding additonal str to the stock url.\n self.cur_quotes_property_portion_url = ''\n self.cur_quotes_property_str = 'nsl1opvkj' #default list of properties to copy.\n self.cur_quotes_end_url = \"&e=.csv\"\n self.cur_quotes_full_url = ''\n\n # Properties from excel\n self.enable_form_properties_fr_exceltable = 1\n self.properties_excel_table = r'C:\\pythonuserfiles\\yahoo_finance_data_extract\\Individual_stock_query_property.xls'\n\n # Output storage\n self.cur_quotes_csvfile = r'c:\\data\\temp\\stock_data.csv'\n self.cur_quotes_df = object()\n\n ## !!!\n self.cur_quotes_url_list = [] # store of all the url list being query. 
For debug.\n\n # for debug/printing\n self.store_individual_set_df = []\n self.__print_url = 0 # for printing the url string\n\n # input file path\n # dict based on the file for different type of retrieval\n self.retrieval_type_input_file_dict = {\n \"all\" : r'C:\\pythonuserfiles\\yahoo_finance_data_extract\\stocklist.csv',\n \"watcher\": r'c:\\data\\google_stock_screener.csv'\n }", "def retrieve_company_data(self):\n self.set_stock_sym_append_str('')\n self.set_stock_retrieval_type('all') #'all', watcher\n self.load_stock_symbol_fr_file()", "def _open_data_source(self, *args):\n if len(args) != 0:\n # For first call to open (open())\n self.ds_filename = args[0]\n self.ds_tablename = args[1]\n self.ds_file = load_workbook(filename = args[0], use_iterators = True)\n self.ds_table = self.ds_file.get_sheet_by_name(name = args[1])\n else:\n # For reopening the file (reset())\n self.ds_file = load_workbook(filename = self.ds_filename, use_iterators = True)\n self.ds_table = self.ds_file.get_sheet_by_name(name = self.ds_tablename)\n # In any case we need a reader object to iterate over the table content \n self.ds_reader = self.ds_table.iter_rows()", "def download_stocks(stocklist=STOCKLIST, fresh=False):\n # load stocklist\n with open(stocklist) as f:\n stocks = f.read().strip('\\n').split('\\n')\n\n dfs = {}\n for s in stocks:\n print(s)\n stockfile = '../stockdata/' + s + '.csv.gz'\n if fresh or not os.path.exists(stockfile):\n print('downloading fresh')\n stock = quandl.get('EOD/' + s)\n stock.to_csv(stockfile, compression='gzip')\n dfs[s] = stock\n continue\n\n else:\n stock = pd.read_csv(stockfile, index_col=0)\n stock.index = pd.to_datetime(stock.index)\n timedelta_step = 1\n if HOUR > 2 and WEEKDAY not in [5, 6]: # for mtn time\n timedelta_step = 0\n elif WEEKDAY == 0: # it's monday\n timedelta_step = 3 # can be up to last friday\n elif WEEKDAY in [5, 6]: # if a weekend, last data is from friday\n timedelta_step = WEEKDAY - 4\n print('date gap:', TODAY.date() - stock.iloc[-2:].index[-1].date())\n print('step, timedelta:', timedelta_step, datetime.timedelta(timedelta_step))\n if (TODAY.date() - stock.iloc[-2:].index[-1].date()) <= datetime.timedelta(timedelta_step):\n dfs[s] = stock\n print('latest date close enough to up-to-date:')\n print(stock.iloc[-2:].index[-1].date())\n print('not downloading')\n print('')\n continue\n else:\n print('latest date is')\n print(stock.iloc[-2:].index[-1].date())\n print('downloading fresh')\n stock = quandl.get('EOD/' + s)\n stock.to_csv(stockfile, compression='gzip')\n dfs[s] = stock\n\n return dfs", "def _process_stocks(self, limit):\n if self.testMode:\n g = self.testgraph\n else:\n g = self.graph\n model = Model(g)\n line_counter = 0\n\n raw = '/'.join((self.rawdir, 'stock'))\n logger.info(\"building labels for stocks\")\n\n with open(raw, 'r') as f:\n f.readline() # read the header row; skip\n filereader = csv.reader(f, delimiter='\\t', quotechar='\\\"')\n for line in filereader:\n line_counter += 1\n\n (stock_id, dbxref_id, organism_id, name, uniquename,\n description, type_id, is_obsolete) = line\n# 2 12153979 1 2 FBst0000002 w[*]; betaTub60D[2] Kr[If-1]/CyO 10670\n\n stock_num = stock_id\n stock_id = 'FlyBase:'+uniquename\n self.idhash['stock'][stock_num] = stock_id\n stock_label = description\n\n organism_key = organism_id\n taxon = self.idhash['organism'][organism_key]\n\n # from what i can tell, the dbxrefs are just more FBst,\n # so no added information vs uniquename\n\n if not self.testMode \\\n and limit is not None and line_counter > 
limit:\n pass\n else:\n if self.testMode \\\n and int(stock_num) not in self.test_keys['strain']:\n continue\n\n # tax_label = self.label_hash[taxon] # unused\n # add the tax in case it hasn't been already\n model.addClassToGraph(taxon)\n model.addIndividualToGraph(stock_id, stock_label, taxon)\n if is_obsolete == 't':\n model.addDeprecatedIndividual(stock_id)\n\n return", "def openDB(self, dbpath, updateOnIdle=True):\n\t\tself.openDBFile( last_file_in_directory(dbpath, \"*sqlite\"), updateOnIdle )", "def fetch_all_stocks(config):\n start_date_dt = datetime.datetime.strptime(config['start_date'], '%Y%m%d').date()\n end_date_dt = datetime.datetime.strptime(config['end_date'], '%Y%m%d').date()\n\n with open(DATA_DIR / 'nasdaq_screener_1619356287441.csv', 'r') as fp:\n reader = csv.reader(fp)\n next(reader) # skip header\n symbols = [row[0] for row in reader]\n\n dir_path = DATA_DIR / 'nasdaq_historical' / '{}_{}'.format(config['start_date'], config['end_date'])\n if not os.path.exists(dir_path):\n os.mkdir(dir_path)\n\n for symbol in symbols:\n filename = '{}.csv'.format(symbol)\n full_path = dir_path / filename\n if not os.path.exists(full_path):\n try:\n df = web.get_data_yahoo(symbol, config['start_date'], config['end_date'])\n except:\n # print(traceback.format_exc())\n print('Could not load ' + symbol)\n else:\n if df.index[0].date() == start_date_dt and df.index[-1].date() == end_date_dt:\n print('Loaded: ' + symbol)\n df.to_csv(full_path)", "def query_stock_prices(db_path, ticker, price, filter_zero_volume):\n\n try:\n db_connection = sqlite3.connect(db_path)\n except sqlite3.Error as e:\n print(e)\n return False\n\n if type(ticker) == str:\n if ticker.lower() == 'all_available': # if is all_available, transform ticker into a list using the function\n ticker = get_all_available_stock_table(db_path)\n else: # if not, will consider the string is a stock ticker\n if filter_zero_volume == True:\n main_df = pd.read_sql_query(\"SELECT formatted_date,%s FROM %s WHERE volume>0\" % (price,ticker), db_connection)\n else:\n main_df = pd.read_sql_query(\"SELECT formatted_date,%s FROM %s\" % (price, ticker), db_connection)\n main_df['formatted_date'] = pd.to_datetime(main_df['formatted_date'])\n main_df.set_index('formatted_date',drop=True,inplace=True)\n main_df.columns = [ticker]\n\n else:\n lst_ = []\n for t_ in ticker:\n if filter_zero_volume == True:\n df_ = pd.read_sql_query(\"SELECT formatted_date,%s FROM %s WHERE volume>0\" % (price, t_), db_connection)\n else:\n df_ = pd.read_sql_query(\"SELECT formatted_date,%s FROM %s\" % (price, t_), db_connection)\n\n df_['formatted_date'] = pd.to_datetime(df_['formatted_date'])\n df_.drop_duplicates(subset='formatted_date', inplace=True)\n df_.set_index('formatted_date', drop=True, inplace=True)\n df_.columns = [t_]\n lst_.append(df_) # storing the DataFrame into a list\n\n main_df = pd.concat(lst_, axis=1, sort=False) # concatenating the list into one main DataFrame\n\n main_df.sort_index(inplace=True)\n main_df = main_df[(main_df.index.year >= 2000)] # filtra ano < 2000\n\n db_connection.close()\n\n return main_df", "def read():\n with open(DBNAME) as f:\n foo = pickle.loads(f.read())\n print foo", "def db_processing(database):\n print(\"---Processing database---\")\n connection = sqlite3.connect(database)\n vocabulary = _get_vocabulary(connection)\n genreDict = _get_unique_genres(connection)\n cursor = connection.cursor()\n cursor.execute(\"SELECT * FROM shared_lyrics;\")\n lyrics = cursor.fetchall()\n songs = {}\n print(\"---Reading lyrics and 
genres---\")\n num_songs = 0\n for lyric in lyrics:\n if lyric[0] not in songs:\n if num_songs != 0 and num_songs % 1000 == 0:\n print(\"{} songs processed.\".format(num_songs))\n num_songs += 1\n songs[lyric[0]] = {}\n songs[lyric[0]]['lyrics'] = {'words':[], 'count':[]}\n sql_command = \"SELECT * FROM shared_genres WHERE track_id='{}';\"\\\n .format(lyric[0])\n cursor.execute(sql_command)\n labels = cursor.fetchone()\n songs[lyric[0]]['genre'] = genreDict[labels[1]]\n songs[lyric[0]]['is_test'] = labels[2]\n songs[lyric[0]]['lyrics']['words'].append(vocabulary[lyric[1]])\n songs[lyric[0]]['lyrics']['count'].append(lyric[2])\n df, idf = _get_idf(songs)\n write_new_database(songs)\n write_list('data/vocabulary.txt', vocabulary, idf)\n write_list('data/genresList.txt', genreDict)", "def getStockData():\n pass", "def read_database(app):\n app.status.cursorToHourglass()\n app.central.closeAllSubWindows()\n app.database().scan()\n app.status.cursorToNormal() \n app.refresh()", "def openDB(self, dbpath, FskHz):\n\t\tself.openDBFile( last_file_in_directory(dbpath, \"*sqlite\"), FskHz)", "def set_full_stocklist_to_retrieve(self, list_of_stocks):\n self.full_stocklist_to_retrieve = list_of_stocks", "def get_stocks_from_file(self, limit=0):\n\n file_rows = []\n with open(self.file_name) as f:\n reader = csv.reader(f)\n for row in reader:\n file_rows.append(row)\n\n for stock_info in file_rows:\n ticker, name, industry, cap, exchange = stock_info[:5]\n\n if cap[-1] == 'B' or not self.only_billions:\n stock_data = StockData(ticker, name, industry, exchange)\n self.stock_list.append(stock_data)\n\n # Handle limiting\n if limit > 0:\n if len(self.stock_list) >= limit:\n break\n\n print(\"TOTAL SIZE:\", len(self.stock_list))", "def get_inform_from_db(database_file_name: str) -> list:\n global data\n con = sqlite3.connect(database_file_name)\n cur = con.cursor()\n master = 'sqlite_master'\n query = \"SELECT name FROM \" + master + \" WHERE type = 'table'\"\n cur.execute(query)\n data = cur.fetchall()\n return data", "def getAllStocks():\n return pandas.read_excel('D:\\The Fastlane Project\\Coding Projects\\Stock Analysis\\stocks\\stock_data_2.xlsx')", "def loadDatabase ():\n database = []\n # Open a file\n path = \"lyd/\"\n dirs = os.listdir( path )\n \n # This prints all of the files and directories\n for file in dirs:\n if file == \".DS_Store\": #Mac file\n continue\n songdict = {}\n print (file)\n Zxx = STFTsignal.getSTFTofFile(path + file) #STFT of the file\n #mean, eigen and weights are stored in dictionary songdict\n songdict[\"mean\"], songdict[\"eigen\"], songdict[\"weights\"] = PCA(Zxx)\n songdict[\"name\"] = file\n database.append (songdict) \n return database", "def open_file():\r\n\tr_ct = 0\r\n\t\r\n\twith open('flavors_of_cacao.csv', 'r') as csvfile:\r\n\t\tcacao_stream = csv.DictReader(csvfile)\r\n\t\tfor cacao_row in cacao_stream:\r\n\t\t\tr_ct += 1\r\n\t\t\t\r\n\t\t\t#quit after 100 records\r\n\t\t\tif r_ct > 100:\r\n\t\t\t\tbreak\r\n\t\t\t\t\r\n\t\t\t#pull the data out of the dictionary for sqlite3\r\n\t\t\tt_Company = cacao_row['Company']\r\n\t\t\tt_Specific_Bean_Origin = cacao_row['Specific_Bean_Origin']\r\n\t\t\tt_REF = cacao_row['REF']\r\n\t\t\tt_Review = cacao_row['Review']\r\n\t\t\tt_Cocoa = cacao_row['Cocoa']\r\n\t\t\tt_Location = cacao_row['Location']\r\n\t\t\tt_Rating = cacao_row['Rating']\r\n\t\t\tt_Bean = cacao_row['Bean']\r\n\t\t\tt_Broad_Bean_Origin = cacao_row['Broad_Bean_Origin']\r\n\t\t\t\r\n\t\t\t#print the first 15 lines\r\n\t\t\tif r_ct <= 15:\r\n\t\t\t\tprint (r_ct, 
t_Company, t_Bean, t_Cocoa, t_Review)\r\n\t\t\t\t\r\n\t\t\t#creates a sql cursor, formats the insert sql and executes it\r\n\t\t\tc = conn.cursor()\r\n\t\t\tstrsql = \"\"\"\r\n\t\t\t\tINSERT INTO cacao\r\n\t\t\t\t\t(Company, Specific_Bean_Origin, REF, Review, Cocoa, Location, Rating, Bean, Broad_Bean_Origin)\r\n\t\t\t\tvalues (\r\n\t\t\t\t\t'{t_Company}', '{t_Specific_Bean_Origin}', '{t_REF}', '{t_Review}', '{t_Cocoa}', '{t_Location}', '{t_Rating}', '{t_Bean}', '{t_Broad_Bean_Origin}');\r\n\t\t\t\t\"\"\".format(\r\n\t\t\t\t\tt_Company = t_Company,\r\n\t\t\t\t\tt_Specific_Bean_Origin = t_Specific_Bean_Origin,\r\n\t\t\t\t\tt_REF = t_REF,\r\n\t\t\t\t\tt_Review = t_Review,\r\n\t\t\t\t\tt_Cocoa = t_Cocoa,\r\n\t\t\t\t\tt_Location = t_Location,\r\n\t\t\t\t\tt_Rating = t_Rating,\r\n\t\t\t\t\tt_Bean = t_Bean,\r\n\t\t\t\t\tt_Broad_Bean_Origin = t_Broad_Bean_Origin\r\n\t\t\t\t\t)\r\n\t\t\tc.execute(strsql)\r\n\t\t\tconn.commit()", "def load_expenditures():\n\n Expenditure.query.delete()\n\n with open(expenditure_file) as f:\n for _ in range(1):\n next(f)\n \n for row in f:\n row = row.rstrip()\n expenditure_data = row.split(\",\")\n print(expenditure_data)\n\n id = expenditure_data[0]\n category_id = expenditure_data[1]\n price = expenditure_data[2]\n date_of_expenditure = expenditure_data[3]\n expenditure_userid = expenditure_data[4]\n where_bought = expenditure_data[5]\n description = expenditure_data[6]\n\n expenditure = Expenditure(\n id = id,\n category_id = category_id,\n price = price,\n date_of_expenditure = get_datetime(date_of_expenditure),\n expenditure_userid = expenditure_userid,\n where_bought = where_bought,\n description = description\n )\n\n db.session.add(expenditure)\n\n db.session.commit()", "def read(self, database ='project'):\n\t\tfile = open(self.file_name, \"r\")\n\n\t\ti = 1\n\t\tseptics = []\n\t\tfor line in file:\n\t\t\tif i > 2:\n\t\t\t\tval = line.split()\n\t\t\t\tself.check_cols(val, 13, 'septic')\n\n\t\t\t\tsep = {\n\t\t\t\t\t'name': val[0].lower(),\n\t\t\t\t\t'q_rate': val[1],\n\t\t\t\t\t'bod': val[2],\n\t\t\t\t\t'tss': val[3],\n\t\t\t\t\t'nh4_n': val[4],\n\t\t\t\t\t'no3_n': val[5],\n\t\t\t\t\t'no2_n': val[6],\n\t\t\t\t\t'org_n': val[7],\n\t\t\t\t\t'min_p': val[8],\n\t\t\t\t\t'org_p': val[9],\n\t\t\t\t\t'fcoli': val[10],\n\t\t\t\t\t'description': val[12] if val[12] != 'null' else None # 12 index because extra column\n\t\t\t\t}\n\t\t\t\tseptics.append(sep)\n\t\t\ti += 1\n\n\t\tif database == 'project':\n\t\t\tdb_lib.bulk_insert(project_base.db, project_parmdb.Septic_sep, septics)\n\t\telse:\n\t\t\tdb_lib.bulk_insert(datasets_base.db, datasets_parmdb.Septic_sep, septics)", "def get_basics():\n today = _dt.date.today().isoformat()\n log.info('Start to collect HS_basics_all stocks...')\n log.info('Collecting the main stock basics from tushare for %s' % today)\n hs = __get_hs_daily(today)\n \n log.info('Collecting SH50 and prepare to merge into the main frame...')\n sh50 = __get_sh50()\n log.info('Collection HS300 and prepare to merge into the main frame...')\n hs300 = __get_hs300()\n log.info('Collection ZZ500 and prepare to merge into the main frame...') \n zz500 = __get_zz500()\n\n # Add 上证50 column\n log.trace('Adding SH50 to main frame as \\'sh50\\'')\n hs['sh50'] = np.where(hs['code'].isin(sh50['code']), True, False)\n # Merge from hs300 and change column name to 'hs300'\n log.trace('Adding HS300 to main frame as \\'hs300\\'') \n hs = hs.merge(hs300, on='code', how='left')\n hs.fillna({'hs300':0}, axis=0, inplace=True)\n # Add 中证500 column\n log.trace('Adding ZZ500 
to main frame as \\'zz500\\'')\n hs['zz500'] = np.where(hs['code'].isin(zz500['code']), True, False)\n # Filte 中小板\n log.trace('Adding SME to main frame as \\'sme\\'')\n __filter_sme(hs)\n # Filte 创业板\n log.trace('Adding GEM to main frame as \\'gem\\'')\n __filter_gem(hs)\n # Filte ST\n log.trace('Filting ST and tag to main frame as \\'st\\'')\n __filter_st(hs)\n # Fill 概念分类\n log.trace('Adding concept classfied \\'concept\\'')\n hs = __concept_classified(hs)\n\n # Reset index, prepare to write to database\n log.info('Reindexing the DataFrame and rename to match the column name in db...')\n hs = hs.reindex(['code','name','industry','concept','sme','gem','st','hs300','sh50','zz500','pe','outstanding','totals','totalAssets','liquidAssets','timeToMarket','pb','rev','profit','holders'], axis=1)\n hs.rename({'totalAssets': 'total_assets', 'liquidAssets': 'liquid_assets', 'timeToMarket': 'time_to_market'}, axis='columns', inplace=True)\n\n log.info('Successfully collect stock basics, got [%s] records.' % len(hs))\n \n return hs", "def _read_dataset(self):\n import pandas as pd\n\n freesolv_path = get_data_file_path(FREESOLV_PATH)\n\n freesolv_db = pd.read_csv(freesolv_path, delimiter=';',\n skipinitialspace=True,\n skiprows=[0, 1, 2], header=0,\n names=['compound id', 'SMILES',\n 'iupac name',\n 'experimental value',\n 'experimental uncertainty',\n 'calculated value (GAFF)',\n 'calculated uncertainty',\n 'experimental reference',\n 'calculated reference',\n 'notes'])\n\n compound_ids = freesolv_db['compound id'].to_list()\n smiles_tags = freesolv_db['SMILES'].to_list()\n experimental_v = freesolv_db['experimental value'].to_list()\n return compound_ids, smiles_tags, experimental_v", "def retrieve_po_and_warehouse_lists(po_file_name):\n print \"Parsing the specified PO and Warehouse orders and items lists from file \\\"\" + po_file_name + \"\\\"...\"\n book = open_workbook(po_file_name)\n sheet = book.sheet_by_index(0)\n\n # section denotes the section of the spreasheet currently being parsed\n # it begins with START, moves to the purchase orders, then ends with\n # the warehouse section\n section = \"START\"\n\n # Generic spreadsheet details\n project_number = \"\"\n project_name = \"\"\n store_number = \"\"\n purchase_order_count = 0\n warehouse_order_count = 0\n\n # Column name static variables\n VENDOR_NAME_COL = 0\n PURCHASE_ORDER_NUMBER_COL = 1\n\n purchase_order_tuple_list = []\n warehouse_order_tuple_list = []\n\n # Variables to hold current purchase order data\n current_po_id = 0\n current_vendor = \"\"\n current_target_del_date = \"\" \n\n # Variables to hold current warehouse data\n current_warehouse_id = 0 \n current_warehouse_WHO = \"\"\n \n # iterate through each row of the spreadsheet\n for row_index in range(sheet.nrows):\n \n state = state_change(sheet.row_slice(row_index,0)[0].value)\n if \"\" != state:\n section = state\n \n elif (not sheet.row_slice(row_index,0)[0].value is empty_cell.value):\n if \"START\" == section:\n project_number = sheet.row_slice(row_index,0)[2].value\n project_name = sheet.row_slice(row_index,0)[4].value\n store_number = sheet.row_slice(row_index,0)[6].value\n \n elif \"PURCHASE_ORDERS\" == section:\n # if the row constitutes a new PO\n if sheet.row_slice(row_index,0)[3].value is empty_cell.value:\n # if purchase order list is empty\n if purchase_order_tuple_list:\n purchase_order_count += 1\n current_po_id = sheet.row_slice(row_index,0)[PURCHASE_ORDER_NUMBER_COL].value\n current_vendor = sheet.row_slice(row_index,0)[0].value\n current_target_del_date 
= sheet.row_slice(row_index,0)[2].value \n # else the row is the seperate items for a PO\n else:\n purchase_order_tuple_list.append((sheet.row_slice(row_index,0)[0].value,sheet.row_slice(row_index,0)[1].value,sheet.row_slice(row_index,0)[2],sheet.row_slice(row_index,0)[3].value,sheet.row_slice(row_index,0)[4].value,sheet.row_slice(row_index,0)[5],current_po_id,current_vendor,current_target_del_date))\n \n elif \"WAREHOUSE_ORDERS\" == section:\n # if the row constitutes a new warehouse\n if sheet.row_slice(row_index,0)[1].value is empty_cell.value:\n current_warehouse_id = sheet.row_slice(row_index,0)[0].value\n current_warehouse_WHO = sheet.row_slice(row_index,0)[1].value\n if warehouse_order_tuple_list:\n warehouse_order_count += 1\n else: \n warehouse_order_tuple_list.append((current_warehouse_WHO,sheet.row_slice(row_index,0)[0].value,sheet.row_slice(row_index,0)[1].value,sheet.row_slice(row_index,0)[2],sheet.row_slice(row_index,0)[3].value,sheet.row_slice(row_index,0)[4].value,sheet.row_slice(row_index,0)[5],sheet.row_slice(row_index,0)[6],current_warehouse_id))\n\n # Uncomment the following two lines to print out the stored PO and warehouse data\n # po_print(purchase_order_tuple_list)\n # warehouse_print(warehouse_order_tuple_list)\n\n print \"Completed for Project [\" + project_name + \"], [# \" + str(project_number) + \"], Store #\" + str(store_number)\n print \"Purchase order count: \" + str(purchase_order_count)\n print \"Warehouse order count: \" + str(warehouse_order_count)\n print \"*******************************\"\n print\n\n return purchase_order_tuple_list, warehouse_order_tuple_list", "def open(self):\n\n self._key_generator = KeyGenerator()\n\n # A map from LOD to LODHistory instance for all LODs that have\n # been referenced so far:\n self._lod_histories = {}\n\n # This corresponds to the 'nodes' table in a Subversion fs. (We\n # don't need a 'representations' or 'strings' table because we\n # only track file existence, not file contents.)\n self._node_db = _NodeDatabase()\n\n # Start at revision 0 without a root node.\n self._youngest = 0", "def open_general(file, setup=False):\n try:\n if setup is False:\n p = datapath(True, 'general', file)\n df = _pd.read_csv(p + '.csv')\n elif setup is True:\n p = datapath(True, 'general', file)\n df = _pd.read_csv(p + '.py')\n else:\n df = None # not tested here\n return df\n except FileNotFoundError as e:\n print(\"There is no record of {} in your database. 
Go to your chosen setup path to check, if not there go to \"\n \"Github and download the missing sheet\".format(file))\n return None", "def open_project(uid, project_to_open, DB_NAME='cloud_storage.db', DB_DIRECTORY='server_side_storage/'):\n if any('drop tables' in var for var in [DB_NAME, DB_DIRECTORY, uid, str(project_to_open)]):\n raise DropTablesError(\"Drop Tables command detected in input commands - Print Error Message\")\n db = sqlite3.connect('{}{}'.format(DB_DIRECTORY, DB_NAME))\n cursor = db.cursor()\n cursor.execute(\"SELECT user_table_name FROM user_ids WHERE uid=?\", (uid,))\n table_name = cursor.fetchall()\n table_name = table_name[0][0]\n variable_table_command = \"SELECT * FROM {} WHERE row_id={}\".format(table_name, project_to_open)\n cursor.execute(variable_table_command)\n project_data = cursor.fetchall()\n project_data = list(itertools.chain(*project_data))\n db.commit()\n cursor.close()\n db.close()\n return project_data", "def get_all_available_stock_table(db_path):\n\n try:\n db_connection = sqlite3.connect(db_path)\n except sqlite3.Error as e:\n print(e)\n return False\n\n cursor = db_connection.cursor()\n\n available_tables_1 = cursor.execute(\"SELECT name FROM sqlite_master WHERE type ='table'\").fetchall()\n available_tables = [tab[0] for tab in available_tables_1 if tab[0] != 'Consumption_Index']\n available_tables.sort()\n db_connection.close()\n\n return available_tables", "def setupStockTable(self):\n # Get the date\n # NOTE: This is probably un\n date = datetime.date()\n dateStr = date.month() + \"/\" + date.day() + \"/\" + date.year()\n\n stocks = (\"INTC\", \"AAPL\", \"GOOG\", \"YHOO\", \"SYK\", \"VZ\")\n\n for stock in stocks:\n stockObj = self.securityFactory(stock)\n stockObj.queryAPI()\n\n self.stockDB.query(\"INSERT INTO basic_info (ticker, price, daily_change, company, year_high, year_low, \\\n daily_percent, date, streak) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)\", (stockObj.target, stockObj.curr, \\\n stockObj.daily_change, stockObj.company,\\\n stockObj.year_high, stockObj.year_low,\\\n stockObj.daily_percent, dateStr, 0))", "def download_all():\r\n f = open('stock_symbols.txt', 'r')\r\n fout = open('../data/stocks_read.txt', 'w')\r\n count_max = 500\r\n count = 0\r\n for stock_symbol in f:\r\n stock_symbol = stock_symbol.strip()\r\n try:\r\n stock_download(stock_symbol)\r\n fout.write(stock_symbol + '\\n')\r\n except:\r\n print(\"was not able to read file \", stock_symbol)\r\n count = count + 1\r\n if count >= count_max:\r\n break\r\n f.close()\r\n fout.close", "def val_db(dbfile):\n\n conn = sqlite3.connect(dbfile)\n c = conn.cursor()\n c.execute('SELECT * FROM bringatrailer ORDER BY id')\n for elem in c.fetchall():\n print(elem)\n conn.close()", "def get_stocks():\n print(\"fetching remote...\")\n code_dataframes = pd.read_html(\n 'http://kind.krx.co.kr/corpgeneral/corpList.do?method=download&searchType=13', header=0)[0]\n # 우리가 필요한 것은 회사명과 종목코드이기 때문에 필요없는 column들은 제외해준다.\n print(\"parsing and filtering data...\")\n code_dataframes.종목코드 = code_dataframes.종목코드.map('{:06d}'.format)\n # 한글로된 컬럼명을 영어로 바꿔준다.\n code_dataframes = code_dataframes[['회사명', '종목코드']]\n code_dataframes = code_dataframes.rename(\n columns={'회사명': 'name', '종목코드': 'code'})\n codes = code_dataframes['code']\n names = code_dataframes['name']\n stocks = []\n for i in range(len(names)):\n stocks.append({\n 'name': names[i],\n 'code': codes[i]\n })\n return stocks", "def put_in_db(dbase,defined_symbols,createDB):\n\t\n\tmysql = 
DB.connect(host=\"127.0.0.1\",user=myutils.mysqluser,passwd=myutils.mysqlpwd) \n\tif createDB:\n\t createDataBase(mysql,dbase)\n\ttry:\n\t mysql.select_db(dbase)\n\texcept : \n\t print 'DataBase '+dbase+ ' does not exist'\n\t return\n\treferenceKeys = sorted(defined_symbols.iterkeys())\n\n\tcr = mysql.cursor()\n\ttry:\n\t\tif createDB:\n\t\t\tfor i in referenceKeys:\n\t\t\t\tdict_of_files = defined_symbols[i]\t\t\t\t\t\t# dict of files:list of tuples\n\t\t\t\tif len(dict_of_files) > 0:\n\t\t\t\t\tlist_of_files = sorted(dict_of_files.iterkeys()) \t# list of tuples (file , list of tuples)\n\t\t\t\t\tsql1 = '\"%s\"' % (i)\n\t\t\t\t\tsql = \"INSERT INTO names (name) VALUES (\"+sql1+\");\"\n\t\t\t\t\tcr.execute(sql)\n\t\t\t\t\tmysql.commit()\n\t\t\t\t\tlast_name = cr.lastrowid\n\t\t\t\t\tinsertValues(defined_symbols,mysql,list_of_files,last_name,i,False,0)\n\t\t\tallfilesd = buildallfiles(defined_symbols)\n\t\t\tcrs = mysql.cursor()\n\t\t\tfor j in allfilesd.keys():\n\t\t\t\tfn = j.replace(os.sep,'=')\n\t\t\t\tsql1 = '\"%s\"' % (fn)\n\t\t\t\tsql = \"INSERT INTO allfiles (file) VALUES (\"+sql1+\");\"\n\t\t\t\tcr.execute(sql)\n\t\t\t\tmysql.commit()\n\t\t\t\tlast_file = cr.lastrowid\n\t\t\t\tfor n in allfilesd[j]:\n\t\t\t\t\tsql1 = '\"%d\", \"%s\"' % (last_file,n)\n\t\t\t\t\tsql = 'INSERT INTO allnames (file,name) VALUES ('+sql1+');'\n\t\t\t\t\tcrs.execute(sql)\n\t\t\tmysql.commit()\n\t\t\tcrs.close()\n\t\telse:\n\t\t\tfor i in referenceKeys:\n\t\t\t\tsql2 = ''\n\t\t\t\tdict_of_files = defined_symbols[i]\t\t\t\t\t# dict of files:list of tuples\n\t\t\t\tlist_of_files = sorted(dict_of_files.iterkeys()) \t# list of tuples (file , list of tuples)\n\t\t\t\tsql1= '\"%s\"' % (i)\n\t\t\t\tsql = \"SELECT * FROM names WHERE name=\"+ sql1+\";\"\n\t\t\t\tcr.execute(sql)\n\t\t\t\tmysql.commit()\n\t\t\t\trow = cr.fetchone()\n\t\t\t\tif row != None:\n\t\t\t\t\tsql2 = '\"%d\"' % (row[0])\n\t\t\t\t\tsql = 'SELECT * FROM files WHERE name='+ sql2+\";\"\n\t\t\t\t\tcr.execute(sql)\n\t\t\t\t\trowd = cr.fetchall()\n\t\t\t\t\tinsertValues(defined_symbols,mysql,list_of_files,row[0],i,True,rowd)\n\t\t\t\telse:\n\t\t\t\t\tsql = \"INSERT INTO names (name) VALUES (\"+sql1+\");\"\n\t\t\t\t\tcr.execute(sql)\n\t\t\t\t\tmysql.commit()\n\t\t\t\t\tlast_name = cr.lastrowid\n\t\t\t\t\tinsertValues(defined_symbols,mysql,list_of_files,last_name,i,False,0)\n\n\texcept mysql.Error as err:\n\t\tprint 'ERROR DataBase '+str(err)\n\t\tmysql.rollback() \n\t\treturn\n\n\tcr.close()\n\tmysql.close()", "def read_file(file_name, table):\r\n table.clear() # this clears existing data and allows to load data from file\r\n loadctr=0\r\n try:\r\n with open(file_name, 'rb') as objFile:\r\n data=pickle.load(objFile) #dump my 2D list into data\r\n while loadctr < len(data):\r\n table.append(data[loadctr]) #append my list element (which is a dictionary) into table\r\n loadctr+=1 #count number of rows loaded into memory\r\n print ('{} CD(s) loaded into inventory.\\n'.format(loadctr))\r\n except FileNotFoundError as e:\r\n print('Unable to load inventory from ' + file_name + '.') #exception handling for file not existing\r\n print ()\r\n print (e, e.__doc__, sep='\\n')\r\n print()\r\n except EOFError as e:\r\n print(file_name + ' is empty.') #exception handling for empty file\r\n print ()\r\n print (e, e.__doc__, sep='\\n')\r\n print()\r\n except pickle.UnpicklingError as e:\r\n print(file_name + ' is corrupted.') #exception handling for unpickling error\r\n print ()\r\n print (e, e.__doc__, sep='\\n')\r\n print()", "def read_database(self):\n # open the 
database\n f = open('KISS_LINES','r')\n # make a list which will contain lines\n tlc = []\n for row in f:\n tlc.append(f.readline())\n f.close()\n\n return tlc", "def get_data(db_dir, command, args = None):\n with lite.connect((db_dir)) as conn:\n try:\n cursor = conn.cursor()\n if args:\n cursor.execute(command,args)\n else:\n cursor.execute(command)\n data = cursor.fetchall()\n #print '[sql management] got all of the data requested according to:\\n--- %s ---\\n the data: %s'%(command, data)\n return data\n except:\n return None", "def stored_stocks():\n return [filename_to_stockname(file.split(\".\")[0]) for file in listdir(file_prefix)]", "def get_cur_quotes_fr_list(self):\n\n ## full list with special characters take care\n full_list = self.replace_special_characters_in_list(self.full_stocklist_to_retrieve)\n chunk_of_list = self.break_list_to_sub_list(self.full_stocklist_to_retrieve)\n self.temp_full_data_df = None\n for n in chunk_of_list:\n # print the progress\n sys.stdout.write('.')\n\n # set the small chunk of list\n self.set_target_stocks_list(n)\n self.get_cur_quotes()\n \n ## need take care of cases where the return is empty -- will return Missing symbols list\n if not len(self.cur_quotes_df.columns) < len(self.cur_quotes_parm_headers):\n self.store_individual_set_df.append(self.cur_quotes_df)\n if self.temp_full_data_df is None:\n self.temp_full_data_df = self.cur_quotes_df\n else:\n self.temp_full_data_df = self.temp_full_data_df.append(self.cur_quotes_df)\n\n ## Remove the % symbol fr self.temp_full_data_df columns\n self.rm_percent_symbol_fr_cols()\n\n print 'Done\\n'", "def read_db():\n f_result = []\n result = execute_query('select sitename, id from {} order by sitename;'.format(TABLES[0]))\n sites = [(x['sitename'], x['id']) for x in result]\n for sitename, site_id in sites:\n sitedict = {'name': sitename}\n querystring = 'select settname, settval from {} order by settname where site_id = %s;'\n result = execute_query(querystring.format(TABLES[1]), (site_id,))\n sitedict['settings'] = {x: y for x, y in cur.fetchall()}\n querystring = 'select dirname, id from {} order by dirname where site_id = %s;'\n result = execute_query(querystring.format(TABLES[2]), (site_id,))\n sitedirs = [(x['dirname'], x['id']) for x in cur.fetchall()]\n sitedict['docs'] = []\n # if we keep the site_id in the docstats table we could restrict this to one db-query\n # and filter the result set inside the loop\n # although this should also be possible with a subselect or something like that\n for dirname, dir_id in sitedirs:\n dirlist = []\n querystring = 'select * from {} order by docname where dir_id = %s;'\n result = execute_query(querystring.format(TABLES[3]), (dir_id,))\n for resultdict in cur:\n resultdict['dirname'] = dirname\n dirlist.append(resultdict)\n sitedict['docs'].append(dirlist)\n f_result.append(sitedict)\n return f_result", "def read():\n # checks if existing alarms exist and places them in a list for faster data manipulation\n event_log(\"reading event database....\",\"\")\n data_file = open(read_json(\"Event_database\"), \"r+\")\n temp_list = []\n if os.stat(read_json(\"Event_database\")).st_size > 0:\n for z in data_file:#reads each line of file\n temp = \"\"\n for element in z:\n if element == \",\":#looks for comma as its used for seperating data in file\n temp_list.append(temp)\n temp = \"\"\n else:\n temp = temp + element\n Events_list.append(temp_list.copy())\n if math.floor(time.time()) - (convert_to_epoch(temp_list[1])) < 0:#determines if event is not expired\n 
events.enter(-(math.floor(time.time()) - (convert_to_epoch(temp_list[1]))), 1, expired_alarm)\n else: # already expired\n expired_alarm()\n temp_list.clear()\n data_file.close()", "def load_symbol_list():\n\n if not os.path.isfile(\"stonks.json\"):\n print(\"generating stock list...\")\n generate_stock_list()\n\n print(\"loading stocks...\")\n with open(\"stonks.json\", \"r\") as read_file:\n data = json.load(read_file)\n\n list_date = int(\n dt.datetime.strptime(data[\"download_time\"], \"%Y-%m-%d %H:%M:%S %Z\").timestamp()\n )\n\n if TODAY - 60 * 60 * 24 * 30 > list_date:\n print(\"updating stock list...\")\n generate_stock_list()\n\n def is_stock_symbol(symbol):\n if symbol in data[\"stonks\"]:\n return True\n return False\n\n return is_stock_symbol", "def loadDB(self,dbfilename):\n \n db=[]\n with open(dbfilename,'r',encoding='ISO-8859-1') as dbfilename:\n dbreader= csv.reader(dbfilename,delimiter=self.sDelimiter )\n for lFields in dbreader:\n db.append(lFields)\n\n return db", "def get_data_from_file(file_name):\n stocks = []\n with open(file_name) as fh:\n keys = line2words(fh.readline()) # assigns the first line of the text document as the keys\n for line in fh: # reads the subsequent lines and assigns them as the as the values\n stocks.append(dict(zip(keys, line2words(line))))\n return stocks", "def download_all_stocks():\n stocks = get_stocklist()\n dfs = {}\n for i, r in stocks.iterrows():\n start = time.time()\n s = r['Ticker']\n stockfile = '../stockdata/' + s + '.csv.gz'\n print('downloading', s)\n stock = quandl.get('EOD/' + s)\n stock.to_csv(stockfile, compression='gzip')\n dfs[s] = stock\n print('took', time.time() - start, 's')\n\n return dfs", "def reqData(self):\r\n #self.reqGlobalCancel()\r\n #self.add_historical(\"Stock('TSLA', 'SMART', 'USD')\")\r\n #self.add_historical(\"Stock('IBM', 'SMART', 'USD')\")\r\n #self.add_historical(\"Stock('MSFT', 'SMART', 'USD')\")\r\n self.add_historical(\"Stock('FB', 'SMART', 'USD')\")", "def read_db(self):\n with open(self.filename, 'r') as database:\n data = json.load(database)\n self.data = data", "def getHistoricalData(stockName, startDate):\n conn = r.connect(db = db.DB)\n stockName = stockName.upper()\n startDate = dateToString(startDate)\n endDate = dateToString(datetime.datetime.now())\n\n if not stockName in db.STOCK_MAP.keys():\n return dict(\n error = 1,\n message = \"The info you want is not what I can give\"\n )\n\n stock = yf.StockInfo(stockName + db.IN_LONDON)\n cachedData = r.table(db.HISTORICAL_TABLE).get(stockName).run(conn)\n infoDict = dict()\n\n if cachedData == None:\n print \"\\n-- DB -- \" + stockName + \" == Inserting New Information ==\\n\"\n histList = stock.historical_prices(startDate, endDate)\n infoDict[\"history_list\"] = createHistoryDictList(histList)\n infoDict[\"index\"] = stockName\n infoDict[\"name\"] = db.STOCK_MAP[stockName]\n infoDict[\"timestamp\"] = getTime()\n r.table(db.HISTORICAL_TABLE).insert(infoDict).run(conn)\n else:\n elapsedTime = (\n getTime() -\n cachedData[\"timestamp\"]\n )\n if elapsedTime > db.HISTORICAL_INTERVAL:\n print \"\\n-- DB -- \" + stockName + \" == Updating Database ==\\n\"\n histList = stock.historical_prices(startDate, endDate)\n infoDict[\"history_list\"] = createHistoryDictList(histList)\n infoDict[\"index\"] = stockName\n infoDict[\"timestamp\"] = getTime()\n r.table(db.HISTORICAL_TABLE).get(stockName).update(\n infoDict\n ).run(conn)\n else:\n print \"\\n-- DB -- \" + stockName + \" == Using Cached Data ==\\n\"\n infoDict = cachedData\n\n infoDict[\"name\"] = 
db.STOCK_MAP[stockName]\n return infoDict", "def openDBFile(self, dbfile, updateOnIdle=True):\n\t\t# Create an index to speed up queries\n\t\tself._createDBIndex(dbfile)\n\t\t\n\t\tself.qWorker=sqlworker.sqlQueryWorker(dbfile)\n\t\n\t\tself.dbFile=dbfile\n\n\t\t# Open a handle to the database\n\t\tself.dbHnd=sqlite.sqlite3MDIO()\n\t\tself.dbHnd.openDB(self.dbFile)\n\n\t\t# set the length of the trajectory in sec.\n\t\tself.trajLength=self.dbHnd.readAnalysisInfo()['dataLengthSec']\n\n\t\t# Connect signals and slots\n\t\tself.qWorker.resultsReady2.connect(self.OnDataReady)\n\n\t\tself.qWorker.moveToThread(self.qThread)\n\t\n\t\tself.qWorker.finished.connect(self.qThread.quit)\n\n\t\tself.qThread.start()\n\n\t\t# reset elapsed time\n\t\tself.analysisTime=0.0\n\n\t\t# reset wall time and analysis start time\n\t\tself.wallTime=0.0\n\t\tself.startTime=time.time()\n\n\t\t# self update on idle flag\n\t\tself.updateDataOnIdle=updateOnIdle\n\n\t\t# Query the DB\n\t\tself._updatequery()\n\n\t\t# Idle processing\n\t\tQtCore.QObject.connect(self.idleTimer, QtCore.SIGNAL('timeout()'), self.OnAppIdle)", "def retrieve_proposed_orders_lists(po_file_name):\n print \"*******************************\"\n print \"Parsing proposed orders list from file \\\"\" + po_file_name + \"\\\"...\"\n\n print \"Retrieving PO and Warehouse orders and items lists\"\n book = open_workbook(po_file_name)\n sheet = book.sheet_by_index(0)\n\n # section denotes the section of the spreasheet currently being parsed\n # it begins with START, then ends with the proposed orders section\n section = \"START\"\n\n # Generic spreadsheet details\n project_number = \"\"\n project_type = \"\"\n store_number = \"\"\n store_name = \"\"\n\n proposed_order_count = 0\n\n # Column name static variables\n DESIGN_ID_COL = 0\n MAPPING_STATUS_COL = 1\n REVIT_DESCRIPTION_COL = 2\n CATEGORY_COL = 3\n QUANTITY_COL = 4\n COVERAGE_UNIT_COL = 5\n RESPONSIBILITY_COL = 6\n COMMENTS_COL = 7\n\n proposed_order_tuple_list = []\n \n # Pull general details for the proposed orders\n project_number = sheet.row_slice(0,0)[1].value\n project_type = sheet.row_slice(3,0)[1].value\n store_number = sheet.row_slice(2,0)[1].value\n store_name = sheet.row_slice(1,0)[1].value\n\n for row_index in range(sheet.nrows):\n state = state_change(sheet.row_slice(row_index,0)[0].value)\n if \"\" != state:\n section = state\n\n elif (not sheet.row_slice(row_index,0)[0].value is empty_cell.value):\n if \"PROPOSED_ORDERS\" == section:\n # if proposed order list is empty\n if proposed_order_tuple_list:\n proposed_order_count += 1\n proposed_order_tuple_list.append((sheet.row_slice(row_index,0)[DESIGN_ID_COL].value,sheet.row_slice(row_index,0)[MAPPING_STATUS_COL].value,sheet.row_slice(row_index,0)[REVIT_DESCRIPTION_COL].value,sheet.row_slice(row_index,0)[CATEGORY_COL].value,sheet.row_slice(row_index,0)[QUANTITY_COL].value,sheet.row_slice(row_index,0)[COVERAGE_UNIT_COL].value,sheet.row_slice(row_index,0)[RESPONSIBILITY_COL].value,sheet.row_slice(row_index,0)[COMMENTS_COL].value,[]))\n\n # Uncomment the following two lines to print out the stored PO and warehouse data\n # proposed_order_print(proposed_order_tuple_list)\n\n print \"Completed retrieval of proposed orders for Project type [\" + project_type + \"], [# \" + str(project_number) + \"], Store #\" + str(store_number)\n print \"Proposed order count: \" + str(proposed_order_count)\n print \"*******************************\"\n print\n return proposed_order_tuple_list", "def get_sp500_stocks_file(file_path=None):\n\n df = 
pd.read_csv(file_path)\n\n return df", "def open_reader(self, **kw):\n return self.table.open_reader(str(self), **kw)", "def test_find_stock_items(self):\n pass", "def test_readstockcsv(self):\n stocks = read_stock_csv('AAPL.csv')\n self.assertEqual(53, len(stocks))\n self.assertEqual('2016-08-01', stocks[0][\"Date\"])\n self.assertEqual('104.410004', stocks[0][\"Open\"])\n self.assertEqual('107.650002', stocks[0][\"High\"])\n self.assertEqual('104.000000', stocks[0][\"Low\"])\n self.assertEqual('107.480003', stocks[0][\"Close\"])\n self.assertEqual('105.460434', stocks[0][\"Adj Close\"])\n self.assertEqual('170149200', stocks[0][\"Volume\"])", "def getStockList(storeExcel=False, path=None):\n import time\n\n start = time.time()\n stockList = Custom().ScreenerView(columns=[0,1,2,3,4,5,6,7,8,25,30,65,66,67])\n end = time.time()\n\n print('Took {0} Min and {1} Seconds to Query'.format((end - start)//60, (end-start)%60))\n\n if storeExcel:\n stockList.to_excel(path)\n\n return stockList", "def open(self):\r\n if not self.filename:\r\n raise ValueError(\"Can only open on-disk databases\")\r\n self.db = anydbm.open(self.filename, \"w\") #raises anydbm.error\r\n try:\r\n if self.db[\"--Reserved--type\"] != self.type:\r\n raise ValueError(\"Not a %s database\" % self.type)\r\n except KeyError:\r\n raise ValueError(\"Not a recognized database\")", "def open(self):\n if not self.filename:\n raise ValueError(\"Can only open on-disk databases\")\n self.db = dbm.open(self.filename, \"w\") #raises anydbm.error\n try:\n if self.db[\"--Reserved--type\"] != self.type:\n raise ValueError(\"Not a %s database\" % self.type)\n except KeyError:\n raise ValueError(\"Not a recognized database\")", "def test_get_parsed_files(self):\n files = Historical_ROAs_Parser()._get_parsed_files()\n with Historical_ROAs_Parsed_Table() as t:\n for f in files:\n sql = f\"SELECT * FROM {t.name} WHERE file = '{f}'\"\n assert len(t.execute(sql)) == 1", "def loaddata(self):\n # Connect to the db\n self.conn, self.c = self.connect_db(self.dbname)\n # create the bdefile table to \n self.c.execute(oeeutil.sql_create_bdefile_table)\n # Delete any previous records\n self.c.execute('DELETE FROM bdefile')\n # hold the content for analysis\n for item in self.content:\n self.c.execute('INSERT INTO bdefile VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?)', item)\n self.c.executescript(oeeutil.sql_create_bdefile_view)\n self.conn.commit()", "def open(self):", "def get_all_files_to_instrument():\n sql=\"SELECT * FROM files\"\n conn=sqlite3.connect(CONNECTION_STRING)\n c=conn.cursor()\n c.execute(sql)\n results=c.fetchall()\n conn.close()\n return results", "def getStock(stockName, infoType):\n stockName = stockName.upper()\n\n conn = r.connect(\n db = db.DB\n )\n\n if not stockName in db.STOCK_MAP.keys():\n return dict(\n error = 1,\n message = \"The info you want is not what I can give\"\n )\n\n stock = yf.StockInfo(stockName + db.IN_LONDON)\n cachedData = r.table(db.CACHE_TABLE).get(stockName).run(conn)\n infoDict = dict()\n\n if cachedData == None:\n print \"\\n-- DB -- \" + stockName + \" == Inserting New Information ==\\n\"\n infoDict = stock.all()\n infoDict[\"index\"] = stockName\n infoDict[\"timestamp\"] = getTime()\n infoDict[\"name\"] = db.STOCK_MAP[stockName]\n r.table(db.CACHE_TABLE).insert(infoDict).run(conn)\n else:\n elapsedTime = (\n getTime() -\n cachedData[\"timestamp\"]\n )\n if elapsedTime > db.UPDATE_INTERVAL:\n print \"\\n-- DB -- \" + stockName + \" == Updating Database ==\\n\"\n infoDict = stock.all()\n infoDict[\"index\"] = 
stockName\n infoDict[\"timestamp\"] = getTime()\n try:\n r.table(db.CACHE_TABLE).get(stockName).update(\n infoDict\n ).run(conn)\n except:\n pass\n else:\n print \"\\n-- DB -- \" + stockName + \" == Using Cached Data ==\\n\"\n infoDict = cachedData\n\n if infoType == \"all\":\n return infoDict\n else:\n return {infoType: infoDict[infoType]}", "def get_52_week_high_low_for_stocks(stocks):\n print(\"Fetching stock quotes.\")\n # Build a full list of symbols\n symbols = []\n for key in stocks.keys():\n symbols.append(key)\n\n num_of_batches = int(len(symbols)/BATCH_SIZE) + 1\n\n all_stocks_df = pandas.DataFrame()\n\n #all_stocks_df = pandas.DataFrame()\n\n # Get quotes for all the stocks in batches\n for i in range(0, num_of_batches):\n print(\"Fetching quotes in batch: \" + str(i+1) + \"/\" + str(num_of_batches))\n start = i*BATCH_SIZE\n end = start + BATCH_SIZE\n batch_symbols = symbols[start: end]\n batch_symbols_query = '+'.join(batch_symbols)\n request_url = YAHOO_FINANCE_API + \"?\" + YAHOO_FINANCE_SYMBOL_PARAM + \"=\" + batch_symbols_query +\\\n \"&\" + YAHOO_FINANCE_FORMAT_PARAM + \"=\" + YAHOO_FINANCE_SYMBOL_PARAM + YAHOO_FINANCE_52_ASK_PRICE +\\\n YAHOO_FINANCE_BID_PRICE + YAHOO_FINANCE_52_CLOSE_PRICE + YAHOO_FINANCE_52_WEEK_LOW +\\\n YAHOO_FINANCE_52_WEEK_HIGH + YAHOO_FINANCE_52_LOW_CHANGE +\\\n YAHOO_FINANCE_52_HIGH_CHANGE + YAHOO_FINANCE_DIV_YIELD\n r = requests.get(request_url)\n\n # Read the returned CSV as a pandas table\n # Returned format is NAME,ASK,BID,52-wLow,52-wHigh\n df = pandas.read_table(StringIO(r.text), header=None, sep=',')\n all_stocks_df = all_stocks_df.append(df, ignore_index=True)\n\n # Delay to slow down things\n time.sleep(1)\n\n\n # Assign columns\n print(\"Stock quotes have been fetched. Beginning analysis...\")\n all_stocks_df.columns=['symbol', 'ask', 'bid', 'close', '52w-low', '52w-high', '52w-low-change', '52w-high-change', 'div-iteryield']\n\n # Add the percent change columns\n all_stocks_df['52w-%-low-change'] = all_stocks_df['52w-low-change']/all_stocks_df['52w-low']*100\n all_stocks_df['52w-%-high-change'] = all_stocks_df['52w-high-change'] / all_stocks_df['52w-high'] * 100\n\n # Add the names and sectors\n all_stocks_df['name'] = \"\"\n all_stocks_df['sector'] = \"\"\n for index, row in all_stocks_df.iterrows():\n all_stocks_df.loc[index, 'name'] = stocks[row['symbol']][0]\n all_stocks_df.loc[index, 'sector'] = stocks[row['symbol']][1]\n\n\n # Process the received quotes\n sorted_values = all_stocks_df.sort_values('52w-%-low-change')\n\n # Done\n print(\"Analysis completed.\")\n return sorted_values", "def prices(symbol):\n to = date.today().strftime(\"%Y%m%d\")\n c = db.cursor()\n c.execute(\"SELECT DATE_ADD(max(date), INTERVAL 1 DAY) FROM quote where symbol = %s\",\n (symbol))\n (_from, ) = c.fetchone()\n if _from == date.today():\n print \"Skipping %s\" % symbol\n return\n print \"Downloading %s\" % symbol\n if _from is None: \n _from = start_date\n else:\n _from = _from.strftime(\"%Y%m%d\")\n prices = stockquote.get_historical_prices(symbol, _from, to)\n headers = prices[0]\n try:\n close = get_idx(headers, 'Close')\n date_ = get_idx(headers, 'Date')\n open = get_idx(headers, 'Open')\n high = get_idx(headers, 'High')\n low = get_idx(headers, 'Low')\n quotes = prices[1:]\n for l in quotes:\n #print \"%s %s\" % (l[date_], l[close])\n try:\n insert(symbol, l[date_], l[close], l[high], l[low], l[open])\n except Exception, e:\n print \"Could not insert %s:%s\" % (symbol, e)\n print \"Inserted %s new quotes for %s\" % (len(quotes), symbol)\n except 
Exception, e:\n print \"Could not download %s\" % symbol\n print e", "def doopen(lines, password):\n \n if lines:\n fh = FileHandles()\n try:\n SpssClient.StartClient()\n uialerts = SpssClient.GetUIAlerts()\n SpssClient.SetUIAlerts(False)\n for line in lines:\n line = fh.resolve(line.lstrip()) # file handles are supported for all file types\n ext = os.path.splitext(line)[-1].lower()\n if ext == \".sav\":\n cmd = \"\"\"GET FILE=\"%s\" \"\"\" % line\n if password is not None:\n cmd = cmd + \"\"\"PASSWORD=\"%s\". \"\"\" % password\n spss.Submit(cmd)\n # assign a random dataset name\n spss.Submit(\"\"\"DATASET NAME %s.\"\"\" % _(\"\"\"Dataset\"\"\") + str(random.randint(1000, 100000)))\n print(_(\"\"\"Opened file %s\"\"\") % line)\n elif ext == \".sps\":\n try:\n if password is None:\n SpssClient.OpenSyntaxDoc(line)\n else:\n SpssClient.OpenSyntaxDoc(line, password)\n print(_(\"\"\"Opened file %s\"\"\") % line)\n except:\n print(_(\"\"\"File: %s already open and has changed or could not be opened. Not opened\"\"\") % line)\n elif ext == \".spv\":\n try:\n if password is None:\n SpssClient.OpenOutputDoc(line)\n else:\n SpssClient.OpenOutputDoc(line, password)\n print(_(\"\"\"Opened file %s\"\"\") % line)\n except:\n print(_(\"\"\"File: %s already open and has changed or could not be opened. Not opened\"\"\") % line) \n else:\n raise ValueError(_(\"\"\"File to open has unknown extension: %s\"\"\") % line)\n except:\n print(_(\"\"\"File open failure: %s\"\"\") % line)\n finally:\n SpssClient.SetUIAlerts(uialerts)\n SpssClient.StopClient()", "def _open_database(self, sessionUID: str):\n assert self._conn is None\n filename = \"F1_2019_{:s}.sqlite3\".format(sessionUID)\n logging.info(\"Opening file {!r}.\".format(filename))\n conn = sqlite3.connect(filename)\n cursor = conn.cursor()\n\n # Get rid of indentation and superfluous newlines in the 'CREATE TABLE' command.\n query = \"\".join(line[8:] + \"\\n\" for line in PacketRecorder._create_packets_table_query.split(\"\\n\")[1:-1])\n\n # Try to execute the 'CREATE TABLE' statement. 
If it already exists, this will raise an exception.\n try:\n cursor.execute(query)\n except sqlite3.OperationalError:\n logging.info(\" (Appending to existing file.)\")\n else:\n logging.info(\" (Created new file.)\")\n\n self._conn = conn\n self._cursor = cursor\n self._filename = filename\n self._sessionUID = sessionUID", "async def stocks(self, ctx):\n\t\tpass", "def read_db_list(tablename = None):\n\n # Set the default tablename\n if tablename is None:\n tablename = config[\"default-table\"]\n\n conn, tunnel = create_db_conn()\n result = None\n\n try:\n cur = conn.cursor()\n cur.execute(\"USE %s\"%(config['db']))\n cur.execute(\"SELECT * FROM %s;\"%(tablename,))\n conn.commit()\n result = cur.fetchall()\n\n except Exception as e:\n print(\"read_data_list failed\")\n print(e)\n\n conn.close()\n tunnel.close()\n return result", "def creatingItemSets(self, iFileName):\n # import pandas as pd\n # global Database\n self.Database = []\n lineNumber = 0\n # data = []\n if isinstance(iFileName, list):\n self.Database = iFileName\n if isinstance(iFileName, pd.DataFrame):\n if iFileName.empty:\n print(\"its empty..\")\n quit()\n i = iFileName.columns.values.tolist()\n if 'Transactions' in i:\n self.Database = iFileName['Transactions'].tolist()\n if 'Patterns' in i:\n self.Database = iFileName['Patterns'].tolist()\n\n if '.CSV' in iFileName:\n file1 = pd.read_csv(iFileName)\n columns = list(file1.head(0))\n if \"Patterns\" in columns:\n with open(iFileName, newline='') as csvFile:\n data = csv.DictReader(csvFile)\n for row in data:\n listValue = row['Patterns']\n l1 = listValue.replace(\"[\", \"\")\n l2 = l1.replace(\"]\", \"\")\n li = list(l2.split(\",\"))\n li1 = [int(i) for i in li]\n self.Database.append(li1)\n if \"Transactions\" in columns:\n with open(iFileName, newline='') as csvFile:\n data = csv.DictReader(csvFile)\n for row in data:\n listValue = row['Transactions']\n l1 = listValue.replace(\"[\", \"\")\n l2 = l1.replace(\"]\", \"\")\n li = list(l2.split(\",\"))\n li1 = [int(i) for i in li]\n self.Database.append(li1)\n else:\n try:\n with open(iFileName, 'r', encoding='utf-8') as f:\n for line in f:\n # line.strip()\n if lineNumber == 0:\n lineNumber += 1\n delimiter = self.findDelimiter([*line])\n # li=[lineNumber]\n li = line.split(delimiter)\n li1 = [i.rstrip() for i in li]\n self.Database.append([i.rstrip() for i in li1])\n # else:\n # self.Database.append(li)\n # data.append([lineNumber,li1])\n else:\n lineNumber += 1\n li = line.split(delimiter)\n # if delimiter==',':\n li1 = [i.rstrip() for i in li]\n self.Database.append(li1)\n except IOError:\n print(\"File Not Found\")\n quit()\n\n # else:\n # self.Database=iFileName['Transactions'].tolist()", "def get_stock_price_df(info, symbols):\n\n df_l = []\n\n for num, i in enumerate(info):\n df = pd.DataFrame.from_dict(i, orient='index')\n df['Symbol'] = symbols[num]\n df_l.append(df)\n\n df_full = pd.concat(df_l)\n df_full = df_full.rename(columns={'1. open': 'Open',\n '2. high': 'High',\n '3. low': 'Low',\n '4. close': 'Close',\n '5. 
volume': 'Volume'})\n\n return df_full", "def read(name, db):\n \n # Make connection with the database\n\tconn = sqlite3.connect(db)\n\tdf = pd.read_sql_query(\"select * from \" + name + ';', conn)\n \n # Print loaded data table name and return DataFrame\n\tprint(name + ': loaded')\n\treturn df", "def open(self) -> None:", "def open(self) -> None:", "def open(self) -> None:", "def test_open_database(self):\n self.assertTrue(self.ss.open_database(self.test_database))\n self.assertEqual(self.ss.data_file(), self.test_database)\n self.assertTrue(os.path.exists(self.test_database))\n # database should be created if it doesn't exist\n self.assertTrue(self.ss.open_database(self.alt_database))\n self.assertEqual(self.ss.data_file(), self.alt_database)\n self.assertTrue(os.path.exists(self.alt_database))", "def load_data_sql(): \r\n conn = mysql.connect(**st.secrets[\"mysql\"])\r\n\r\n data = pd.read_sql('SELECT * FROM song_data', conn)\r\n lookup_table = pd.read_sql('SELECT * FROM lookup_table', conn)\r\n \r\n return data, lookup_table", "def insert_into_sql(chunk):\n bulk_list = []\n for row in chunk:\n bulk_list.append(StockData(\n date=str(row[0])[0:4] + '-' + str(row[0])[4:6] + '-' + str(row[0])[6:8],\n code=row[1],\n code_name=row[2],\n d1_diff_rate=row[3],\n close=row[4],\n open=row[5],\n high=row[6],\n low=row[7],\n volume=row[8],\n clo5=row[9],\n clo10=row[10],\n clo20=row[11],\n clo40=row[12],\n clo60=row[13],\n clo80=row[14],\n clo100=row[15],\n clo120=row[16],\n clo5_diff_rate=row[17],\n clo10_diff_rate=row[18],\n clo20_diff_rate=row[19],\n clo40_diff_rate=row[20],\n clo60_diff_rate=row[21],\n clo80_diff_rate=row[22],\n clo100_diff_rate=row[23],\n clo120_diff_rate=row[24],\n yes_clo_5=row[25],\n yes_clo_10=row[26],\n yes_clo_20=row[27],\n yes_clo_40=row[28],\n yes_clo_60=row[29],\n yes_clo_80=row[30],\n yes_clo_100=row[31],\n yes_clo_120=row[32],\n vol5=row[33],\n vol10=row[34],\n vol20=row[35],\n vol40=row[36],\n vol60=row[37],\n vol80=row[38],\n vol100=row[39],\n vol120=row[40],\n ))\n StockData.objects.bulk_create(bulk_list)\n return bulk_list", "def get_stock_data(name):\n from ._ids import _stock_parts\n\n if name in _stock_parts:\n return read_drive_data(_stock_parts[name])\n else:\n raise IndexError(f\"Uknown company name. 
{name} was given.\")", "def list(file, action = 'Read'):\n\t\taction = action.lower()\n\t\tquote = dict()\n\t\twith open(file, 'r') as f:\n\t\t\tfor line in f:\n\t\t\t\tsymbol = line[:-1]\n\t\t\t\tquote[symbol] = Historical(symbol)\n\n\t\tif action in ['price']:\n\t\t\tprice = dict()\n\t\t\tfor symbol, q in quote.iteritems():\n\t\t\t\tq.read()\n\t\t\t\tprice[symbol] = q.DateSerie().TimeSerie()\n\t\t\treturn price\n\t\tif action in ['log-price']:\n\t\t\tfrom math import log\n\t\t\tprice = dict()\n\t\t\tfor symbol, q in quote.iteritems():\n\t\t\t\tq.read()\n\t\t\t\tprice[symbol] = q.DateSerie().map(log).TimeSerie()\n\t\t\treturn price\n\t\telif action in ['read', 'load']:\n\t\t\tfor symbol in sorted(quote.iterkeys()):\n\t\t\t\tquote[symbol].read()\n\t\t\t\tprint symbol\n\t\t\treturn quote\n\t\telif action in ['download']:\n\t\t\tfor symbol in sorted(quote.iterkeys()):\n\t\t\t\tquote[symbol].__download()\n\t\t\treturn quote\n\t\telif action in ['update']:\n\t\t\tfor symbol in sorted(quote.iterkeys()):\n\t\t\t\tquote[symbol].update()\n\t\telif action in ['clean']:\n\t\t\tfor symbol in sorted(quote.iterkeys()):\n\t\t\t\tquote[symbol].clean()", "def db_open():\n if not hasattr(g, 'sqlite_db'):\n g.sqlite_db = db_connect()\n return g.sqlite_db", "def getInputData():\n\n # Get current allocations.\n current_alloc_dict = DataIO.getCurrentData('data/current_allocations.csv')\n\n # Get tickers and expense ratios.\n ticker_list, expense_ratio_dict = DataIO.getTickerList(\n 'data/tickers_expenses.csv')\n\n # Get raw data.\n raw_data = DataIO.getRawData(ticker_list)\n\n # Create all stock objects.\n stock_dict = {}\n for ticker in raw_data.keys():\n stock_dict[ticker] = Stock(\n raw_data[ticker], ticker, expense_ratio_dict[ticker])\n\n if not len(stock_dict.keys()):\n raise ValueError('No keys found.')\n\n # Create stock database.\n stock_db = StockDatabase(stock_dict)\n\n # Create current portfolio.\n current_portfolio = Portfolio(\n stock_db, percent_allocations_dict=current_alloc_dict)\n\n return current_portfolio, stock_db", "def opendatasets(self):\n\n print \"Open datasets\"\n self.combo_dataset_list.clear()\n self.combo_variable_list.clear()\n self.combo_wms_time_first_d.clear()\n self.combo_wms_time_first_h.clear()\n self.combo_wms_time_last_d.clear()\n self.combo_wms_time_last_h.clear()\n self.combo_wms_time_first_d_2.clear()\n self.combo_wms_time_first_h_2.clear()\n self.combo_wms_time_last_d_2.clear()\n self.combo_wms_time_last_h_2.clear()\n self.combo_wms_layer_depth.clear()\n self.combo_wms_layer_depth_2.clear()\n self.combo_wms_layer_depth_max_2.clear()\n self.combo_colorbar.clear()\n self.combo_proj.clear()\n product=str(self.combo_product_list.currentText())\n for key in self.dict_prod[product].keys():\n print \"Variable\"\n self.combo_dataset_list.addItem(str(key))\n self.combo_variable_list.setEnabled(True)", "def internetData(tickerSymbol):\n #this qeureys the SQL database for all stock data\n temp = Stock.query.filter_by(ticker=tickerSymbol).all()\n #init the lists to store data for output\n prices = []#closing price\n volumes = []\n \n for i in temp:\n prices.append(i.close)\n volumes.append(i.volume)\n print tickerSymbol\n print \"prices length is \" + str(len(prices))\n print \"volumes length is \" + str(len(volumes))\n outputDict = {'Prices':prices,'Volumes':volumes}\n return outputDict", "def get_raw_data():\n data_files = []\n for i, f in enumerate(os.listdir(config.RAW_DATA_DIR)):\n data_files.append(f)\n print i, \": \", f\n while True:\n try:\n index = int(raw_input(\"Type 
the index of the data file you'd like to import: \"))\n fn_raw_data = data_files[int(index)]\n break\n except ValueError:\n print(\"Not a valid index. Try again.\")\n except IndexError:\n print(\"Not a valid index. Try again.\")\n print \"Importing %s...\" % fn_raw_data\n with open(config.RAW_DATA_DIR + fn_raw_data) as infile:\n next(infile)\n raw_data = list(csv.DictReader(infile))\n return (fn_raw_data, raw_data)" ]
[ "0.8676556", "0.6498297", "0.6392442", "0.62527007", "0.6116841", "0.6115268", "0.60903823", "0.6079111", "0.605728", "0.60320973", "0.59506726", "0.58965456", "0.58925784", "0.58342135", "0.581336", "0.57088405", "0.5691312", "0.56265354", "0.56252694", "0.5618806", "0.5607603", "0.55794805", "0.55766153", "0.5537384", "0.5532492", "0.55275625", "0.5523256", "0.5521839", "0.5512011", "0.5507301", "0.54820234", "0.5475616", "0.54677635", "0.545728", "0.5446161", "0.5438879", "0.542651", "0.5422815", "0.5417692", "0.5410987", "0.54007375", "0.5383533", "0.53766584", "0.5369752", "0.53556657", "0.5350688", "0.53476", "0.5338088", "0.53340584", "0.5314884", "0.5314094", "0.53088427", "0.52944803", "0.5291709", "0.5265435", "0.5228093", "0.5225766", "0.52253234", "0.52202886", "0.52043027", "0.52009", "0.51966506", "0.517909", "0.5173227", "0.51715297", "0.51623744", "0.51554066", "0.51518303", "0.51516044", "0.5141414", "0.51397187", "0.5123353", "0.51224875", "0.5119482", "0.5119235", "0.51150185", "0.5111146", "0.5109698", "0.5107926", "0.510288", "0.50860614", "0.5083541", "0.5079242", "0.5072", "0.5071619", "0.5069505", "0.5066477", "0.50609845", "0.50609845", "0.50609845", "0.5059105", "0.5046053", "0.50397456", "0.503824", "0.50340617", "0.50338566", "0.5028127", "0.5023764", "0.5020949", "0.5019732" ]
0.6232286
4
Determines whether the discrepancy has been sufficiently resolved; used as return value for fix_discrepancy.
def discrepancy_resolved(self):
    # If there's a discrepancy and distance change matches the existing data, we're good.
    if self.distance_change == self.existing_data:
        return True
    # If recommend_updates, i.e., if self.distance_change == self.new_data, we'll update the data and we're good
    elif self.recommend_updates:
        return True
    else:
        return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_solved(self):\n if not self._find_empty():\n return True\n else:\n return False", "def is_solved(self):\n\n marker = self._marker\n amount_of_pegs = 0\n for row in marker:\n for i in row:\n if i == \"*\":\n amount_of_pegs += 1\n return amount_of_pegs == 1", "def is_solved(self):\n marker = self._marker\n\n count = 0\n for row in marker:\n for piece in row:\n if piece == \"*\":\n count += 1\n if count == 1:\n return True\n else:\n return False", "def is_solved(self):\n peg_count = 0\n for row in self._marker:\n for item in row:\n if item == '*':\n peg_count += 1\n return peg_count == 1", "def check_initial_confidence(self): # pragma: no cover\n if self.test_type != 'perf':\n return True\n\n if self.required_initial_confidence is None:\n return True # pragma: no cover\n\n # TODO(robertocn): Remove all uses of \"confidence\".\n if self.dummy_initial_confidence is not None:\n self.initial_confidence = float(\n self.dummy_initial_confidence)\n if (float(self.initial_confidence) <\n float(self.required_initial_confidence)):\n self._set_insufficient_confidence_warning()\n return False\n return True\n\n if self.dummy_builds:\n dummy_result = self.good_rev.values != self.bad_rev.values\n if not dummy_result:\n self._set_insufficient_confidence_warning()\n return dummy_result\n\n with self.api.m.step.nest('Re-testing reference range'):\n expiration_time = time.time() + REGRESSION_CHECK_TIMEOUT\n while time.time() < expiration_time:\n if len(self.good_rev.values) >= 5 and len(self.bad_rev.values) >= 5:\n if self.significantly_different(self.good_rev.values,\n self.bad_rev.values):\n return True\n if len(self.good_rev.values) == len(self.bad_rev.values):\n revision_to_retest = self.last_tested_revision\n else:\n revision_to_retest = min(self.good_rev, self.bad_rev,\n key=lambda x: len(x.values))\n if len(revision_to_retest.values) < MAX_REQUIRED_SAMPLES:\n revision_to_retest.retest()\n else:\n break\n self._set_insufficient_confidence_warning()\n return False", "def is_solved(self):\n i = 0\n for row in self._marker:\n for x in row:\n if x == \"*\":\n i += 1\n if i > 1:\n return False\n return True", "def has_conflict(self):\n for diffstat in self.diffstat():\n if diffstat.has_conflict:\n return True\n return False", "def is_equivalence(self) -> bool:", "def discrepancy(self):\n result = 0\n for focal, value in self.items():\n if focal.cardinal > 0:\n result -= value * math.log(self.betP(focal), 2)\n return round(result, 6)", "def is_solvable(self):\n for row, col in np.ndindex(9, 9):\n if len(self.possible_values[row][col]) < 1 and self.final_values[row][col] == 0:\n return False\n return True", "def is_solved(self):\n return self._start == self._target", "def is_inequality(self):\n return True", "def is_inequality(self):\n return True", "def sanity_check(self):\n res = True\n res = res and self.detected\n res = res and np.sum(self.diffs) < 30000 # experimental value\n return res", "def _has_needs_correcting(self, dframe):\n return (dframe.loc[dframe.sync_status == int(ConsentSyncStatus.NEEDS_CORRECTING)].shape[0] > 0)", "def is_solved(self):\n self.solved = self.current_pos == self.finish_pos\n return self.solved", "def is_inequality(self):\n return False", "def is_deficient(n):\r\n if sum_proper_divisors(n) < n:\r\n return True\r\n else:\r\n return False", "def is_concealed(self) -> bool:\n # return not self._exposed\n return sum(self.concealed_part.values()) == 13", "def is_inequality(self): \n return False", "def did_solve(self):\n return self._solution[\"status\"] == \"optimal\"", "def 
is_deficient_number(x):\n return sum(proper_divisors(x)) < x", "def did_solve(self) -> bool:\n return self._stats[\"success\"]", "def is_solved(self):\n return (self.from_grid == self.to_grid)", "def is_solved(self):\n return self._from_word == self._to_word", "def is_correctness_available_for_response(self, response):\n return True", "def isInconsistent(self, problemname : str) -> bool:\n return problemname in self.inconsistentset", "def is_solved(self):\n return self.from_grid == self.to_grid", "def is_solved(self):\n return self.from_grid == self.to_grid", "def is_solved(self):\n return self.from_grid == self.to_grid", "def has_compatible_ligands(self, identity):\n return ((len(self.bad_coords[identity]) == 0) and\n (not self.BAD_COORD_RESIDUE in self.inaccuracies[identity]))", "def is_solved(self):\n return self.to_grid == self.from_grid", "def is_solved(self):\n raise NotImplementedError()", "def pops_agree(x):\n return len(x.all_open_closed) == 1", "def has_convergence_delta(self) -> bool:\n return False", "def compute_confidence_interval(self) -> bool:\n return False", "def consensus_reached(self):\n pos, com, success = self.perception\n if len(com) > 0 and self.time > 1:\n return all(map(lambda x: x[1][\"consensus\"], com)) and self.consensus\n else:\n return True", "def isgood(self):\n\t\tanswer = True\n\t\t\n\t\tif self.mes_flux <= 0.0:\n\t\t\tanswer = False\n\n\t\treturn answer", "def satisfied(self):\n\n if self.var1.get_value() is None or self.var2.get_value() is None:\n return False\n\n return self.var1.get_value() != self.var2.get_value()", "def is_cyclically_reduced(self):\n if not self:\n return True\n return self[0] != self[-1]**-1", "def is_solved(self) -> bool:\n return set(self.boxes) == set(self.storage_locations)", "def check_reco_dist_consistency(self, dist_list):\n logging.trace(\" Verifying correct normalisation of resolution function.\")\n # Obtain list of all distributions. 
The sum of their relative weights\n # should yield 1.\n frac_sum = np.zeros_like(dist_list[0]['fraction'])\n for dist_dict in dist_list:\n frac_sum += dist_dict['fraction']\n if not recursiveEquality(frac_sum, np.ones_like(frac_sum)):\n err_msg = (\"Total normalisation of resolution function is off\"\n \" (fractions do not add up to 1).\")\n raise ValueError(err_msg)\n return True", "def did_solve(self) -> bool:\n return self._solution.info.status == \"solved\"", "def _calculate_whether_correct(self) -> Optional[bool]:\n assert self.match\n\n if not self.match.has_been_played:\n return None\n\n # In footy tipping competitions its typical to grant everyone a correct tip\n # in the case of a draw\n return self.match.is_draw or bool(\n self.predicted_winner\n and self.match.winner\n and self.predicted_winner.id == self.match.winner.id\n )", "def fix_has_no_advisory(self):\n fixed_in = self.fixed_artifact()\n return fixed_in and fixed_in.vendor_no_advisory", "def should_ask_if_examiner_want_to_give_another_chance(self):\n if self.assignment.is_electronic:\n return (self.delivery_status == \"corrected\" and not self.feedback.is_passing_grade) \\\n or self.delivery_status == 'closed-without-feedback'\n else:\n return False", "def check_compositionality(cls, fraction_total_reads: Series[float]) -> bool:\n # Bracken reports fractions with five decimals but rounding errors accumulate.\n return fraction_total_reads.empty or bool(\n np.isclose(fraction_total_reads.sum(), 1.0, atol=0.02)\n )", "def missing_expected_delivery(self):\n from devilry.apps.core.models import Delivery\n from devilry.apps.core.models import Deadline\n if self.assignment.is_electronic and self.get_status() == \"waiting-for-feedback\":\n return not Delivery.objects.filter(\n deadline__assignment_group=self,\n deadline=Deadline.objects.filter(assignment_group=self).order_by('-deadline')[0]\n ).exists()\n return False", "def isSolved(self):\n return self.isComplete() and self.isLegal()", "def check_conservation(self):\n # Compute #\n df1 = self.simulated\n df2 = self.grouped_bins\n all_close = numpy.testing.assert_allclose\n # Check #\n all_close(df1[self.sum_col].sum(), df2[self.sum_col].sum())", "def feasible_ratio(self, solutions):\r\n count = np.zeros(len(solutions[0]))\r\n for x in solutions:\r\n count += x.unrepaired == x\r\n return count / float(len(solutions))", "def _calculate_discrepancy(row, df, Y):\n knn_idx = row[\"KNN_IDX\"]\n knn_y = df.iloc[knn_idx][Y]\n n = len(knn_y)\n row_y = row[Y]\n num_of_deviations = np.abs(row_y - knn_y).sum()\n discrepancy = (n / (n + 10)) * num_of_deviations\n return discrepancy", "def isDisturbance(self):\n return True", "def goal_test(self, state):\r\n assignment = dict(state)\r\n return (len(assignment) == len(self.variables)\r\n and all(self.nconflicts(variables, assignment[variables], assignment) == 0\r\n for variables in self.variables))", "def _isproperdist(X):\n X = np.asarray(X)\n if not np.allclose(np.sum(X), 1) or not np.all(X>=0) or not np.all(X<=1):\n return False\n else:\n return True", "def isReferenceConsistent(self, x : pd.Series) -> str :\n\n problemname = x.get(Key.ProblemName)\n pb = x.get(Key.PrimalBound)\n db = x.get(Key.DualBound)\n obs = self.getObjSense(problemname, x)\n sstatus = x.get(Key.SolverStatus)\n\n reference = self.referencedict.get(problemname, (None, None))\n\n logger.debug(\"Checking against reference {} for problem {}\".format(reference, problemname))\n\n referencepb = self.getPbValue(reference[self.__primalidx__], obs)\n referencedb = 
self.getDbValue(reference[self.__dualidx__], obs)\n\n if self.isUnkn(reference):\n return ProblemStatusCodes.Ok\n\n elif self.isInf(reference):\n if sstatus != SolverStatusCodes.Infeasible and not pd.isnull(pb) and not isInf(pb):\n return ProblemStatusCodes.FailSolOnInfeasibleInstance\n\n elif self.isFeas(reference):\n if sstatus == SolverStatusCodes.Infeasible:\n return ProblemStatusCodes.FailDualBound\n\n else:\n\n pb = self.getPbValue(pb, obs)\n db = self.getDbValue(db, obs)\n if not self.isPbReferenceConsistent(pb, referencedb, obs):\n return ProblemStatusCodes.FailObjectiveValue\n if sstatus == SolverStatusCodes.Infeasible and abs(referencepb) < infty():\n return ProblemStatusCodes.FailDualBound\n if not self.isDbReferenceConsistent(db, referencepb, obs):\n return ProblemStatusCodes.FailDualBound\n\n return ProblemStatusCodes.Ok", "def valid_reflexiveness(self, fact: Tuple[str, str, str]) -> bool:\n is_not_reflexive = fact[0] != fact[2]\n # increment the counter by the inverse boolean value (+1 if it is reflexive, +0 if is not reflexive)\n self.num_facts_violating_non_reflexiveness += not is_not_reflexive\n return is_not_reflexive", "def is_exceptional(self):\n G = self.poset().hasse_diagram()\n for x in G:\n nx = list(G.neighbors_out(x))\n nx.append(x)\n if min(nx) < x and max(nx) > x:\n return False\n return True", "def is_valid(self) -> bool:\n if self.total <= 1:\n # Definitely valid (i.e. no conflict) if 0 or 1. In practice, this\n # function probably won't be called if there are 0 fixes, but 0 is\n # valid; it simply means \"no fixes to apply\".\n return True\n if self.total == 2:\n # This is only OK for this special case. We allow this because\n # the intent is clear (i.e. no conflict): Insert something *before*\n # the segment and something else *after* the segment.\n return self.create_before == 1 and self.create_after == 1\n # Definitely bad if > 2.\n return False # pragma: no cover", "def consistent(self):\n if self.var1.get_value() is None or self.var2.get_value() is None:\n return True\n\n return self.var1.value != self.var2.value", "def isDisturbance(self):\n return False", "def arecloseenough(x1, x2):\n\n if abs(x1 - x2) <= VERYSMALL:\n return True\n \n return False", "def _check_flow_consistencity (sg_map, fr_sg):\n if isinstance(fr_sg, Flowrule):\n flowclass = NFFGToolBox._extract_flowclass(fr_sg.match.split(\";\"))\n else:\n flowclass = fr_sg.flowclass\n consistent = True\n if sg_map[fr_sg.id][2] != flowclass:\n consistent = False\n if (sg_map[fr_sg.id][3] is None or sg_map[fr_sg.id][3] == float(\"inf\")) != \\\n (fr_sg.bandwidth is None or fr_sg.bandwidth == float(\"inf\")):\n # If not both of them are None\n consistent = False\n elif (sg_map[fr_sg.id][3] is not None) and (fr_sg.bandwidth is not None):\n if consistent and math.fabs(sg_map[fr_sg.id][3] - fr_sg.bandwidth) > 1e-8:\n consistent = False\n if (sg_map[fr_sg.id][4] is None or sg_map[fr_sg.id][4] == 0.000000000) != \\\n (fr_sg.delay is None or fr_sg.delay == 0.0000000000):\n # If not both of them are None\n consistent = False\n elif (sg_map[fr_sg.id][4] is not None) and (fr_sg.delay is not None):\n if math.fabs(sg_map[fr_sg.id][4] - fr_sg.delay) > 1e-8:\n consistent = False\n if not consistent:\n raise RuntimeError(\"Not all data of a Flowrule equal to the other \"\n \"Flowrules of the sequence for the SGHop %s! 
Or the\"\n \" SGHop to be added differs in data from the existing\"\n \" SGHop!\" % fr_sg.id)", "def identical_to(self, n):\n \n return (self.distance_to(n) < gc.tolerance)", "def validSuccesor(self):\n lines, cols = len(FINAL_Node.state[0]), len(FINAL_Node.state[1])\n finalFrq = FINAL_Node.getFrq()\n def tooLess(node: StateNode) -> bool:\n return len(node.state[0]) < lines or len(node.state[1]) < cols\n def badFrq(node: StateNode) -> bool:\n frq = node.getFrq()\n for i in range(26):\n if frq[i] < finalFrq[i]:\n return True\n return False\n \n return not tooLess(self) and not badFrq(self)", "def is_complete(self):\n acquired_points = self.dset.shape[0]\n total_nr_pts = np.shape(self.get_sweep_points())[0]\n if acquired_points < total_nr_pts:\n return False\n elif acquired_points >= total_nr_pts:\n if self.soft_avg() != 1 and self.soft_iteration == 0:\n return False\n else:\n return True", "def is_legal_solution(self, solution):\r\n if self.sorting_order is ScoresSortingOrder.ASCENDING:\r\n return self.fit_score(solution) == 0\r\n else:\r\n return self.fit_score(solution) == sum(x for x in range(1, 12))", "def is_over(self, state: StonehengeState) -> bool:\n total_result = state.hori_result + state.left_result + state.right_result\n total_line = len(total_result)\n p1_taken = 0\n p2_taken = 0\n # all_taken = True\n for item in total_result:\n if item == '1':\n p1_taken+=1\n elif item =='2':\n p2_taken += 1\n # else:\n # all_taken = False\n # print('p1 taken:' + str(p1_taken))\n # print('p2 taken:' + str(p2_taken))\n # print('p1_taken more than half?')\n # print(float(p1_taken) >= total_line/2)\n # print('p2_taken more than half?')\n # print(float(p2_taken) >= total_line/2)\n return float(p1_taken) >= total_line/2 or float(p2_taken) >= total_line/2", "def getSusceptible(self):\n\n # use a mask and sum it to see the number of healthy people, designated as having a value equal to zero\n self.susceptible = np.sum((self.getSpace()) == 0)\n\n return self.susceptible", "def has_warning(self):\n \n if self['n_madloop_calls'] > 0:\n fraction = self['exceptional_points']/float(self['n_madloop_calls'])\n else:\n fraction = 0.0\n \n if self['skipped_subchannel'] > 0:\n return True\n elif fraction > 1.0e-4:\n return True\n else:\n return False", "def has_mismatch(self) -> bool:\n return self.mismatch_error is not None", "def is_correctly_identified(self, identity = None):\n if identity is None:\n identity = self.identity()\n\n return len(self.inaccuracies[identity]) == 0", "def violated(self) -> bool:\n ...", "def is_contradiction(formula: Formula) -> bool:\r\n # Task 2.5b\r\n return not is_satisfiable(formula)", "def check_correctness(self):\n\n with open(self.output_file, 'r') as output_file, open(self.gt_file, 'r') as gt_file:\n\n out_lines = output_file.readlines()\n gt_lines = gt_file.readlines()\n\n # Check for inequality\n if len(out_lines) != len(gt_lines):\n return 0\n\n # Check for inequality\n for i in range(len(out_lines)):\n out_split = out_lines[i].split()\n gt_split = gt_lines[i].split()\n\n if len(out_split) != len(gt_split):\n return 0\n\n for j in range(len(out_split)):\n # Treat slur and tie as equivalent\n if out_split[j] != gt_split[j] and\\\n ('slur' not in out_split[j] and 'tie' not in out_split[j]) and\\\n ('slur' not in gt_split[j] and 'tie' not in gt_split[j]):\n return 0\n\n return 1", "def consistent(self):\n return all((constraint.consistent() for constraint in self.constraints))", "def is_contradiction(formula: Formula) -> bool:\n # Task 2.5b\n return not 
is_satisfiable(formula)", "def is_resolved(self):\n return self.get_data(\"state\") == self.STATE_RESOLVED", "def is_free(self) -> bool:\n return self.places < self.total", "def is_perfect(self):\n if self._is_perfect is None:\n self._is_perfect = self.equals(self.derived_subgroup())\n return self._is_perfect", "def _is_converged(self):\n if self._last_operating_point is None:\n return False\n\n # Tolerance for comparing operating points. If all states changes\n # within this tolerance in the Euclidean norm then we've converged.\n TOLERANCE = 1e-4\n for ii in range(self._horizon):\n last_x = self._last_operating_point[0][ii]\n current_x = self._current_operating_point[0][ii]\n\n if np.linalg.norm(last_x - current_x) > TOLERANCE:\n return False\n\n return True", "def is_valid_single_attempt(self, atoms_init, atoms_final):\n from scipy.spatial import cKDTree as KDTree\n from random import shuffle\n atoms1 = atoms_init.copy()\n atoms2 = atoms_final.copy()\n\n vol1 = atoms1.get_volume()\n vol2 = atoms2.get_volume()\n if vol2 > vol1:\n ratio = (vol2/vol1)**(1.0/3.0)\n cell1 = atoms1.get_cell()\n atoms1.set_cell(cell1*ratio, scale_atoms=True)\n else:\n ratio = (vol1/vol2)**(1.0/3.0)\n cell2 = atoms2.get_cell()\n atoms2.set_cell(cell2*ratio, scale_atoms=True)\n\n # Try construct the relation\n used_indices = []\n tree = KDTree(atoms2.get_positions())\n indices = list(range(0, len(atoms1)))\n shuffle(indices)\n for atom in atoms1:\n if atom.symbol in self.exclude:\n continue\n dist, closest = tree.query(atom.position, k=12)\n srt_indx = np.argsort(dist)\n dist = [dist[indx] for indx in srt_indx]\n closest = [closest[indx] for indx in srt_indx]\n\n if all(c in used_indices for c in closest):\n # More than one atom is closest to this\n # structure\n self.rejected_reason = \"More than one atom mapped onto the \"\n self.rejected_reason += \"same atoms in the initial structure\"\n return False\n\n # First, unused with mathing symbol\n closest_indx = None\n closest_dist = None\n for i, indx in enumerate(closest):\n if atoms2[indx].symbol == atom.symbol and indx not in used_indices:\n closest_indx = indx\n closest_dist = dist[i]\n break\n\n if closest_indx is None:\n self.rejected_reason = \"No unused atoms with macthing symbol!\"\n return False\n \n used_indices.append(closest_indx)\n if closest_dist > self.max_displacement:\n # The displacement is larger than the tolereance\n self.rejected_reason = \"Max displacement too large\"\n return False\n \n if atom.symbol != atoms2[closest_indx].symbol:\n self.rejected_reason = \"Mapped symbol does not match!\"\n return False\n return True", "async def should_handle(self):\n local_controller = self.controller\n workers_total = len(local_controller.workers)\n geysers = local_controller.extractors\n drones_in_queue = local_controller.already_pending(DRONE)\n if (\n not local_controller.close_enemies_to_base\n and local_controller.can_train(DRONE)\n and not local_controller.counter_attack_vs_flying\n ):\n if workers_total == 12 and not drones_in_queue:\n return True\n if (\n workers_total in (13, 14, 15)\n and len(local_controller.overlords) + local_controller.already_pending(OVERLORD) > 1\n ):\n return True\n optimal_workers = min(\n sum(x.ideal_harvesters for x in local_controller.townhalls | geysers), 90 - len(geysers)\n )\n return (\n workers_total + drones_in_queue < optimal_workers\n and np.sum(\n np.array(\n [\n len(local_controller.zerglings),\n len(local_controller.hydras),\n len(local_controller.ultralisks),\n ]\n )\n * np.array([1, 2, 3])\n )\n > 15\n 
)\n return False", "def is_full(self):\n core_full = self.drone.complete() and self.subject.complete()\n if self.peds is None:\n return core_full\n else:\n return core_full and all([p.complete() for p in self.peds.values()])", "def correct(self):\n return self._solution == self._alternatives.value", "def _CheckConvergence(self):\n self.is_converged = True\n self.are_converged[0] = (abs(self.delta_e) < self.conv_delta_e)\n self.are_converged[1] = (self.grad_rms < self.conv_grad_rms)\n self.are_converged[2] = (self.grad_max < self.conv_grad_max)\n self.are_converged[3] = (self.disp_rms < self.conv_disp_rms)\n self.are_converged[4] = (self.disp_max < self.conv_disp_max)\n for i in range(5):\n if self.must_converge[i] and not self.are_converged[i]:\n self.is_converged = False", "def get_compatibility(self, submitted):\n this_disease_symtoms = list(map(lambda x: x.id, self.main_symptoms.all()))\n\n matched = list(set(submitted) & set(this_disease_symtoms))\n unmatched = [i for i in submitted if i not in this_disease_symtoms]\n # print logs\n # print(this_disease_symtoms, submitted, matched)\n\n compatibility = len(matched) / len(this_disease_symtoms)\n # print (compatibility)\n if len(unmatched) != 0:\n compatibility = compatibility / len(unmatched)\n # print(compatibility)\n\n return compatibility", "def fusable(self) -> bool:\n obs_fusable = self._can_fuse_set_of_gridded_perms(self.obstruction_fuse_counter)\n req_fusable = all(\n self._can_fuse_set_of_gridded_perms(counter)\n for counter in self.requirements_fuse_counters\n )\n ass_fusable = all(\n self._can_fuse_assumption(assumption, counter)\n for assumption, counter in zip(\n self._tiling.assumptions, self.assumptions_fuse_counters\n )\n )\n return (\n obs_fusable\n and req_fusable\n and ass_fusable\n and self._check_isolation_level()\n )", "def check_if_solved(self):\n for cell in self.board.values():\n if not cell.value:\n return False\n return True", "def is_abundant(n):\r\n if sum_proper_divisors(n) > n:\r\n return True\r\n else:\r\n return False", "def isdone(node_dict):\n # compute heading difference\n hdiff = heading_diff(r_target, node_dict['pn'].heading)\n # return if we are we close enough\n return abs(hdiff) < abs(tol)", "def has_solution(self) -> bool:\n if self in [self.SATISFIED, self.ALL_SOLUTIONS, self.OPTIMAL_SOLUTION]:\n return True\n return False", "def check(self):\n open = self.query('open')\n newly_closed = set()\n newly_gone = self.open_before - open\n if len(newly_gone) > 0:\n # Some issues are no longer open, make sure they were closed\n # instead of deleted.\n closed = self.query('closed')\n newly_closed = newly_gone & closed\n self.open_before = open\n return newly_closed", "def is_residential(self):\n\n return self._is_residential", "def is_abundant_number(x):\n return sum(proper_divisors(x)) > x", "def done(self):\r\n return not self.get_all_closed_cells() or self.unsolvable", "def valid(self):\n return len(self.missing()) == 0", "def has_solution(self) -> bool:\n pass", "def checkObservation(self):\n if (self.independentVariable is not None \n and self.observation is not None \n and self.observationError is not None):\n l = len(self.independentVariable)\n if (l == len(self.observation) and l == len(self.observationError)):\n return True\n return False", "def is_solvable(self) -> bool:\r\n inv_count = 0\r\n arr = self.current_state.flatten()\r\n for i in range(0, 9):\r\n for j in range(i + 1, 9):\r\n if arr[j] and arr[i] and arr[i] > arr[j]:\r\n inv_count += 1\r\n return inv_count % 2 == 0" ]
[ "0.6593381", "0.622655", "0.62037015", "0.6201302", "0.6191842", "0.61888754", "0.60700476", "0.6044539", "0.6014487", "0.6009676", "0.5961862", "0.5950892", "0.5950892", "0.59451425", "0.5910703", "0.58750445", "0.5857207", "0.582537", "0.5819653", "0.580809", "0.5805597", "0.5798865", "0.5796407", "0.5755686", "0.57459867", "0.5712142", "0.5700134", "0.5697857", "0.5697857", "0.5697857", "0.5696923", "0.5694712", "0.56882435", "0.56817937", "0.5679434", "0.5678817", "0.5674043", "0.56707656", "0.566266", "0.565984", "0.5653187", "0.56475544", "0.564351", "0.5616838", "0.5613954", "0.56071097", "0.560474", "0.55991787", "0.55985284", "0.55905706", "0.55874157", "0.5573642", "0.5571733", "0.556909", "0.5568494", "0.5563183", "0.5558829", "0.5555319", "0.55504876", "0.55299073", "0.5529083", "0.5525818", "0.5521041", "0.5520189", "0.5514786", "0.5514669", "0.5509735", "0.5502791", "0.55020505", "0.5500626", "0.54989904", "0.5491663", "0.54893744", "0.54848003", "0.5476934", "0.545994", "0.5449642", "0.54471844", "0.5445523", "0.5435205", "0.5435022", "0.54332495", "0.5423219", "0.5419377", "0.54192436", "0.5407562", "0.540743", "0.5404744", "0.53959775", "0.5393805", "0.5388383", "0.53860986", "0.5381888", "0.53810894", "0.5379345", "0.53774315", "0.53761125", "0.5372038", "0.53641814", "0.53636545" ]
0.7586293
0
Run when the palette is closed
def on_palette_close(self):
    pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def notify(self, args):\r\n try:\r\n self.cmd_object_.on_palette_close()\r\n\r\n except:\r\n app = adsk.core.Application.cast(adsk.core.Application.get())\r\n ui = app.userInterface\r\n ui.messageBox('Failed During Palette Close:\\n{}'.format(traceback.format_exc()))", "def on_stop(self):\r\n app = adsk.core.Application.cast(adsk.core.Application.get())\r\n ui = app.userInterface\r\n palette = ui.palettes.itemById(self.palette_id)\r\n\r\n for handler in self.html_handlers:\r\n palette.incomingFromHTML.remove(handler)\r\n\r\n if palette:\r\n palette.deleteMe()\r\n\r\n super().on_stop()", "def _on_close(self):\n self.shell_obj.closed()", "def cleanup(self) -> None:\n colorama.deinit()", "def __window_close(self):\n pass", "def __onclosing(self):\n self.window.destroy()", "def on_closing(self, *args):\n pass", "def on_cleanup(self):\n\n pygame.quit()", "def __on_close(self):\n # Release the resource and\n # close the windows\n LOGGER.info(\"closing...\")\n self.__quit.set()\n self.__detect.end()\n self.root.quit()", "def finalizeExit(self) -> None:\n base.graphicsEngine.removeAllWindows()\n if self.win is not None:\n print(\"Exiting KarelCraft app, bye!\")\n self.closeWindow(self.win)\n self.win = None\n self.destroy()\n sys.exit()", "def end(self, event):\n plt.close()", "def _finish(self):\n steppable_registry = CompuCellSetup.persistent_globals.steppable_registry\n steppable_registry.finish()\n self.close_frames()", "def on_close(self, event):\n # Save pos and size\n x, y = self.GetPosition()\n width, height = self.GetSize()\n self.__config.set('window.x', x)\n self.__config.set('window.y', y)\n self.__config.set('window.width', width)\n self.__config.set('window.height', height)\n\n # Style\n style = self.GetWindowStyle()\n self.__config.set('window.style', style)\n\n self.__config.save()\n\n # Stop monitoring\n self.__cor.stop_monitor()\n\n # Kill graph as it seems to be stopping script from ending\n self.__graph = None\n\n # End\n event.Skip()", "def on_palette_execute(self, palette: adsk.core.Palette):\r\n pass", "def onCloseWindow(self, event):\r\n\r\n self.Destroy()", "def close(self, event, data):\n try:\n with open(self.save_file, \"w+\") as save_file:\n try:\n data = json.load(save_file)\n except ValueError:\n data = dict()\n data[\"color\"] = rgb_to_hex(self.rgb_color)\n json.dump(data, save_file)\n except (OSError, json.JSONDecodeError):\n print(\"Error when trying to set save file.\")\n Gtk.main_quit()", "def close_preferences(self,event):\n self.Destroy()\n event.Skip()", "def on_cleanup(self):\n self.close()", "def on_cleanup(self):\n self.close()", "def handle_close(self):\n self.active = False\n self.close()", "def cb_close(self, *args):\n Gtk.main_quit()", "def done(self):\n if self.pbar is not None:\n self.pbar.close()\n self.pbar = None\n self.counter = 0", "def onClose (self):\n \n pass", "def _shutdown(): \n for GD in GlobalDictionary._instances:\n print(\"\\nCleaning up:\", GD.name)\n GD._handler.close()\n del GD\n\n print(\"Shutting down\")\n \n sys.exit(0)", "def onQuit(self, event):\n\n\t\tself.onClose(None)", "def finalize(self):\n self.thread.quit()\n self.color.release()\n self.pos.release()\n\n if self.initCoordinates.f_timer is not None:\n for f_timer in self.initCoordinates.f_timer:\n self.timer.addFunctionTimer(f_timer)\n if self.numMethod.f_timer is not None:\n for f_timer in self.numMethod.f_timer:\n self.timer.addFunctionTimer(f_timer)", "def onClose(self, *args):\n rospy.loginfo('Closing Cloud Map')\n self.root.quit()\n self.root.destroy()\n # 
rospy.signal_shutdown('Exited UI')", "def cog_unload(self):\n self._get_sketch_prompt.cancel()", "def onClose(self, event): \n \n self.Destroy()\n return", "def on_unload(self):\n pass", "def cleanup_and_exit(self):\n self.radio.stop()\n\n # Ensure display is on and at full brightness\n rpi_utils.toggle_screen_state(\"on\")\n rpi_utils.set_display_backlight_brightness(rpi_utils.HIGH_BRIGHTNESS)\n QApplication.instance().quit()", "def onApplicationClose(self):\n self.movieDisplay.clearImageCache()\n self.quit()", "def _close(self):\n log.Debug('dpbx.close():')", "def destroy_colors(self) -> None:\n self.canv.destroy()", "def _onExit(self, event):\n self.Close(True)", "def cleanup(self):\n pygame.quit()", "def ev_windowclose(self, event: WindowEvent) -> None:", "def OnClose(self):\n self.SaveData()\n self.destroy()", "def on_closing_event(self):\n self.exit_event(None)", "def on_exit(self):\n pass", "def state_finish_exit(cfg, app, win):", "def deinit(self):\n self._font.close()", "def on_before_close(self):\n pass", "def filemenu_Close(self):\n\n self.on_closing()", "def state_preview_exit(cfg, app, win):", "def OnClose(self, event):\r\n pos.app.main.Exit()", "def _finalize(self):\n if self._initialized:\n if self.connected():\n if self.swo_enabled():\n self.swo_stop()\n\n if self.opened():\n self.close()", "def close(self):\n GPIO.cleanup(self.gpio_pin)", "def close(self):", "def close(self):", "def close(self):", "def close(self):", "def close(self):", "def close(self):", "def close(self):", "def close(self):", "def close(self):", "def close(self):", "def outCloseEvent(self):\r\n pass", "def handle_close(event):\n self.fig.canvas.stop_event_loop()\n self.raiseAMessage('Closed Figure')", "def OnExit(self, event):\r\n self.Close(True)", "def exit(self):\n cv2.destroyAllWindows()\n print(\"Exiting..\")", "def OnCloseWindow(self, event):\r\n self.data.close()\r\n sizes[self.data.__class__.__name__] = self.GetSizeTuple()\r\n self.Destroy()", "def end(self):\n #self.manipulator_restore()\n #self.header_text_restore()\n #self.cursor_modal_restore()\n pass", "def OnExit(self, event):\n self.Close(True)", "def OnExit(self, event):\n self.Close(True)", "def OnExit(self, event):\n self.Close(True)", "def OnExit(self, event):\n self.Close(True)", "def close(self):\n ...", "def close(self):\n ...", "def _on_finalize(self):\n pass", "def OnClose(self, event):\n self.OnIconize(event, True)", "def onClose(self, event):\n pass", "def onClose(self, event):\n pass", "def onClose(self, event):\n pass", "def kill(self):\r\n plt.close(self.fig)", "def Close(self):", "def close(self):\n self.closecallback()\n self.destroy()", "def terminate(self):\n plt.close('all')", "def close(self):\n self.exit()", "def destroy(self,event):\n self.mi_base.cerrarBase()\n gtk.main_quit()", "def onClose(self, wasClean, code, reason):", "def OnExit(self, event):\n \n print 'Cleaning up...'\n self.Destroy()", "def callback_destroy( self ):\r\n self.winRunning = False\r\n self.rootWin.destroy()\r\n exit()", "def callback_close(self, event=None):\n\n if self.show_var.get():\n self.master.program_config[\"startup_window\"] = False\n util.data.write_config(self.master.program_config)\n\n self.destroy()", "def closed(self, info, is_ok):\n info.object.timer.Stop()\n info.object.connection.close()#close PyHWI connection to ADC when window closes!\n return", "def close(self):\n #title()\n self.experiment.pause = True\n if self.running:\n self.running = False\n\n self._unregisterCallbacks()", "def shutdown(self):\n gtk.main_quit()", "def 
close_preview(self):\n self.nvim.command('silent! pclose!')", "def closeEvent(self, event):\n self._renderer.plotter.close()\n self.close()", "def mouse_out(self, event):\r\n self['background'] = self.defaultBackground", "def destroy_on_close(self):\n self.deleteLater()", "def close(self):\n plotid = self._plotid\n f = self.set(plotid)\n plt.close(f)\n self._plotid = None\n self._plots.remove(plotid)\n self._color_indexes.pop(plotid, None)\n self._mappable.pop(plotid, None)\n self._polar.pop(plotid, None)\n self._xscales.pop(plotid, None)\n self._yscales.pop(plotid, None)\n self._errorbar_colors.pop(plotid, None)", "def state_processing_exit(cfg, app, win):", "def close(self):\n self.input_processing_running = False", "def OnClose(self, evt):\n \n try:\n \n # save config\n self._update_config()\n config.save()\n \n # export collections\n self._on_collections_export()\n \n except:\n pass\n \n # safe destroy\n self.AUIManager.UnInit()\n self.Destroy()", "def OnClose(self, event):\r\n if self.worker: #stop main GPIB thread\r\n self.worker.abort()\r\n time.sleep(0.3)\r\n self.Destroy()", "def state_finish_do(cfg, app, win, events):", "def close(self):\n self.closed = True", "def cleanup(self):\n self.exit_config_mode()" ]
[ "0.767233", "0.6960615", "0.67901963", "0.6604122", "0.6592666", "0.64696324", "0.64392585", "0.6405782", "0.6362652", "0.6352815", "0.6348606", "0.6346555", "0.63247025", "0.6313968", "0.6289701", "0.6276552", "0.6267763", "0.62607265", "0.62607265", "0.6249281", "0.623151", "0.62173563", "0.62118787", "0.6207149", "0.62064016", "0.6189255", "0.61469436", "0.60886616", "0.60793936", "0.60749465", "0.60691655", "0.6067721", "0.606519", "0.60635805", "0.6054569", "0.60528636", "0.6048894", "0.60401833", "0.6036248", "0.6019566", "0.60057336", "0.599935", "0.5996291", "0.59793806", "0.5978414", "0.5978224", "0.5966704", "0.59663117", "0.5962287", "0.5962287", "0.5962287", "0.5962287", "0.5962287", "0.5962287", "0.5962287", "0.5962287", "0.5962287", "0.5962287", "0.59596205", "0.5955131", "0.59513015", "0.5935052", "0.59349936", "0.59340256", "0.592296", "0.592296", "0.592296", "0.592296", "0.5916131", "0.5916131", "0.5914365", "0.59028393", "0.5901994", "0.5901994", "0.5901994", "0.58972806", "0.5895143", "0.58902454", "0.588625", "0.5880781", "0.5875506", "0.5873924", "0.58731437", "0.5872774", "0.58723575", "0.587196", "0.5870187", "0.58694667", "0.58649516", "0.58588547", "0.58496296", "0.58448654", "0.58431077", "0.5842518", "0.5839988", "0.5821136", "0.5814839", "0.58101386", "0.5809393", "0.58057964" ]
0.91420245
0
Function is run when the palette is executed. Useful to gather initial data and send to html page
def on_palette_execute(self, palette: adsk.core.Palette): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def notify(self, args):\r\n app = adsk.core.Application.cast(adsk.core.Application.get())\r\n ui = app.userInterface\r\n try:\r\n\r\n # Create and display the palette.\r\n palette = ui.palettes.itemById(self.cmd_object_.palette_id)\r\n\r\n if not palette:\r\n palette = ui.palettes.add(\r\n self.cmd_object_.palette_id,\r\n self.cmd_object_.palette_name,\r\n self.cmd_object_.palette_html_file_url,\r\n self.cmd_object_.palette_is_visible,\r\n self.cmd_object_.palette_show_close_button,\r\n self.cmd_object_.palette_is_resizable,\r\n self.cmd_object_.palette_width,\r\n self.cmd_object_.palette_height,\r\n True\r\n )\r\n\r\n # Add handler to HTMLEvent of the palette.\r\n on_html_event_handler = _HTMLEventHandler(self.cmd_object_)\r\n palette.incomingFromHTML.add(on_html_event_handler)\r\n self.cmd_object_.handlers.append(on_html_event_handler)\r\n self.cmd_object_.html_handlers.append(on_html_event_handler)\r\n\r\n # Add handler to CloseEvent of the palette.\r\n on_closed_handler = _PaletteCloseHandler(self.cmd_object_)\r\n palette.closed.add(on_closed_handler)\r\n self.cmd_object_.handlers.append(on_closed_handler)\r\n\r\n else:\r\n main_url = urlparse(self.cmd_object_.palette_html_file_url)\r\n current_url = urlparse(palette.htmlFileURL)\r\n\r\n if not (\r\n (not self.cmd_object_.palette_force_url_reload) &\r\n (main_url.netloc == current_url.netloc) &\r\n (main_url.path == current_url.path)\r\n ):\r\n # ui.messageBox(current_url.netloc + \" vs. \" + main_url.netloc)\r\n # ui.messageBox(current_url.path + \" vs. \" + main_url.path)\r\n # ui.messageBox(str(self.cmd_object_.palette_force_url_reload))\r\n palette.htmlFileURL = self.cmd_object_.palette_html_file_url\r\n\r\n palette.isVisible = True\r\n\r\n self.cmd_object_.on_palette_execute(palette)\r\n\r\n except:\r\n ui.messageBox('Palette ({}) Execution Failed: {}'.format(\r\n self.cmd_object_.palette_html_file_url,\r\n traceback.format_exc())\r\n )", "def open(self):\n print('palette c_edge heat50 .6')\n print('palette c_vertex heat50 .9')\n print('palette c_sus_range heat10 .6')\n print('palette c_sus heat10 .9')\n print('palette c_inf_range heat85 .6')\n print('palette c_inf heat85 .9')\n print('palette c_wait_sus_range heat30 .6')\n print('palette c_wait_sus heat30 .9')", "def _on_palette_change(self, palette_data: dict) -> None:\n # set the color from the metadata\n color = self._label_to_rgb[palette_data['label']]\n # if the selected color is different, queue a cursor update\n if not np.array_equal(self._color, color):\n self.is_cursor_change = True\n # store the color with the new value\n self._color[:] = color\n # set the is brush flag\n self.is_brush = palette_data['paint'] == 'brush'\n # store the brush size with the new value\n self.brush_size = palette_data['brush_size']\n # if the palette is in super pixel mode, get that data\n if palette_data['paint'] == 'super_pixel':\n # get the algorithm from the dictionary\n algorithm = palette_data['super_pixel']\n # get the arguments for the specific algorithm\n arguments = palette_data[algorithm]\n # get the segments using the given algorithm and arguments\n segs = segment(self._image, algorithm, **arguments)\n # apply the segmented image pixels and segments to local structures\n self._super_pixel_segments[:], self._super_pixel[:] = segs\n # otherwise set the super pixel data back to 0\n else:\n self._super_pixel_segments[:] = 0\n self._super_pixel[:] = 0", "def post_start(self):", "def prepare_UI(self):", "def run(self):\n self.print_welcome()\n self.handle_inputs()", "def 
on_window_ready(self):\n pass", "def postRun(self):\n pass", "def do_startup(self):\n \n import json\n\n GLib.set_application_name(\"Deity\")\n Gtk.Application.do_startup(self)\n \n settings = self.get_settings()\n\n menub = Gtk.MenuButton(name=\"input-menu_button\",\n use_popover=True)\n\n headerbar = Gtk.HeaderBar(name=\"input-headerbar\",\n show_close_button=True,\n title=\"Deity\")\n\n main_grid = Gtk.Grid(name=\"input-main_grid\")\n\n statusbar = Gtk.Box(name=\"input-statusbar\",\n orientation=0,\n spacing=2)\n statusbar.pack_start(self.statuslabel, 1, 1, 1)\n\n self.connector.connect(\"query-status\", self.show_output)\n self.connector.connect(\"query-waiting\",\n lambda wid, count: self.statuslabel.set_text(\n f\"Queries on hold : {count}\"))\n self.connector.connect(\"request\", print)\n\n headerbar.pack_end(menub)\n\n main_grid.attach(self.iogrid.get_widget(), 0, 0, 1, 1)\n main_grid.attach(statusbar, 0, 1, 1, 1)\n\n self.output_window.add(self.get_placeholder_image())\n\n self.window.set_titlebar(headerbar)\n self.window.set_default_icon_from_file(\"artwork/Logo.png\")\n self.window.add(main_grid)\n\n self.window.connect(\"key-press-event\", self.parse_keypress)\n self.window.connect(\"delete-event\", self.request_quit)\n \n self.other[\"connector\"] = self.connector\n self.other[\"headerbar\"] = headerbar\n self.other[\"history\"] = self.history\n self.other[\"input-window\"] = self.window\n self.other[\"iogrid\"] = self.iogrid\n self.other[\"plugins\"] = self.get_plugins(settings[\"enabled-plugins\"])\n self.other[\"statusbar\"] = statusbar\n self.other[\"statuslabel\"] = self.statuslabel\n self.other[\"output-notebook\"] = self.notebook\n self.other[\"output-window\"] = self.output_window\n self.other[\"main-grid\"] = main_grid\n self.other[\"menu_button\"] = menub\n \n self.apply_settings(settings)\n self.current_prompt = self.iogrid.add_prompt()\n\n self.window.set_application(self)\n self.output_window.set_application(self)\n\n self.output_window.move(800, 150)\n self.window.move(75, 160)", "def setup(self):\n header_print(self.data['intro'])\n header_print(self.data['help'])\n random.shuffle(self.data['draw'])\n random.shuffle(self.data['locations'])\n random.shuffle(self.data['events'])\n random.shuffle(self.data['aces'])\n random.shuffle(self.data['personalities'])\n self.stats = {\n 'round': 0,\n 'powers': {\n 'MOONS': 6,\n 'SUNS': 6,\n 'WAVES': 6,\n 'LEAVES': 6,\n 'WYRMS': 6,\n 'KNOTS': 6,\n },\n 'hand': self.data['draw'][:],\n 'discard': [],\n 'active': [],\n 'opponent': {},\n }", "def script(self):", "def on_load_theme (self):\n\n\t\tif self.has_started:\n\t\t\tself.init_buffers()\n\t\t\tself.redraw_background()\n\t\t\tself.redraw_foreground()", "def setup_page(self):\r\n raise NotImplementedError", "def setup_page(self):\n raise NotImplementedError", "def start_button_action(self):\n if self.dynamic.output_file.text() and os.path.isdir(\n self.dynamic.output_directory.text()\n ):\n\n additional_settings = {\n \"Save_data\": True,\n \"Filepath\": self.dynamic.output_directory.text(),\n \"Filename\": self.dynamic.output_file.text(),\n \"skip_init\": False,\n }\n\n # Generate a Lookuptable for the plots\n steps = (\n int(\n abs(\n float(self.dynamic.max_voltage_IV.value())\n / float(self.dynamic.voltage_steps_IV.value())\n )\n )\n + 1\n )\n self.cmapLookup = self.cmap.getLookupTable(1.0, 3.0, steps)\n self.variables.reset_plot_data()\n\n self.generate_dynamicwaiting_job(additional_settings)\n # self.variables.reset_plot_data()\n\n else:\n reply = 
QMessageBox.information(\n None,\n \"Warning\",\n \"Please enter a valid filepath and filename.\",\n QMessageBox.Ok,\n )", "def start_displayhook(self):\n pass", "def on_render(self, console):\n super().on_render(console)\n x, y = self.engine.mouse_location\n console.tiles_rgb['bg'][x, y] = color.white\n console.tiles_rgb['fg'][x, y] = color.black", "def view(self):\n\t\tself.done(1)", "def initGui(self):\n\n icon_path = ':/plugins/EU_Mapper/EUICON.png'\n self.add_action(\n icon_path,\n text=self.tr(u'Generate EU Map'),\n callback=self.run,\n parent=self.iface.mainWindow())\n\n # will be set False in run()\n self.first_start = True", "def start(self):\n self.show_greeting()\n self.read_frame()", "def _initialise_run(self) -> None:", "def postloop(self):\n print 'Bye!'", "def on_palette_close(self):\r\n pass", "def _populate_output(self):\n pass", "def state_preview_do(cfg, app, win, events):", "def populating_popup(self, *args):\n return _ida_hexrays.Hexrays_Hooks_populating_popup(self, *args)", "def do_stuff(self):\n self.create_tourism_raster()", "def initializePage(self):\n WC.WizardPage.initialize(self)\n self.page.use(qt.QVBoxLayout())\n exp = self.give_field(\"exp-store\").give_exp(\"pressure\")\n grps = exp.find_groups(self.give_field(\"mesh\"))\n dims = [(u\"Pressure\", 1.)]\n tit = u\"Adding pressure on meshes groups\"\n # The last groups should be seen first\n grps.reverse()\n WC.add_condition_selector(self, grps, dims, \"pressure-loading*\", tit)", "def on_startup(self) -> None:\n ...", "def render(self):\n self.env.render()\n #input(\"Press enter to take a step \")", "def setup(self, rc):\n pass", "def callback(data):\n c = config[data.data]\n w.config(text=c['text'], fg=c['fg'], bg=c['bg'])\n f.config(bg=c['bg'])", "def on_init(self):\n self.write_log(\"策略初始化\")\n self.load_bar(10)", "def on_init(self):\n self.write_log(\"策略初始化\")\n self.load_bar(10)", "def afterInit(self):", "def _render_callback(self, _sim, _viewer):\n pass", "def on_init(self):\n self.write_log(\"策略初始化\")\n self.load_bar(1)", "def on_run(self):\r\n\r\n\t\tpass", "def prepare_to_advance(self):\n\n self.capture_user_input()\n self.UI.reset_figure()\n # stopping the blocking event loop\n self.fig.canvas.stop_event_loop()", "def _on_step(self) -> bool:\n # print(\"locals \", self.locals)\n # # what timestep you think\n # print(\"timestep \",CustomCallback.step)\n # # what timestep a2c or ppo2 learn() is on \n # print(\"a2c/ppo2 num timestep\",self.num_timesteps)\n \n # TODO: add flag to save screenshots or not\n subfolder = os.path.join(self.directory, 'screen/')\n filepath = os.path.join(subfolder)\n img_name = '_screenshot_' + str(self.num_timesteps)\n \n if(self.algo == \"A2C\" or self.algo == \"PPO2\"):\n # self.locals['obs'] gives black and white imgs\n obs = self.env.get_images()\n for i in range(self.num_envs):\n mpl.image.imsave(subfolder+\"env_\" + str(i) + img_name + \"_.png\", obs[i])\n elif (self.algo == \"DQN\"):\n self.env.ale.saveScreenPNG(subfolder+\"env_\" + str(0) + img_name + \"_.png\")\n\n step_stats = {self.num_timesteps: {\n 'num_timesteps': self.num_timesteps,\n 'state': self.num_timesteps/self.num_envs,\n }\n }\n # add step to dict\n CustomCallback.main_data_dict.update(step_stats)\n key = self.num_timesteps\n\n # collection of minimum data: action, reward, lives\n if(self.algo == \"DQN\"):\n CustomCallback.main_data_dict[key]['action_env_0'] = self.locals['action']\n CustomCallback.main_data_dict[key]['action_name_env_0'] = self.actions[self.locals['env_action']]\n if(self.game == 
\"Pong\"):\n CustomCallback.main_data_dict[key]['curr_score_env_0'] = self.locals['episode_rewards'][-1]\n else:\n CustomCallback.main_data_dict[key]['cumulative_life_reward'] = self.locals['episode_rewards'][-1]\n if(self.isLives == True):\n CustomCallback.main_data_dict[CustomCallback.step]['lives'] = self.locals['info']['ale.lives']\n else:\n for i in range(self.num_envs):\n CustomCallback.main_data_dict[key]['action_env_'+str(i)] = self.locals['actions'][i]\n CustomCallback.main_data_dict[key]['action_name_env_'+str(i)] = self.actions[self.locals['actions'][i]]\n CustomCallback.main_data_dict[key]['step_reward_env_'+str(i)] = self.locals['rewards'][i]\n if(self.isLives == True):\n if(CustomCallback.step == 1):\n CustomCallback.main_data_dict[key]['lives_env_'+str(i)] = 3\n if(CustomCallback.step >= 2):\n CustomCallback.main_data_dict[key]['lives_env_'+str(i)] = self.locals['infos'][i]['ale.lives']\n\n if(self.game == \"Pong\" and self.algo != \"DQN\"):\n # extra processing for Pong scores\n self.find_life_game_info_a2c_ppo2_pong()\n\n # at the last step, write data into csv files\n if(CustomCallback.step == (self.num_steps/self.num_envs)):\n self.make_dataframes(self.df_list)\n # save minimal data\n self.df_to_csv(\"df_og.csv\", self.df_list)\n self.df_to_parquet()\n CustomCallback.step = CustomCallback.step + 1\n return True", "def btn_display_color_hist_callback(self):\n self.show_as_waiting(True)\n ids = self.tbl_images.get_selected_ids()\n names = self.tbl_images.get_selected_names()\n\n for id, name in zip(ids, names):\n ret = api.get_single_image(id, self.user_hash)\n if ret.get('success') is False:\n self.show_error(ret['error_msg'])\n else:\n image_fio = b64s_to_fio(ret['data'])\n img_hist_fio = img_proc.fio_color_hist_fio(image_fio)\n self.img_displayer.new_display(\n img_hist_fio, name + ' Histogram')\n self.show_as_waiting(False)", "def init_tab(self):", "def on_start(self):\r\n # This adjust the recipe tiles to the correct starting width:\r\n self.update_tile_width()\r\n # This searches the database in order to find all recipes and generate Tiles:\r\n self.update_tile_menu()\r\n toast('Welcome!', 3)", "def post_execute(self):", "def pre_execute(self):", "async def on_ready(self):\n\n await self._get_sketch_prompt()", "def run(palette='hsv', num_vals=12):\n fout_png = 'colors_{P}_{N}.png'.format(P=palette, N=num_vals)\n _, axis = plt.subplots(1, 1, figsize=(6, 6))\n colobj = MplColorHelper(palette, 0, num_vals-1)\n colors = [colobj.get_hexstr(yval) for yval in range(num_vals)]\n plt_color_text(colors)\n for idx, color in enumerate(reversed(colors)):\n print('{N:2} {COLOR}'.format(N=idx, COLOR=color))\n axis.set_title('{N} Discrete Colors from {MAP}'.format(N=num_vals, MAP=palette))\n plt.show()\n plt.savefig(fout_png)\n print(' WROTE: {PNG}'.format(PNG=fout_png))", "def on_init(self):\n self.write_log(\"策略初始化\")\n\n self.load_bar(10)", "def _onPremade(self, event):\n self.openPremade()", "def setUp(self):\n\t\tself.output = self.switchstdout()", "def takeControl(self):\n mainloop()", "def takeControl(self):\n mainloop()", "def callback(self, data):\n self.state = data.data\n #rospy.loginfo('HEARD')", "def launch(self):", "def computeUIPalette(self):\n\n\t\t# self.col['group-bg'] = QtGui.QColor(128, 128, 128)\n\t\tself.col['line'] = self.col['window'].darker(110)\n\t\tself.col['tooltip'] = QtGui.QColor(255, 255, 221)\n\t\tself.col['mandatory'] = QtGui.QColor(252, 152, 103)\n\t\tself.col['warning'] = QtGui.QColor(255, 216, 106)\n\t\tself.col['inherited'] = QtGui.QColor(161, 
239, 228)\n\n\t\tif self.col['window'].lightness() < 128: # Dark UI\n\t\t\tself.imgtheme = \"light\"\n\t\t\tself.col['text'] = QtGui.QColor(204, 204, 204)\n\t\t\tself.col['group-bg'] = QtGui.QColor(0, 0, 0)\n\t\t\tself.col['disabled'] = QtGui.QColor(102, 102, 102)\n\t\t\t# self.col['disabled'] = self.offsetColor(self.col['window'], +51)\n\t\t\t# self.col['base'] = self.offsetColor(self.col['window'], -34, 34)\n\t\t\t# self.col['alternate'] = self.offsetColor(self.col['base'], +6)\n\t\t\t# self.col['button'] = self.offsetColor(self.col['window'], +34, 102)\n\t\t\t# self.col['button-border'] = self.offsetColor(self.col['button'], +8)\n\t\t\t# self.col['menu-bg'] = self.offsetColor(self.col['window'], -17, 68)\n\t\t\t# self.col['menu-border'] = self.offsetColor(self.col['menu-bg'], +17)\n\t\t\t# self.col['group-header'] = self.offsetColor(self.col['window'], +17)\n\t\t\tself.col['base'] = self.col['window'].darker(150)\n\t\t\tself.col['alternate'] = self.col['base'].lighter(106)\n\t\t\tself.col['button'] = self.col['window'].lighter(150)\n\t\t\tself.col['button-border'] = self.col['button']\n\t\t\tself.col['menu-bg'] = self.col['window'].darker(125)\n\t\t\tself.col['menu-border'] = self.col['menu-bg']\n\t\t\tself.col['group-header'] = self.col['window'].lighter(150)\n\t\telse: # Light UI\n\t\t\tself.imgtheme = \"dark\"\n\t\t\tself.col['text'] = QtGui.QColor(51, 51, 51)\n\t\t\tself.col['group-bg'] = QtGui.QColor(255, 255, 255)\n\t\t\tself.col['disabled'] = QtGui.QColor(102, 102, 102)\n\t\t\t# self.col['disabled'] = self.offsetColor(self.col['window'], -51)\n\t\t\t# self.col['base'] = self.offsetColor(self.col['window'], +34, 221)\n\t\t\t# self.col['alternate'] = self.offsetColor(self.col['base'], -6)\n\t\t\t# self.col['button'] = self.offsetColor(self.col['window'], -17, 204)\n\t\t\t# self.col['button-border'] = self.offsetColor(self.col['button'], -8)\n\t\t\t# self.col['menu-bg'] = self.offsetColor(self.col['window'], +17, 187)\n\t\t\t# self.col['menu-border'] = self.offsetColor(self.col['menu-bg'], -17)\n\t\t\t# self.col['group-header'] = self.offsetColor(self.col['window'], -17)\n\t\t\tself.col['base'] = self.col['window'].lighter(150)\n\t\t\tself.col['alternate'] = self.col['base'].darker(106)\n\t\t\tself.col['button'] = self.col['window'].darker(150)\n\t\t\tself.col['button-border'] = self.col['button']\n\t\t\tself.col['menu-bg'] = self.col['window'].lighter(125)\n\t\t\tself.col['menu-border'] = self.col['menu-bg']\n\t\t\tself.col['group-header'] = self.col['window'].darker(150)\n\n\t\t# self.col['hover'] = self.offsetColor(self.col['button'], +17)\n\t\t# self.col['checked'] = self.offsetColor(self.col['button'], -17)\n\t\tself.col['hover'] = self.col['button'].lighter(110)\n\t\tself.col['checked'] = self.col['button'].darker(110)\n\t\tself.col['pressed'] = self.col['checked'] #self.col['highlight']\n\n\t\tif self.col['highlight'].lightness() < 136:\n\t\t\tself.col['highlighted-text'] = QtGui.QColor(255, 255, 255)\n\t\telse:\n\t\t\tself.col['highlighted-text'] = QtGui.QColor(0, 0, 0)\n\n\t\tif self.col['tooltip'].lightness() < 136:\n\t\t\tself.col['tooltip-text'] = QtGui.QColor(255, 255, 255)\n\t\telse:\n\t\t\tself.col['tooltip-text'] = QtGui.QColor(0, 0, 0)\n\n\t\t# if self.col['button'].lightness() < 170:\n\t\t# \tself.col['button-text'] = self.offsetColor(self.col['button'], +68, 204)\n\t\t# else:\n\t\t# \tself.col['button-text'] = self.offsetColor(self.col['button'], -68, 51)\n\t\tself.col['button-text'] = self.col['text']\n\n\t\tself.col['mandatory-bg'] = 
self.col['mandatory']\n\t\tif self.col['mandatory-bg'].lightness() < 128:\n\t\t\tself.col['mandatory-text'] = self.offsetColor(self.col['mandatory-bg'], +68, 204)\n\t\telse:\n\t\t\tself.col['mandatory-text'] = self.offsetColor(self.col['mandatory-bg'], -68, 51)\n\n\t\tself.col['warning-bg'] = self.col['warning']\n\t\tif self.col['warning-bg'].lightness() < 128:\n\t\t\tself.col['warning-text'] = self.offsetColor(self.col['warning-bg'], +68, 204)\n\t\telse:\n\t\t\tself.col['warning-text'] = self.offsetColor(self.col['warning-bg'], -68, 51)\n\n\t\tself.col['inherited-bg'] = self.col['inherited']\n\t\tif self.col['inherited-bg'].lightness() < 128:\n\t\t\tself.col['inherited-text'] = self.offsetColor(self.col['inherited-bg'], +68, 204)\n\t\telse:\n\t\t\tself.col['inherited-text'] = self.offsetColor(self.col['inherited-bg'], -68, 51)", "def show_data():", "def _callback_main(self, data):\n alpha = data.data\n self.command_synergy(alpha)", "def initialise_har(_page_ref):", "def on_start(self):", "def on_start(self):", "def run(self):\n \n # Create the dialog with elements (after translation) and keep reference\n # Only create GUI ONCE in callback, so that it will only load when the plugin is started\n if self.first_start == True:\n self.first_start = False\n self.dlg = SaveAttributesDialog()\n #self.dlg.pushButton.clicked.connect(self.select_output_file)\n self.dlg.pb_select_layer.clicked.connect(self.input_shp_file)\n self.dlg.okButton.clicked.connect(self.process)\n \n\n # show the dialog\n self.dlg.show()\n # Run the dialog event loop\n self.dlg.exec_()\n # See if OK was pressed", "def home_callback(self):\n self.rokucontrol.home_callback()", "def post_process(self):\n pass", "def post_process(self):\n pass", "def post_process(self):\n pass", "def post_process(self):\n pass", "def post_process(self):\n pass", "def post_init(self):\n\t\tpass", "def setup(self):\n self.labels = getattr( self, 'labels', self.make_labels_common( self.parsed_data ) )\n keys = list( self.sort_keys(self.parsed_data) )\n # We reverse so the `first` colors get given to the `largest`\n # data elements.\n #keys.reverse()\n self.colors = self.preset_colors(keys)", "def run(self):\n\n self.window.run_command(\"show_panel\", {\"panel\": \"output.reg_replace\"})", "def setup_callback():\n self.setup_window.deiconify()", "def pp_callback(self):\n self.rokucontrol.pp_callback()", "def on_step(self) -> None:\r\n\r\n if self.board == None:\r\n return\r\n\r\n TkState.disable(self.edit_menu.winfo_children())\r\n TkState.enable([self.reset_button])\r\n self.anim_board.next_gen()\r\n self.on_new_generation()\r\n self.painter.draw_board()", "def _screen(self):\n yield \"\"\n yield \"# screen functions\"\n yield \"log ?= echo\"\n yield \"# indentation\"\n yield 'log.halfdent := \" \"'\n yield 'log.indent := \" \"'\n yield \"\"\n yield \"log.info = \\\\\"\n yield \" $(log) \\\\\"\n yield ' $(palette.info)\" [info]\"$(palette.normal) \\\\'\n yield \" $(palette.info)$(1)$(palette.normal)\"\n yield \"\"\n yield \"log.warning = \\\\\"\n yield \" $(log) \\\\\"\n yield ' $(palette.warning)\" [warning]\"$(palette.normal) \\\\'\n yield \" $(palette.warning)$(1)$(palette.normal)\"\n yield \"\"\n yield \"log.error = \\\\\"\n yield \" $(log) \\\\\"\n yield ' $(palette.error)\" [error]\"$(palette.normal) \\\\'\n yield \" $(palette.error)$(1)$(palette.normal)\"\n yield \"\"\n yield \"log.debug = \\\\\"\n yield \" $(log) \\\\\"\n yield ' $(palette.debug)\" [debug]\"$(palette.normal) \\\\'\n yield \" $(palette.debug)$(1)$(palette.normal)\"\n yield 
\"\"\n yield \"log.firewall = \\\\\"\n yield \" $(log) \\\\\"\n yield ' $(palette.firewall)\" [firewall]\"$(palette.normal) \\\\'\n yield \" $(palette.firewall)$(1)$(palette.normal)\"\n yield \"\"\n yield \"# render a build action\"\n yield \"log.asset = \\\\\"\n yield \" $(log) \\\\\"\n yield ' $(palette.asset)\" [$(1)]\"$(palette.normal) \\\\'\n yield \" $(2)\"\n yield \"\"\n yield \"log.action = \\\\\"\n yield \" $(log) \\\\\"\n yield ' $(palette.action)\" [$(1)]\"$(palette.normal) \\\\'\n yield \" $(2)\"\n yield \"\"\n yield \"log.attention = \\\\\"\n yield \" $(log) \\\\\"\n yield ' $(palette.attention)\" [$(1)]\"$(palette.normal) \\\\'\n yield \" $(2)\"\n yield \"\"\n # all done\n return", "def on_load(self):", "def Echocallback(self, Frame_data):", "def render(self):\n\t\tself._menu.refresh_population()\n\t\tself._menu.blit_and_update()", "def render(self):\n self.delete()\n self.__create_background(self._imfname)\n # XXX must be last after successor implementation, but works without this line\n #self.c.event_generate(\"<Configure>\")\n #self.c.update_idletasks()", "def load_theme_values(self): \n pass", "def _after_connect(self):\r\n _debug('GUISignalGenerator: _after_connect()')\r\n # Update the controls\r\n self.button_sweep.enable()\r\n self.button_send_list.enable()\r\n self.button_reset.enable()\r\n \r\n # Update the RF button.\r\n rf_on = self.api.get_output()\r\n if rf_on == None: self.button_rf.set_checked(True, block_events=True).enable()\r\n else: self.button_rf.set_checked(rf_on, block_events=True).enable()\r\n \r\n # Update the combo; we block first just in case the value doesn't \"change\"\r\n if self.api == None: self.label_instrument_name.set_text('Simulation')\r\n else:\r\n if self.api.get_mode() == 'Fixed': self.combo_mode.set_value(0, block_events=True).enable()\r\n else: self.combo_mode.set_value(1, block_events=True).enable()\r\n self._combo_mode_changed()\r\n \r\n # Update the list plot\r\n self.query_list()", "def setup_script(self, *args, **kwargs):\n pass", "def post_exec(self):\n raise NotImplementedError(\"Must implement in frontend subclass.\")", "def _afterInit(self):\n pass", "def run(self):\n self.load_template()\n self.load_data()\n self.load_files()\n self.render_content()\n self.process()\n # pprint(self.data)", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def run(self) -> None:\n self._render()\n print(self.sio.getvalue())", "def __init__(self, *args, **kwargs):\n _gdi_.Palette_swiginit(self,_gdi_.new_Palette(*args, **kwargs))", "def on_run(self):\n pass", "def main():\n\n app_config = load_config()\n database_connection = mysql.connector.connect(**app_config[\"database\"])\n panelists = retrieve_all_panelist_appearance_counts(database_connection)\n show_years = retrieve_all_years(database_connection)\n\n rendered_report = render_report(show_years=show_years,\n panelists=panelists,\n report_settings=app_config[\"report\"])\n\n generate_output_files(rendered_report=rendered_report,\n report_settings=app_config[\"report\"])", "def generate():\n global data\n data = []\n # Generate a random data set\n for _ in range(usr_size.get()):\n data.append(random.randrange(usr_min.get(), usr_max.get()+1))\n display_data(data, ['red' for x in range(len(data))])", "def run(self):\n self.simulate_test_data()\n self.pipeline_test_data()\n self.plot_jump_flags_image()\n 
self.plot_groupdq_flags(pixel=[884, 550])\n self.plot_ramps_pre_post_correction(pixel=[884, 550])", "def run(self):\n\n # Create the dialog with elements (after translation) and keep reference\n # Only create GUI ONCE in callback, so that it will only load when the plugin is started\n if self.first_start == True:\n self.first_start = False\n self.dlg = EUMapperDialog()\n self.dlg.pushButton.clicked.connect(self.Population)\n self.dlg.pushButton_2.clicked.connect(self.Populationd)\n self.dlg.pushButton_3.clicked.connect(self.BNPC)\n self.dlg.pushButton_4.clicked.connect(self.AverageSalary)\n self.dlg.pushButton_5.clicked.connect(self.LifeExpectancy)\n self.dlg.pushButton_6.clicked.connect(self.COEMISSIONS)\n self.dlg.pushButton_7.clicked.connect(self.PlainEU)\n self.dlg.pushButton_8.clicked.connect(self.FIREPOWER)\n\n\n # show the dialog\n self.dlg.show()\n # Run the dialog event loop\n result = self.dlg.exec_()\n # See if OK was pressed\n if result:\n # Do something useful here - delete the line containing pass and\n # substitute with your code.\n pass\n #self.OpenEU()\n\n #pass", "def on_loadFile(self):\n self.stored = self.teText.toHtml()\n self.rf_widgetVis(state=True)" ]
[ "0.71663606", "0.5976665", "0.59089667", "0.5755218", "0.57544327", "0.56373245", "0.5611845", "0.56111276", "0.5609256", "0.55880344", "0.5586924", "0.5578858", "0.5556275", "0.5524903", "0.55076844", "0.5467774", "0.54665", "0.5456171", "0.543333", "0.543", "0.5428835", "0.541232", "0.53966635", "0.53932476", "0.5389214", "0.5388145", "0.5382311", "0.5368325", "0.5366644", "0.5354036", "0.53467196", "0.5341655", "0.5336188", "0.5336188", "0.53356737", "0.53162163", "0.5315218", "0.5304453", "0.5301141", "0.52900994", "0.5288789", "0.5285394", "0.5279898", "0.5279214", "0.5278186", "0.52733535", "0.5271271", "0.526687", "0.5265082", "0.5263456", "0.52616966", "0.52616966", "0.52386", "0.52332604", "0.52324575", "0.52228034", "0.5220165", "0.5216577", "0.52099836", "0.52099836", "0.520842", "0.52051693", "0.51893646", "0.51893646", "0.51893646", "0.51893646", "0.51893646", "0.5189218", "0.51865005", "0.51856554", "0.518106", "0.5175828", "0.51748455", "0.5174747", "0.5164316", "0.5159181", "0.51577824", "0.5154891", "0.51547813", "0.5149243", "0.51412886", "0.51398283", "0.5138714", "0.51366943", "0.5134867", "0.5134867", "0.5134867", "0.5134867", "0.5134867", "0.5134867", "0.5134867", "0.5134867", "0.51249605", "0.5115069", "0.5111953", "0.5097146", "0.50952977", "0.5092713", "0.5077045", "0.5072558" ]
0.6666663
1
Function is run when the addin stops. Clean up. If overridden ensure to execute with super().on_stop()
def on_stop(self): app = adsk.core.Application.cast(adsk.core.Application.get()) ui = app.userInterface palette = ui.palettes.itemById(self.palette_id) for handler in self.html_handlers: palette.incomingFromHTML.remove(handler) if palette: palette.deleteMe() super().on_stop()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_stop(self):\n pass", "def on_stop(self):\n pass", "def on_stop(self):\n pass", "def on_stop(self):\n pass", "def on_stop(self):\n pass", "def on_stop(self):\n pass", "def on_stop(self):\n pass", "def post_stop(self):", "def on_stop(self):\n self.logger.debug(\"Stopping...\")\n pass", "def stop(self):\n # All done!\n super().stop()", "def onstop(self, sender, **kwargs):\n pass", "def stop(self):\n self.on_stop()", "def stop(self):\n super().stop()", "def _stop(self):", "def stop(self):\n\t\tpass", "def on_close(self):\n self.stop_flag = True\n super(MyApp, self).on_close()", "def __exit__(self, *args):\n self.stop()", "def stop(self):", "def stop(self):", "def stop(self):\r\n pass", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self) -> None:\n ...", "def pre_stop(self):", "def stop(self) -> None:", "def stop(self) -> None:", "def stop (self):\n pass", "def stop (self):\n pass", "def on_stop(self):\n self.write_log(\"策略停止\")\n self.cta_engine.event_engine.unregister(EVENT_TIMER, self.process_timer_event)", "def _gracefully_stop(self):\n pass", "def stop(self):\n # print \"process shutdown complete\"", "def stop(self):\n return", "def stop():", "def stop():", "def stop():", "def stop():", "def __exit__(self):\n self._stop_all()", "def stop(self):\n self.unhook()", "def Stop(self) :\n\t\t...", "def __del__(self):\n self.stop()", "def __del__(self):\n self.stop()", "def __del__(self):\n self.stop()", "def __del__(self):\n self.stop()", "def _prepare_to_stop(self):\n pass", "def stop (self):\n super(InfofileCollector, self).stop()\n self.log.info (\"stopping\")\n self.maint_timer.cancel()", "def stop(self) -> None:\n pass", "def stop(self):\n self.killed = True", "def __exit__(self, exc_type, exc_val, exc_tb) -> None:\n self.stop()", "def notify_stop(self):\n self._notify_stop()", "def stopclean(self):\n raise Exception(\"Not implemented\")", "def stop(self):\n self._stop_event.set()\n super().stop()", "def on_stop(self):\n print(\"策略停止\")\n\n self.put_event()", "def on_stop(self):\n self.write_log(\"策略停止\")\n\n self.put_event()", "def stop(self):\n raise NotImplementedError", "def stop(self):\n raise NotImplementedError", "def stop(self):\r\n self.stopped = True", "def on_stop(self):\n self.write_log(\"策略停止\")\n self.put_event()", "def finalize(self):\n self.stop()", "def stop(self): # NEW!\n super().stop()\n self.app.stop()", "def on_stop(self):\n self.write_log(\"策略停止\")", "def on_stop(self):\n self.write_log(\"策略停止\")", "def on_stop(self):\n self.write_log(\"策略停止\")", "def on_stop(self):\n self.write_log(\"策略停止\")", "def __del__(self):\n if self.running:\n self.stop()", "def stop() -> None:", "def stop(self):\n self.stopped = True", "def on_cleanup(self):\n raise NotImplementedError", "def stop(self):\n raise NotImplementedError()", "def stop(self):\n raise NotImplementedError()", "def on_terminate(self):\n pass", "async def _stop(self):\n return", "def __window_stop(self):\n pass", "def on_exit(self):\n pass", "def stop(self):\n self.stopped = True", "def stop(self):\n self.stopped = True", "def stop(self):\n\n self.keep_running = False", "def _stop(self):\n return True", "def stop(self):\n self.finished = True", "def stop(self):\n self.finished.set()", "def shutdown_plugin(self):\n pass", "def cleanupAtExit():\n \n global client\n \n 
client.stop()", "def stop_monitoring(self):\n pass", "def __del__(self):\r\n if self.coreInst:\r\n for mod in self.coreInst.keys():\r\n self.coreInst[mod][plug_key].stop()", "def __del__(self):\n AppHelper.stopEventLoop()", "def __del__(self):\n AppHelper.stopEventLoop()", "def shutdown(self):", "def stop(self):\n self.exit.set()", "def stop(self):\n self.exit.set()", "def cleanup(self):\r\n self.stop()\r\n self.PWM.stop() # stop the PWM output\r", "def shutdown(self):\n print \"Plugin is shutting down.\"", "def stop(self):\n GameEngine().stop()\n self.on_stop()" ]
[ "0.81118715", "0.81118715", "0.81118715", "0.81118715", "0.81118715", "0.81118715", "0.81118715", "0.7925438", "0.7536368", "0.7458011", "0.7412743", "0.7316049", "0.73140156", "0.7286416", "0.72050714", "0.7180944", "0.71321785", "0.7109044", "0.7109044", "0.71050143", "0.70840544", "0.70840544", "0.70840544", "0.70840544", "0.70840544", "0.70840544", "0.70840544", "0.70840544", "0.70840544", "0.70840544", "0.70840544", "0.7074557", "0.7048657", "0.70448476", "0.70448476", "0.70300794", "0.70300794", "0.702121", "0.6975016", "0.6944608", "0.6942583", "0.6930924", "0.6930924", "0.6930924", "0.6930924", "0.6926382", "0.6871766", "0.6839956", "0.6835898", "0.6835898", "0.6835898", "0.6835898", "0.680747", "0.6806588", "0.67840594", "0.6779909", "0.6773583", "0.67669564", "0.6766425", "0.67537886", "0.6749745", "0.6740891", "0.67385", "0.67385", "0.6730063", "0.6724353", "0.67238873", "0.67224455", "0.67179084", "0.67179084", "0.67179084", "0.67179084", "0.6712123", "0.6710709", "0.67044735", "0.6695254", "0.6691486", "0.6691486", "0.66724515", "0.66602546", "0.66571546", "0.6657038", "0.66522527", "0.66522527", "0.66348314", "0.66323036", "0.6622813", "0.6613333", "0.65967596", "0.65964", "0.6593652", "0.65930414", "0.6584668", "0.6584668", "0.65834326", "0.6576692", "0.6576692", "0.65755016", "0.65579283", "0.6550357" ]
0.6577097
95
Method executed by Fusion. Don't rename
def notify(self, args): try: command_ = args.command inputs_ = command_.commandInputs on_execute_handler = _PaletteExecuteHandler(self.cmd_object_) command_.execute.add(on_execute_handler) self.cmd_object_.handlers.append(on_execute_handler) self.cmd_object_.on_create(command_, inputs_) except: app = adsk.core.Application.cast(adsk.core.Application.get()) ui = app.userInterface ui.messageBox('Command created failed: {}'.format(traceback.format_exc()))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def my_rename(self, src, dst):\n self.renamerCalled = True", "def _fix_up(self, cls, code_name):", "def fix_name(self):\n self._name_fixed = True", "def OnRenameTimer(self):\r\n \r\n self.Edit(self._current)", "def rename(old, new):", "def rename(old, new):", "def _transform_name(self) -> None:\n self.name = utils.maybe_rename_for_k8s(self.name)", "def OnRenameTimer(self):\r\n\r\n self.EditLabel(self._current, self._curColumn)", "def script(self):", "def rename(self, name):\n return _coconut_tail_call(self.__class__, name)", "def rename(self,oldName,newName):\n isLoaded = self.isLoaded(oldName)\n if isLoaded: self.unload(oldName)\n FileInfos.rename(self,oldName,newName)\n self.refreshDoubleTime()\n if isLoaded: self.load(newName)", "def name(self, new_name):\n self.rename(new_name)", "def hxlrename():\n run_script(hxlrename_main)", "def name(self, name):\n pass", "def change():", "def test_6a_change_file_name(self):\n if (not GST.logged_in) or (not GST.data_testing_swift_mounted):\n raise unittest.SkipTest(\"Skipped for failed login or failed mounting container.\")\n elif not GST.rename_file_test_ready:\n raise unittest.SkipTest(\"Skipped for failed to prepare renaming test.\")\n self.dismiss_dialogs()\n function = js_func[\"rename\"] % (GST.gs_file_paths[\"file_to_rename_path\"], GST.gs_file_paths[\"after_rename_path\"])\n try:\n self.send_request(function, \"rename()\")\n except Exception as e:\n raise RenameException(\"Failed to rename the file: \" + e.__str__())\n try:\n response = self.get_response()\n assert \"Success\" in response\n self.refresh_page()\n except AssertionError:\n raise RenameException(\"Failed to rename the file: \" + response)", "def invoke(self, op):\n for rename in self:\n rename(op)", "def on_path(self, new):\n self.name = basename(new)", "def name(self):\n ...", "def new_name(self,new_name):\n self.name = new_name", "def falcon():", "def rename(self,newName):\n self.userName = newName", "def name(self, new_name: str) -> None:\n raise NotImplementedError()", "def rename(oldname, newname):", "def reporte_rename(fecha):\n pass", "def projectFileRenamed(self, oldfn, newfn):\n editor = self.getOpenEditor(oldfn)\n if editor:\n editor.fileRenamed(newfn)", "def name():\n pass", "def name():\n pass", "def renameAction(self,**kwargs):\n try:\n old_action = kwargs[\"fname\"].split(' ')[1]\n except Exception,e:\n rospy.logerr(\"%s\"%str(e))\n self.mm.neglect()\n return\n rospy.loginfo(\"Enter the new name of the action:\")\n action = sys.stdin.readline().strip()\n\n self.bl.baxter_actions[str(action)] = self.bl.baxter_actions[str(old_action)]\n del self.bl.baxter_actions[str(old_action)]\n\n self.baxter.mm.changeMenuTitle(\"Action %s renamed to: %s\" % (old_action, str(action)))\n self.baxter.yes() \n self.mm.loadMenu(\"teachMenu\")", "def renameUI(*args, **kwargs)->AnyStr:\n pass", "def name(self):", "def name(self):", "def name(self):", "def name(self):", "def use(self):", "def RenameFile(self, oldname: str, newname: str) -> None:\n ...", "def set_name(self,name):\r\n self._name = __name", "def __set_name__(self, cls, name):\n pass", "def process_file(file_name):\n pass # delete this line and replace with your code here", "def newname(self) :\n\t\ttry :\n\t\t\treturn self._newname\n\t\texcept Exception as e:\n\t\t\traise e", "def startFile(self, newFileName):\n \n pass", "def rename_file (self):\n\t\tassert self.__filename, \"Renaming could not complete because the new filename could not be determined, one or more needed arguments is empty!\"\n\t\tos.rename( 
self._file.path, self.__filename )\n\t\t\n\t\tif self.verbose and self.log :\tself.log.info( 'File renamed from %s to %s' % (self._file.path, self.__filename))", "def setFunctionName(self, function_name):\r\n self.actualFunction = function_name", "def rename(self, name):\n self.name = name", "def friewallOn():\n pass", "def set_function_name_at(self, function_address, new_name):\n pass", "def _name_changed(self):\n self._named = True", "def base_rename(self, new_name):\n\n new_path = join(dirname(self.fspath), new_name)\n\n return self.rename(new_path)", "def change_model_name(self, name):\n self._name = name\n if self._zon is not None:\n self._zon.filename = f\"{name}.{self._zon.filename.split('.')[-1]}\"", "def re_name(name,new_name):\n\n try:\n os.rename(config_tools.full_dest+name,config_tools.full_dest+new_name)\n except OSError:\n print(f\"Не удалось переименовать {name}\")\n else:\n print(f\"{name} успешно переименновавано в {new_name}\")", "def transact(self):", "def transact(self):", "def bulk_rename(current_path,casetype):\n\tclick.echo(current_path)\n\tfilenames = os.listdir(current_path) \n\n\tfor filename in filenames:\n\t\tif filename != 'file_organizer0.03.py':\n\t\t\tif casetype == 'lower':\n\t\t\t\tclick.secho('Renaming ::> {} to same name in {} case'.format(filename,casetype),fg='green')\n\t\t\t\tclick.echo(filename.lower())\n\t\t\t\tos.rename(filename,filename.replace(\" \",\"-\").lower())\n\t\t\telif casetype == 'upper':\n\t\t\t\tclick.secho('Renaming ::> {} to same name in {} case'.format(filename,casetype),fg='green')\n\t\t\t\tclick.echo(filename.upper())\n\t\t\t\tos.rename(filename,filename.replace(\" \",\"-\").upper())\n\t\t\t\t\n\t\t\telif casetype == 'title':\n\t\t\t\tclick.secho('Renaming ::> {} to same name in {} case'.format(filename,casetype),fg='green')\n\t\t\t\tclick.echo(filename.title)\n\t\t\t\tos.rename(filename,filename.replace(\" \",\"-\").title())\n\t\t\t\t\n\t\t\telse:\n\t\t\t\tclick.secho('Renaming ::> {} to same name in {} case'.format(filename,casetype),fg='green')\n\t\t\t\tclick.echo(filename.lower())\n\t\t\t\tos.rename(filename,filename.replace(\" \",\"-\").lower())\n\n\tclick.secho('Finished Renaming to {} case!!'.format(casetype),bg='blue',fg='white')", "def name():\n\n pass", "def autoname(self):\n raise NotImplementedError()", "def _postprocess(self):", "def replace(name, newobject):", "def newname(self, newname) :\n\t\ttry :\n\t\t\tself._newname = newname\n\t\texcept Exception as e:\n\t\t\traise e", "def use(target, name):", "def autorename_shots(context):\n\n for index, shot in enumerate(context.scene.milkshake_shots):\n shot.code = f\"SH{index + 1 :02}\"\n shot.camera.name = f\"{shot.code}.CAMX.000\"\n for obj in bpy.data.objects:\n if obj.data == shot.camera:\n obj.name = shot.camera.name\n break\n core.log(f\"Renamed shot {shot.code} and camera {shot.camera.name}.\")", "def perform(self):\n pass", "def __init__(self):\n self.label = \"Change Field Name\"\n self.description = \"Uses Alter Field to change the field name. 
Can select one field at a time \" + \\\n \"or enter a string that will be deleted from all fields in the feature if that string is in the \" + \\\n \"field name.\"\n self.canRunInBackground = False", "def _hook(self):", "def __editorRenamed(self, fn, editor):\n self.editorRenamed.emit(fn)\n self.editorRenamedEd.emit(editor)", "def rename(self,oldName,newName):\n #--Update references\n fileInfo = self[oldName]\n self[newName] = self[oldName]\n del self[oldName]\n self.table.moveRow(oldName,newName)\n #--FileInfo\n fileInfo.name = newName\n #--File system\n newPath = os.path.join(fileInfo.dir,newName)\n oldPath = os.path.join(fileInfo.dir,oldName)\n renameFile(oldPath,newPath)\n #--Done\n fileInfo.madeBackup = False", "def ChangeName(self, newName):\n if newName != \"\":\n newPath = self.format + os.sep + \"playlists\" + os.sep + newName + \".txt\"\n os.replace(self.path, newPath)\n self.path = newPath", "def set_filename(self, file_name):", "def __call__(fun_name):", "def handle_file_name(self):\r\n self.tmp_name = (os.path.basename(self.source_file_name)).split('.')[0]\r\n result_name = self.tmp_name + '_result_'\r\n log_name = self.tmp_name + '_log.csv'\r\n \r\n self.result_file_name = os.path.join(self.save_path , result_name) \r\n self.log_file_name = os.path.join(self.log_path , log_name)", "def setName(self, funcName):\r\n # type: (str) -> None\r\n idc.MakeName(self.func_ea, funcName)", "def userRenamed(self, oldname, newname):\n # Send messasge to Server bot.\n self.data_in(text=\"\", type=\"renamed\", oldname=oldname, newname=newname)", "def update_name(self, new_name):\r\n self.__name = new_name", "def update_name(self, new_name):\r\n self.__name = new_name", "def __call__(self):\n\t\treturn", "def setMethodFilename(self,filename):\n if filename[-4:] != \".imf\":\n filename = filename+\".imf\"\n for i in [1]:\n caputS(self.pvStem+\"PORT\"+str(i)+\":MethodFileName\",filename)", "def getName():", "def getName():", "def getName():", "def getName():", "def getName():", "def getName():", "def change_folname(self, name):\r\n self.oname = f\"{name}\\\\\"\r\n self.ver = os.path.join(self.p, self.oname)\r\n try:\r\n os.chdir(self.ver)\r\n self.confirmed = True\r\n except Exception as error:\r\n return error\r\n finally:\r\n return self.confirmed", "def host_renameOpsiDepotserver(self, oldId, newId):", "def _change_name(self, suff, info_extra):\n if 'cable-ring' in self.path:\n i1 = info_extra['convex_hull_area']\n i2 = info_extra['best_possible_area']\n f = i1 / i2\n suff = suff.replace('.png',\n f'-area-{i1:0.3f}-best-{i2:0.3f}-FRAC-{f:0.3f}.png')\n elif 'cloth-flat' in self.path:\n i1 = info_extra['cloth_coverage']\n suff = suff.replace('.png', f'-coverage-{i1:0.3f}.png')\n elif 'bag-alone' in self.path:\n i1 = info_extra['convex_hull_area']\n i2 = info_extra['best_possible_area']\n suff = suff.replace('.png', f'-area-{i1:0.3f}-best-{i2:0.3f}.png')\n else:\n pass\n return suff", "def upgrade(self):", "def upgrade(self):", "def name(self):\r\n pass", "def on_action_myname(self, value):\n raise NotImplementedError()", "def set_name(self, newname=\"\"):\n self.name = newname", "def post_execute(self):", "def onApply(self, event):\n\n # Rename all of the files based on the substitution.\n for (old, new) in zip(self.m_diskNames, self.m_newNames):\n if old != new:\n old = os.path.join(self.m_curPath, old)\n new = os.path.join(self.m_curPath, new)\n try:\n os.rename(old, new)\n except OSError:\n pass\n\n # Now we out the lists so that what the user sees after this\n # reflects what's on disk.\n 
self.m_diskNames[:] = []\n self.m_newNames[:] = []\n\n # Update.\n self.updateDiskFileList()", "def test_rename_python_api(self):\n\n rename.rename([NEW_APP_NAME, NEW_DOMAIN])\n self.assertTrue(os.path.exists(RENAMED_PROJECT_DIR))", "def get_name():", "def change_name(change_account):\n change_data(change_account, changed_data='name')", "def Notify(self):\r\n\r\n self._owner.OnRenameTimer()", "def setnames_call_funcs(log, subj, modal, tcorrsufx):\n infile = '{}_{}_{}_mean+orig'.format(modal, subj, tcorrsufx)\n outname = '{}_{}_{}_mean_Z'.format(modal, subj, tcorrsufx)\n fishertransform(log, infile, outname)\n convert_to_nifti(log, outname+'+orig.')", "def _get_rename(self):\n return self.__rename", "def name(self):\n pass", "def name(self):\n raise Exception(\"Must be reimplemented in subclass.\")", "def name(self):\n raise NotImplementedError", "def task(self, name):\n pass" ]
[ "0.6707409", "0.63080657", "0.6128592", "0.6127869", "0.60907537", "0.60907537", "0.60767657", "0.60607314", "0.5969004", "0.5958113", "0.5949128", "0.59331226", "0.592982", "0.5924983", "0.5913726", "0.5904696", "0.59042233", "0.5902338", "0.5860706", "0.5860248", "0.58415276", "0.5837389", "0.582821", "0.5825225", "0.5823624", "0.5815447", "0.5811894", "0.5811894", "0.57824343", "0.57744396", "0.5734564", "0.5734564", "0.5734564", "0.5734564", "0.5734162", "0.57301223", "0.57146", "0.57037467", "0.5695449", "0.5690413", "0.5673153", "0.5660451", "0.5643736", "0.5642723", "0.5616921", "0.5614471", "0.56138974", "0.5612163", "0.56047046", "0.55928767", "0.5585179", "0.5585179", "0.5576685", "0.55733484", "0.5568689", "0.5565197", "0.5555371", "0.55538", "0.55530554", "0.55495334", "0.5544884", "0.55308366", "0.5530315", "0.55298", "0.5526639", "0.55195254", "0.5516381", "0.5509643", "0.55039454", "0.55006135", "0.54978555", "0.54840535", "0.54840535", "0.54735035", "0.546043", "0.545966", "0.545966", "0.545966", "0.545966", "0.545966", "0.545966", "0.5458309", "0.5457402", "0.54537916", "0.545318", "0.545318", "0.5452512", "0.5449907", "0.54384476", "0.5435435", "0.5433364", "0.5422622", "0.5413981", "0.5410806", "0.5407937", "0.54041654", "0.5400843", "0.5399923", "0.53959435", "0.5395655", "0.53810084" ]
0.0
-1
Method executed by Fusion. Don't rename
def notify(self, args): app = adsk.core.Application.cast(adsk.core.Application.get()) ui = app.userInterface try: # Create and display the palette. palette = ui.palettes.itemById(self.cmd_object_.palette_id) if not palette: palette = ui.palettes.add( self.cmd_object_.palette_id, self.cmd_object_.palette_name, self.cmd_object_.palette_html_file_url, self.cmd_object_.palette_is_visible, self.cmd_object_.palette_show_close_button, self.cmd_object_.palette_is_resizable, self.cmd_object_.palette_width, self.cmd_object_.palette_height, True ) # Add handler to HTMLEvent of the palette. on_html_event_handler = _HTMLEventHandler(self.cmd_object_) palette.incomingFromHTML.add(on_html_event_handler) self.cmd_object_.handlers.append(on_html_event_handler) self.cmd_object_.html_handlers.append(on_html_event_handler) # Add handler to CloseEvent of the palette. on_closed_handler = _PaletteCloseHandler(self.cmd_object_) palette.closed.add(on_closed_handler) self.cmd_object_.handlers.append(on_closed_handler) else: main_url = urlparse(self.cmd_object_.palette_html_file_url) current_url = urlparse(palette.htmlFileURL) if not ( (not self.cmd_object_.palette_force_url_reload) & (main_url.netloc == current_url.netloc) & (main_url.path == current_url.path) ): # ui.messageBox(current_url.netloc + " vs. " + main_url.netloc) # ui.messageBox(current_url.path + " vs. " + main_url.path) # ui.messageBox(str(self.cmd_object_.palette_force_url_reload)) palette.htmlFileURL = self.cmd_object_.palette_html_file_url palette.isVisible = True self.cmd_object_.on_palette_execute(palette) except: ui.messageBox('Palette ({}) Execution Failed: {}'.format( self.cmd_object_.palette_html_file_url, traceback.format_exc()) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _fix_up(self, cls, code_name):", "def my_rename(self, src, dst):\n self.renamerCalled = True", "def script(self):", "def process_file(file_name):\n pass # delete this line and replace with your code here", "def falcon():", "def fix_name(self):\n self._name_fixed = True", "def dummy_fn(self):\n\t\tpass", "def name():\n pass", "def name():\n pass", "def name(self):\n ...", "def use(self):", "def name(self, name):\n pass", "def friewallOn():\n pass", "def name(self):", "def name(self):", "def name(self):", "def name(self):", "def __call__(self):\n\t\treturn", "def __call__(fun_name):", "def name():\n\n pass", "def perform(self):\n pass", "def setFunctionName(self, function_name):\r\n self.actualFunction = function_name", "def autoname(self):\n raise NotImplementedError()", "def __init__(self):\n self.label = \"Change Field Name\"\n self.description = \"Uses Alter Field to change the field name. Can select one field at a time \" + \\\n \"or enter a string that will be deleted from all fields in the feature if that string is in the \" + \\\n \"field name.\"\n self.canRunInBackground = False", "def _transform_name(self) -> None:\n self.name = utils.maybe_rename_for_k8s(self.name)", "def _hook(self):", "def _postprocess(self):", "def OnRenameTimer(self):\r\n \r\n self.Edit(self._current)", "def on_cls_action_myname(value):\n raise NotImplementedError()", "def Deflect(self):\t\t\n\t\tprint(self.name.Title() + \"Deflect!\")", "def action(self):\n pass", "def action(self):\n pass", "def on_action_myname(self, value):\n raise NotImplementedError()", "def name(self):\r\n pass", "def __call__( self ):\n pass", "def startFile(self, newFileName):\n \n pass", "def act(self):\n pass", "def change():", "def method_name(self):\n pass", "def _do_action(self):\n pass", "def _do_action(self):\n pass", "def foo_method(self):\n return \"My name is foo_method.\"", "def OnRenameTimer(self):\r\n\r\n self.EditLabel(self._current, self._curColumn)", "def use(target, name):", "def hxlrename():\n run_script(hxlrename_main)", "def name(self):\n pass", "def do_refactor(self):\n return f\"{self} is refactoring code\"", "def sth():", "def getName():", "def getName():", "def getName():", "def getName():", "def getName():", "def getName():", "def renameUI(*args, **kwargs)->AnyStr:\n pass", "def feature():\n pass", "def renameAction(self,**kwargs):\n try:\n old_action = kwargs[\"fname\"].split(' ')[1]\n except Exception,e:\n rospy.logerr(\"%s\"%str(e))\n self.mm.neglect()\n return\n rospy.loginfo(\"Enter the new name of the action:\")\n action = sys.stdin.readline().strip()\n\n self.bl.baxter_actions[str(action)] = self.bl.baxter_actions[str(old_action)]\n del self.bl.baxter_actions[str(old_action)]\n\n self.baxter.mm.changeMenuTitle(\"Action %s renamed to: %s\" % (old_action, str(action)))\n self.baxter.yes() \n self.mm.loadMenu(\"teachMenu\")", "def hook_name(self) -> str:", "def fire(self):", "def invoke(self, op):\n for rename in self:\n rename(op)", "def setMethodFilename(self,filename):\n if filename[-4:] != \".imf\":\n filename = filename+\".imf\"\n for i in [1]:\n caputS(self.pvStem+\"PORT\"+str(i)+\":MethodFileName\",filename)", "def __call__():", "def __call__():", "def __call__():", "def __call__():", "def __call__():", "def name(self):\n raise Exception(\"Must be reimplemented in subclass.\")", "def name(self):\n raise NotImplementedError", "def fixupFileNames(process):\n if not hasattr(process.source, \"fileNames\"):\n process.source.fileNames = cms.untracked.vstring()\n return", "def __set_name__(self, 
cls, name):\n pass", "def Shuriken(self):\n\t\tprint(self.name.title() + \" is now shotting.\")", "def task(self, name):\n pass", "def name(self) -> str: # pragma: no cover", "def __call__(self):\n pass", "def __call__(self):\n pass", "def no_overwrite_example():", "def run(self): \r\n return", "def plugin_name(self):", "def processing(self):\n pass", "def name(self, new_name: str) -> None:\n raise NotImplementedError()", "def handle_file_name(self):\r\n self.tmp_name = (os.path.basename(self.source_file_name)).split('.')[0]\r\n result_name = self.tmp_name + '_result_'\r\n log_name = self.tmp_name + '_log.csv'\r\n \r\n self.result_file_name = os.path.join(self.save_path , result_name) \r\n self.log_file_name = os.path.join(self.log_path , log_name)", "def _adjust_gs_swift_bug(self, name):\n if name:\n return name.replace(\"/\", \"\")\n else:\n return name", "def use(self):\n pass", "def post_execute(self):", "def set_name(self,name):\r\n self._name = __name", "def get_name(self) -> str:\n return \"uncrustify\"", "def get_name(self):", "def get_name(self):", "def function(self):\n raise NotImplementedError", "def wantsNametag(self):\n return 0", "def get_name():", "def transact(self):", "def transact(self):", "def __def_function__():\n pass", "def rename(self, name):\n return _coconut_tail_call(self.__class__, name)", "def _action(self):\n pass", "def rename(self,newName):\n self.userName = newName", "def pre_execute(self):", "def hello(): #status: WIP\r\n pass", "def test_6a_change_file_name(self):\n if (not GST.logged_in) or (not GST.data_testing_swift_mounted):\n raise unittest.SkipTest(\"Skipped for failed login or failed mounting container.\")\n elif not GST.rename_file_test_ready:\n raise unittest.SkipTest(\"Skipped for failed to prepare renaming test.\")\n self.dismiss_dialogs()\n function = js_func[\"rename\"] % (GST.gs_file_paths[\"file_to_rename_path\"], GST.gs_file_paths[\"after_rename_path\"])\n try:\n self.send_request(function, \"rename()\")\n except Exception as e:\n raise RenameException(\"Failed to rename the file: \" + e.__str__())\n try:\n response = self.get_response()\n assert \"Success\" in response\n self.refresh_page()\n except AssertionError:\n raise RenameException(\"Failed to rename the file: \" + response)", "def set_filename(self, file_name):" ]
[ "0.6129317", "0.6021787", "0.5983167", "0.59377337", "0.59124655", "0.5910845", "0.5819237", "0.57898045", "0.57898045", "0.5778516", "0.5760022", "0.5747388", "0.5742715", "0.56831336", "0.56831336", "0.56831336", "0.56831336", "0.56460947", "0.5617484", "0.5591955", "0.55917186", "0.55884457", "0.55796283", "0.5564985", "0.5563565", "0.5513896", "0.5504558", "0.54955345", "0.5489025", "0.54766643", "0.54745084", "0.54745084", "0.54649407", "0.5456285", "0.5444433", "0.5441439", "0.5441366", "0.54345787", "0.54319704", "0.54313695", "0.54313695", "0.54309577", "0.5430041", "0.54247135", "0.54171115", "0.5410531", "0.5409297", "0.54045236", "0.53998584", "0.53998584", "0.53998584", "0.53998584", "0.53998584", "0.53998584", "0.5399758", "0.53897697", "0.5389063", "0.538529", "0.5384183", "0.5379851", "0.536246", "0.53594345", "0.53594345", "0.53594345", "0.53594345", "0.53594345", "0.53533876", "0.5344365", "0.53440386", "0.5342188", "0.533726", "0.5331874", "0.53316975", "0.53306764", "0.53306764", "0.5329526", "0.53268373", "0.5317787", "0.5317278", "0.531559", "0.5312765", "0.53113085", "0.5310851", "0.5310381", "0.53040135", "0.5296868", "0.5289083", "0.5289083", "0.5288054", "0.52837944", "0.52808684", "0.5276773", "0.5276773", "0.52740395", "0.5273917", "0.52728766", "0.5272804", "0.52725726", "0.52658165", "0.52627206", "0.52610546" ]
0.0
-1
Method executed by Fusion. Don't rename
def notify(self, args):
    try:
        html_args = adsk.core.HTMLEventArgs.cast(args)
        self.cmd_object_.on_html_event(html_args)
    except:
        app = adsk.core.Application.cast(adsk.core.Application.get())
        ui = app.userInterface
        ui.messageBox('Failed Handling HTML Event:\n{}'.format(traceback.format_exc()))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _fix_up(self, cls, code_name):", "def my_rename(self, src, dst):\n self.renamerCalled = True", "def script(self):", "def process_file(file_name):\n pass # delete this line and replace with your code here", "def falcon():", "def fix_name(self):\n self._name_fixed = True", "def dummy_fn(self):\n\t\tpass", "def name():\n pass", "def name():\n pass", "def name(self):\n ...", "def use(self):", "def name(self, name):\n pass", "def friewallOn():\n pass", "def name(self):", "def name(self):", "def name(self):", "def name(self):", "def __call__(self):\n\t\treturn", "def __call__(fun_name):", "def name():\n\n pass", "def perform(self):\n pass", "def setFunctionName(self, function_name):\r\n self.actualFunction = function_name", "def autoname(self):\n raise NotImplementedError()", "def __init__(self):\n self.label = \"Change Field Name\"\n self.description = \"Uses Alter Field to change the field name. Can select one field at a time \" + \\\n \"or enter a string that will be deleted from all fields in the feature if that string is in the \" + \\\n \"field name.\"\n self.canRunInBackground = False", "def _transform_name(self) -> None:\n self.name = utils.maybe_rename_for_k8s(self.name)", "def _hook(self):", "def _postprocess(self):", "def OnRenameTimer(self):\r\n \r\n self.Edit(self._current)", "def on_cls_action_myname(value):\n raise NotImplementedError()", "def Deflect(self):\t\t\n\t\tprint(self.name.Title() + \"Deflect!\")", "def action(self):\n pass", "def action(self):\n pass", "def on_action_myname(self, value):\n raise NotImplementedError()", "def name(self):\r\n pass", "def __call__( self ):\n pass", "def startFile(self, newFileName):\n \n pass", "def act(self):\n pass", "def change():", "def method_name(self):\n pass", "def _do_action(self):\n pass", "def _do_action(self):\n pass", "def foo_method(self):\n return \"My name is foo_method.\"", "def OnRenameTimer(self):\r\n\r\n self.EditLabel(self._current, self._curColumn)", "def use(target, name):", "def hxlrename():\n run_script(hxlrename_main)", "def name(self):\n pass", "def do_refactor(self):\n return f\"{self} is refactoring code\"", "def sth():", "def getName():", "def getName():", "def getName():", "def getName():", "def getName():", "def getName():", "def renameUI(*args, **kwargs)->AnyStr:\n pass", "def feature():\n pass", "def renameAction(self,**kwargs):\n try:\n old_action = kwargs[\"fname\"].split(' ')[1]\n except Exception,e:\n rospy.logerr(\"%s\"%str(e))\n self.mm.neglect()\n return\n rospy.loginfo(\"Enter the new name of the action:\")\n action = sys.stdin.readline().strip()\n\n self.bl.baxter_actions[str(action)] = self.bl.baxter_actions[str(old_action)]\n del self.bl.baxter_actions[str(old_action)]\n\n self.baxter.mm.changeMenuTitle(\"Action %s renamed to: %s\" % (old_action, str(action)))\n self.baxter.yes() \n self.mm.loadMenu(\"teachMenu\")", "def hook_name(self) -> str:", "def fire(self):", "def invoke(self, op):\n for rename in self:\n rename(op)", "def setMethodFilename(self,filename):\n if filename[-4:] != \".imf\":\n filename = filename+\".imf\"\n for i in [1]:\n caputS(self.pvStem+\"PORT\"+str(i)+\":MethodFileName\",filename)", "def __call__():", "def __call__():", "def __call__():", "def __call__():", "def __call__():", "def name(self):\n raise Exception(\"Must be reimplemented in subclass.\")", "def name(self):\n raise NotImplementedError", "def fixupFileNames(process):\n if not hasattr(process.source, \"fileNames\"):\n process.source.fileNames = cms.untracked.vstring()\n return", "def __set_name__(self, 
cls, name):\n pass", "def Shuriken(self):\n\t\tprint(self.name.title() + \" is now shotting.\")", "def task(self, name):\n pass", "def name(self) -> str: # pragma: no cover", "def __call__(self):\n pass", "def __call__(self):\n pass", "def no_overwrite_example():", "def run(self): \r\n return", "def plugin_name(self):", "def processing(self):\n pass", "def name(self, new_name: str) -> None:\n raise NotImplementedError()", "def handle_file_name(self):\r\n self.tmp_name = (os.path.basename(self.source_file_name)).split('.')[0]\r\n result_name = self.tmp_name + '_result_'\r\n log_name = self.tmp_name + '_log.csv'\r\n \r\n self.result_file_name = os.path.join(self.save_path , result_name) \r\n self.log_file_name = os.path.join(self.log_path , log_name)", "def _adjust_gs_swift_bug(self, name):\n if name:\n return name.replace(\"/\", \"\")\n else:\n return name", "def use(self):\n pass", "def post_execute(self):", "def set_name(self,name):\r\n self._name = __name", "def get_name(self) -> str:\n return \"uncrustify\"", "def get_name(self):", "def get_name(self):", "def function(self):\n raise NotImplementedError", "def wantsNametag(self):\n return 0", "def get_name():", "def transact(self):", "def transact(self):", "def __def_function__():\n pass", "def rename(self, name):\n return _coconut_tail_call(self.__class__, name)", "def _action(self):\n pass", "def rename(self,newName):\n self.userName = newName", "def pre_execute(self):", "def hello(): #status: WIP\r\n pass", "def test_6a_change_file_name(self):\n if (not GST.logged_in) or (not GST.data_testing_swift_mounted):\n raise unittest.SkipTest(\"Skipped for failed login or failed mounting container.\")\n elif not GST.rename_file_test_ready:\n raise unittest.SkipTest(\"Skipped for failed to prepare renaming test.\")\n self.dismiss_dialogs()\n function = js_func[\"rename\"] % (GST.gs_file_paths[\"file_to_rename_path\"], GST.gs_file_paths[\"after_rename_path\"])\n try:\n self.send_request(function, \"rename()\")\n except Exception as e:\n raise RenameException(\"Failed to rename the file: \" + e.__str__())\n try:\n response = self.get_response()\n assert \"Success\" in response\n self.refresh_page()\n except AssertionError:\n raise RenameException(\"Failed to rename the file: \" + response)", "def set_filename(self, file_name):" ]
[ "0.6129317", "0.6021787", "0.5983167", "0.59377337", "0.59124655", "0.5910845", "0.5819237", "0.57898045", "0.57898045", "0.5778516", "0.5760022", "0.5747388", "0.5742715", "0.56831336", "0.56831336", "0.56831336", "0.56831336", "0.56460947", "0.5617484", "0.5591955", "0.55917186", "0.55884457", "0.55796283", "0.5564985", "0.5563565", "0.5513896", "0.5504558", "0.54955345", "0.5489025", "0.54766643", "0.54745084", "0.54745084", "0.54649407", "0.5456285", "0.5444433", "0.5441439", "0.5441366", "0.54345787", "0.54319704", "0.54313695", "0.54313695", "0.54309577", "0.5430041", "0.54247135", "0.54171115", "0.5410531", "0.5409297", "0.54045236", "0.53998584", "0.53998584", "0.53998584", "0.53998584", "0.53998584", "0.53998584", "0.5399758", "0.53897697", "0.5389063", "0.538529", "0.5384183", "0.5379851", "0.536246", "0.53594345", "0.53594345", "0.53594345", "0.53594345", "0.53594345", "0.53533876", "0.5344365", "0.53440386", "0.5342188", "0.533726", "0.5331874", "0.53316975", "0.53306764", "0.53306764", "0.5329526", "0.53268373", "0.5317787", "0.5317278", "0.531559", "0.5312765", "0.53113085", "0.5310851", "0.5310381", "0.53040135", "0.5296868", "0.5289083", "0.5289083", "0.5288054", "0.52837944", "0.52808684", "0.5276773", "0.5276773", "0.52740395", "0.5273917", "0.52728766", "0.5272804", "0.52725726", "0.52658165", "0.52627206", "0.52610546" ]
0.0
-1
Method executed by Fusion. Don't rename
def notify(self, args):
    try:
        self.cmd_object_.on_palette_close()
    except:
        app = adsk.core.Application.cast(adsk.core.Application.get())
        ui = app.userInterface
        ui.messageBox('Failed During Palette Close:\n{}'.format(traceback.format_exc()))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _fix_up(self, cls, code_name):", "def my_rename(self, src, dst):\n self.renamerCalled = True", "def script(self):", "def process_file(file_name):\n pass # delete this line and replace with your code here", "def falcon():", "def fix_name(self):\n self._name_fixed = True", "def dummy_fn(self):\n\t\tpass", "def name():\n pass", "def name():\n pass", "def name(self):\n ...", "def use(self):", "def name(self, name):\n pass", "def friewallOn():\n pass", "def name(self):", "def name(self):", "def name(self):", "def name(self):", "def __call__(self):\n\t\treturn", "def __call__(fun_name):", "def name():\n\n pass", "def perform(self):\n pass", "def setFunctionName(self, function_name):\r\n self.actualFunction = function_name", "def autoname(self):\n raise NotImplementedError()", "def __init__(self):\n self.label = \"Change Field Name\"\n self.description = \"Uses Alter Field to change the field name. Can select one field at a time \" + \\\n \"or enter a string that will be deleted from all fields in the feature if that string is in the \" + \\\n \"field name.\"\n self.canRunInBackground = False", "def _transform_name(self) -> None:\n self.name = utils.maybe_rename_for_k8s(self.name)", "def _hook(self):", "def _postprocess(self):", "def OnRenameTimer(self):\r\n \r\n self.Edit(self._current)", "def on_cls_action_myname(value):\n raise NotImplementedError()", "def Deflect(self):\t\t\n\t\tprint(self.name.Title() + \"Deflect!\")", "def action(self):\n pass", "def action(self):\n pass", "def on_action_myname(self, value):\n raise NotImplementedError()", "def name(self):\r\n pass", "def __call__( self ):\n pass", "def startFile(self, newFileName):\n \n pass", "def act(self):\n pass", "def change():", "def method_name(self):\n pass", "def _do_action(self):\n pass", "def _do_action(self):\n pass", "def foo_method(self):\n return \"My name is foo_method.\"", "def OnRenameTimer(self):\r\n\r\n self.EditLabel(self._current, self._curColumn)", "def use(target, name):", "def hxlrename():\n run_script(hxlrename_main)", "def name(self):\n pass", "def do_refactor(self):\n return f\"{self} is refactoring code\"", "def sth():", "def getName():", "def getName():", "def getName():", "def getName():", "def getName():", "def getName():", "def renameUI(*args, **kwargs)->AnyStr:\n pass", "def feature():\n pass", "def renameAction(self,**kwargs):\n try:\n old_action = kwargs[\"fname\"].split(' ')[1]\n except Exception,e:\n rospy.logerr(\"%s\"%str(e))\n self.mm.neglect()\n return\n rospy.loginfo(\"Enter the new name of the action:\")\n action = sys.stdin.readline().strip()\n\n self.bl.baxter_actions[str(action)] = self.bl.baxter_actions[str(old_action)]\n del self.bl.baxter_actions[str(old_action)]\n\n self.baxter.mm.changeMenuTitle(\"Action %s renamed to: %s\" % (old_action, str(action)))\n self.baxter.yes() \n self.mm.loadMenu(\"teachMenu\")", "def hook_name(self) -> str:", "def fire(self):", "def invoke(self, op):\n for rename in self:\n rename(op)", "def setMethodFilename(self,filename):\n if filename[-4:] != \".imf\":\n filename = filename+\".imf\"\n for i in [1]:\n caputS(self.pvStem+\"PORT\"+str(i)+\":MethodFileName\",filename)", "def __call__():", "def __call__():", "def __call__():", "def __call__():", "def __call__():", "def name(self):\n raise Exception(\"Must be reimplemented in subclass.\")", "def name(self):\n raise NotImplementedError", "def fixupFileNames(process):\n if not hasattr(process.source, \"fileNames\"):\n process.source.fileNames = cms.untracked.vstring()\n return", "def __set_name__(self, 
cls, name):\n pass", "def Shuriken(self):\n\t\tprint(self.name.title() + \" is now shotting.\")", "def task(self, name):\n pass", "def name(self) -> str: # pragma: no cover", "def __call__(self):\n pass", "def __call__(self):\n pass", "def no_overwrite_example():", "def run(self): \r\n return", "def plugin_name(self):", "def processing(self):\n pass", "def name(self, new_name: str) -> None:\n raise NotImplementedError()", "def handle_file_name(self):\r\n self.tmp_name = (os.path.basename(self.source_file_name)).split('.')[0]\r\n result_name = self.tmp_name + '_result_'\r\n log_name = self.tmp_name + '_log.csv'\r\n \r\n self.result_file_name = os.path.join(self.save_path , result_name) \r\n self.log_file_name = os.path.join(self.log_path , log_name)", "def _adjust_gs_swift_bug(self, name):\n if name:\n return name.replace(\"/\", \"\")\n else:\n return name", "def use(self):\n pass", "def post_execute(self):", "def set_name(self,name):\r\n self._name = __name", "def get_name(self) -> str:\n return \"uncrustify\"", "def get_name(self):", "def get_name(self):", "def function(self):\n raise NotImplementedError", "def wantsNametag(self):\n return 0", "def get_name():", "def transact(self):", "def transact(self):", "def __def_function__():\n pass", "def rename(self, name):\n return _coconut_tail_call(self.__class__, name)", "def _action(self):\n pass", "def rename(self,newName):\n self.userName = newName", "def pre_execute(self):", "def hello(): #status: WIP\r\n pass", "def test_6a_change_file_name(self):\n if (not GST.logged_in) or (not GST.data_testing_swift_mounted):\n raise unittest.SkipTest(\"Skipped for failed login or failed mounting container.\")\n elif not GST.rename_file_test_ready:\n raise unittest.SkipTest(\"Skipped for failed to prepare renaming test.\")\n self.dismiss_dialogs()\n function = js_func[\"rename\"] % (GST.gs_file_paths[\"file_to_rename_path\"], GST.gs_file_paths[\"after_rename_path\"])\n try:\n self.send_request(function, \"rename()\")\n except Exception as e:\n raise RenameException(\"Failed to rename the file: \" + e.__str__())\n try:\n response = self.get_response()\n assert \"Success\" in response\n self.refresh_page()\n except AssertionError:\n raise RenameException(\"Failed to rename the file: \" + response)", "def set_filename(self, file_name):" ]
[ "0.6129317", "0.6021787", "0.5983167", "0.59377337", "0.59124655", "0.5910845", "0.5819237", "0.57898045", "0.57898045", "0.5778516", "0.5760022", "0.5747388", "0.5742715", "0.56831336", "0.56831336", "0.56831336", "0.56831336", "0.56460947", "0.5617484", "0.5591955", "0.55917186", "0.55884457", "0.55796283", "0.5564985", "0.5563565", "0.5513896", "0.5504558", "0.54955345", "0.5489025", "0.54766643", "0.54745084", "0.54745084", "0.54649407", "0.5456285", "0.5444433", "0.5441439", "0.5441366", "0.54345787", "0.54319704", "0.54313695", "0.54313695", "0.54309577", "0.5430041", "0.54247135", "0.54171115", "0.5410531", "0.5409297", "0.54045236", "0.53998584", "0.53998584", "0.53998584", "0.53998584", "0.53998584", "0.53998584", "0.5399758", "0.53897697", "0.5389063", "0.538529", "0.5384183", "0.5379851", "0.536246", "0.53594345", "0.53594345", "0.53594345", "0.53594345", "0.53594345", "0.53533876", "0.5344365", "0.53440386", "0.5342188", "0.533726", "0.5331874", "0.53316975", "0.53306764", "0.53306764", "0.5329526", "0.53268373", "0.5317787", "0.5317278", "0.531559", "0.5312765", "0.53113085", "0.5310851", "0.5310381", "0.53040135", "0.5296868", "0.5289083", "0.5289083", "0.5288054", "0.52837944", "0.52808684", "0.5276773", "0.5276773", "0.52740395", "0.5273917", "0.52728766", "0.5272804", "0.52725726", "0.52658165", "0.52627206", "0.52610546" ]
0.0
-1
Builds the selection spec.
def build_selection_spec(client_factory, name):
    sel_spec = client_factory.create('ns0:SelectionSpec')
    sel_spec.name = name
    return sel_spec
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_selection ( self ,\n tag , \n algotype ,\n inputs , \n *args ,\n **kwargs ) :\n sel_tag = '%s_Selection' % tag\n sel_name = 'Sel%sFor%s' % ( tag , self.name() )\n #\n ## check existing selection\n #\n sel = self._selection ( sel_tag )\n if sel : return sel \n\n #\n ## adjust a bit the arguments\n if not kwargs.has_key('Preambulo') :\n kwargs ['Preambulo' ] = self['Preambulo']\n\n if not kwargs.has_key( 'ParticleCombiners' ) :\n kwargs ['ParticleCombiners'] = { '' : 'LoKi::VertexFitter:PUBLIC' } \n \n # \n ## use \"simple-selection\"\n #\n from PhysSelPython.Wrappers import SimpleSelection\n sel = SimpleSelection (\n sel_name ,\n algotype ,\n inputs , \n *args ,\n **kwargs )\n # \n return self._add_selection( sel_tag , sel )", "def makeSelection(self, selection=\"\"):\n\n\t\tif selection == \"\":\n\t\t\tprint \"usage: makeSelection(selection)\"\n\n\t\tsel_string = self.parseMacros(selection)\n\n\t\t# --- split by \";\" --- #\n\t\ttmp = []\n\t\tcols = []\n\t\tcols = sel_string.split(\";\")\n\t\tfor col in cols:\n\t\t\tinverse = False\n\t\t\tif col == \"\":\n\t\t\t\tcontinue\n\n\t\t\ttmp = string.split(col, \"=\")\n\t\t\tif \"!\" in tmp[0]:\n\t\t\t\tinverse = True\n\n\t\t\tif \"resi\" in tmp[0]:\n\t\t\t\tself.parseResI(tmp[1])\n\t\t\t\tself.invresi = inverse\n\t\t\telif \"resn\" in tmp[0]:\n\t\t\t\tself.parseResN(tmp[1])\n\t\t\t\tself.invresn = inverse\n\t\t\telif \"name\" in tmp[0]:\n\t\t\t\tself.parseAtom(tmp[1])\n\t\t\t\tself.invatom = inverse\n\t\t\telif \"element\" in tmp[0]:\n\t\t\t\tself.parseElement(tmp[1])\n\t\t\t\tself.invelement = inverse\t\n\t\t\telif \"chain\" in tmp[0]:\n\t\t\t\tself.parseChain(tmp[1])\n\t\t\t\tself.invchain = inverse\n\t\t\telif \"type\" in tmp[0]:\n\t\t\t\tself.parseType(tmp[1])\n\t\t\t\tself.invtype = inverse\n\t\t\telif \"cat\" in tmp[0]:\n\t\t\t\tself.parseCat(tmp[1])\n\t\t\t\tself.invcat = inverse\n\t\t\telif \"atomid\" in tmp[0]:\n\t\t\t\tself.parseAtomid(tmp[1])\n\t\t\t\tself.invatomid = inverse\n\t\t\telif \"BB\" in tmp[0]:\n\t\t\t\tself.parseAtom(\" N , CA , C , O \")\n\t\t\t\tself.invatom = False\n\t\t\telif \"CEN\" in tmp[0]:\n\t\t\t\tself.parseAtom(\" N , CA , C , O , CB \")\n\t\t\t\tself.invatom = False\n\t\t\telif \"SC\" in tmp[0]:\n\t\t\t\tself.parseAtom(\" N , CA , C , O \")\n\t\t\t\tself.invatom = True\n\t\t\telif \"HET\" in tmp[0]:\n\t\t\t\tself.parseType(\"HETATM\")\n\t\t\t\tself.invtype = inverse\n\t\t\telse:\n\t\t\t\tprint \"unrecognized selector: \",tmp[0]\n\t\t\t\tsys.exit()", "def build_sel_opt(self, this_param, sel_blob):\n for sel in sel_blob:\n this_sel = etree.SubElement(this_param, 'option', selected=sel['selected'], value=sel['value'])\n this_sel.text = sel['value']", "def create_select(qualifier, lines, select_id=None):\n options = {} #{ option : [Label]}\n for label in lines.keys():\n option = qualifier(label)\n if (option not in options):\n options[option] = []\n options[option].append(label)\n option_list = list(options.keys())\n option_list.sort()\n print '<select class=\"lines\"',\n if select_id is not None:\n print 'id=%s' % qa(select_id)\n print 'multiple=\"true\" size=\"10\" onchange=\"updateSvg();\">'\n for option in option_list:\n print '<option value=' + qa('[' + \n reduce(lambda x,y:x+json.dumps(str(y))+',',options[option],\"\")[0:-1]\n + ']') + '>'+qe(option)+'</option>'\n print '</select>'", "def __make_sel(selection):\n sel = []\n param = []\n for key, value in selection.iteritems(): \n if key == \"fn\":\n if value.find('%') >= 0:\n sel.append(\"irods_filepath like %s\")\n else:\n 
sel.append(\"irods_filepath = %s\")\n elif key == \"expid\":\n sel.append(\"exper_id = %s\".format(value))\n elif key == 'runnum':\n sel.append(\"runnum = %s\".format(value))\n elif key == 'status' and value:\n sel.append(\"status = %s\")\n else:\n continue\n param.append(value)\n\n q = \"WHERE {}\".format(\" AND \".join(sel)) if sel else \"\"\n return q, param", "def layout_selection(self):\n select_txt = wx.StaticText(self, -1, 'Selection Options')\n select_txt.SetForegroundColour('blue')\n self.selection_cbox = wx.ComboBox(self, -1, style=wx.CB_READONLY)\n list_of_options = ['Select all Data',\n 'Unselect all Data',\n 'Select all Data 1D',\n 'Unselect all Data 1D',\n 'Select all Data 2D',\n 'Unselect all Data 2D']\n for option in list_of_options:\n self.selection_cbox.Append(str(option))\n self.selection_cbox.SetValue('Select all Data')\n wx.EVT_COMBOBOX(self.selection_cbox, -1, self._on_selection_type)\n self.sizer5.AddMany([(select_txt, 0, wx.ALL, 5),\n (self.selection_cbox, 0, wx.ALL, 5)])\n self.enable_selection()", "def _build_conditional(self):\n # HERE\n self.output_sect = etree.SubElement(self.inputs, 'conditional', name='output_opt')\n self.output_sect_sel = etree.SubElement(self.output_sect, 'param', name='output_opt_sel', type='select', label='Additional output parameters?')\n self.opt_yes = etree.SubElement(self.output_sect_sel, 'option', value='yes')\n self.opt_yes.text = 'yes'\n self.opt_no = etree.SubElement(self.output_sect_sel, 'option', value='no', selected='true')\n self.opt_no.text = 'no'\n self.when_yes = etree.SubElement(self.output_sect, 'when', value='yes')", "def build_inputs_out_sel(self, params, parent):\n for param in params:\n new_name = param['name'] + '_sel'\n new_label = param['name'].replace('_', ' ').title()\n new_arg = '--' + new_name\n this_param = etree.SubElement(parent, 'param', name=new_name, argument=new_arg, type='boolean',\n truevalue=new_arg, falsevalue='', optional='true', checked='false',\n label=new_label, help=param['help'])", "def build(buffer,load_default=None):\n assert buffer is not None\n assert isinstance(buffer.lines,list)\n\n # compute 'load_default' flag\n # (maybe this block can be removed, as it seems we always set load_default to non-None value (to be confirmed))\n if load_default is None:\n if buffer.filename==sdconst.SELECTION_FROM_CMDLINE:\n load_default=False # don't load default files if selection is from command args\n else:\n load_default=True # load default files if selection is from stdin or file\n\n # create buffer selection\n selection=Selection()\n\n # store outer attributes ('outer' means attributes not stored in selection file)\n selection.filename=buffer.filename\n selection.path=buffer.path\n\n # store inner attributes ('inner' means attributes stored in selection file)\n parse_buffer(buffer.lines,selection)\n\n # merge some outer attributes with inner attributes (else they are not returned by merge_facets() method)\n process_parameter(\"selection_filename=%s\"%selection.filename,selection)\n process_parameter(\"selection_file=%s\"%selection.path,selection)\n\n # load default (file containing default parameters for all projects)\n default_selection=load_default_file(sdconfig.default_selection_file,load_default)\n\n\n # NOTE\n #\n # For project level default file to be loaded, the following must be true\n #\n # \"project must be specified in cli parameter or in the selection file or \n # in the global default file\" or inside an identifier (e.g. 
dataset_functional_id)\n\n\n # retrieve projects\n projects=get_projects(selection)\n\n # retrieve default projects\n default_projects=get_default_projects(default_selection)\n\n # \n if len(projects)==0:\n # project not present in CLI nor in 'selection file'\n\n # let's try if project is set in default file\n if len(default_projects)==0:\n project=None # no project set\n elif len(default_projects)==1:\n project=default_projects[0]\n elif len(default_projects)>1:\n print_too_many_projects_set_warning(default_projects)\n project=None # too many project: do not load project level default value\n\n elif len(projects)>0:\n\n # When project(s) are present in CLI or 'selection file',\n # project(s) from default file are ignored.\n # (see #34 for more info)\n if len(default_projects)>0:\n del default_selection.facets['project']\n \n\n if len(projects)==1:\n project=projects[0]\n elif len(projects)>1:\n print_too_many_projects_set_warning(projects)\n project=None # too many project: do not load project level default value\n\n\n # load project default (file containing default parameters for the project)\n project_default_selection=load_default_file(sdconfig.get_project_default_selection_file(project),load_default)\n\n project_default_selection.childs.append(selection) # add selection as child of project_default_selection\n selection.parent=project_default_selection # set project_default_selection as parent of selection\n\n default_selection.childs.append(project_default_selection) # add project_default_selection as child of default_selection\n project_default_selection.parent=default_selection # set default_selection as parent of project_default_selection\n\n return selection", "def __init__(self):\n \n # Call the super contructor\n super(Select, self).__init__(0, 0, 200, 50)\n\n # Assign personalisation attributes\n self.placeholder = \"Choose a value\"\n self.font_name: str = None\n self.font_size: int = 12\n\n self.bg_color: tuple = (0, 0, 0, 0)\n self.bg_hover: tuple = (255, 255, 255, 30)\n self.bg_press: tuple = None\n\n self.label_color: tuple = (255, 255, 255, 255)\n self.label_hover: tuple = None\n self.label_press: tuple = None\n\n self.border_color: tuple = (255, 255, 255, 255)\n self.border_hover: tuple = None\n self.border_press: tuple = None\n self.border_width: int = 4\n\n self.option_height: int = 45\n self.option_margin: int = 5\n self.option_font_name = None\n self.option_font_size = 12\n\n self.option_bg_color: tuple = (0, 0, 0, 0)\n self.option_bg_hover: tuple = (255, 255, 255, 30)\n self.option_bg_press: tuple = None\n self.option_bg_select: tuple = (255, 255, 255, 60)\n\n self.option_label_color: tuple = (255, 255, 255, 255)\n self.option_label_hover: tuple = None\n self.option_label_press: tuple = None\n self.option_label_select: tuple = None\n\n # Assign internal attributes\n self._is_hovered: bool = False\n self._is_pressed: bool = False\n self._is_opened: bool = False\n self._is_inverted: bool = False\n self._options: list = list()\n self._current_select: int = -1\n self._current_hover: int = -1\n\n self._bg = pyglet.shapes.Rectangle\n self._label = pyglet.text.Label\n self._border = Border\n\n self._option_border: Border = None\n self._options_bg: list = list()\n self._options_label: list = list()", "def build_subsets(self):\n self.all = nrn.SectionList()\n self.all.wholetree(sec=self.soma)", "def get_selection(self, selection_name, format=None):", "def sel_prep(self):\n sel_blob = []\n for sel in self.blob['options']:\n if self.blob['defaultValue'] == sel['name']:\n 
sel_blob.append({'value': sel['name'], 'selected': 'true'})\n else:\n sel_blob.append({'value': sel['name'], 'selected': 'false'})\n\n return sel_blob", "def build(self) -> None:", "def build(self):\n raise NotImplementedError(\"This should have been implemented.\")", "def _get_selection_params(self):\n # lazy imports to avoid hard dependency\n from tsfresh.defaults import (\n FDR_LEVEL,\n HYPOTHESES_INDEPENDENT,\n TEST_FOR_BINARY_TARGET_BINARY_FEATURE,\n TEST_FOR_BINARY_TARGET_REAL_FEATURE,\n TEST_FOR_REAL_TARGET_BINARY_FEATURE,\n TEST_FOR_REAL_TARGET_REAL_FEATURE,\n )\n\n # Set defaults\n selection_params = {\n \"test_for_binary_target_binary_feature\": TEST_FOR_BINARY_TARGET_BINARY_FEATURE, # noqa: E501\n \"test_for_binary_target_real_feature\": TEST_FOR_BINARY_TARGET_REAL_FEATURE,\n \"test_for_real_target_binary_feature\": TEST_FOR_REAL_TARGET_BINARY_FEATURE,\n \"test_for_real_target_real_feature\": TEST_FOR_REAL_TARGET_REAL_FEATURE,\n \"fdr_level\": FDR_LEVEL,\n \"hypotheses_independent\": HYPOTHESES_INDEPENDENT,\n }\n\n # Replace defaults with user defined parameters\n for name in selection_params.keys():\n value = getattr(self, name)\n if value is not None:\n selection_params[name] = value\n\n return selection_params", "def _build(self, **kwargs):", "def _build(self):", "def _build(self):", "def add_selector(self, listing):\n # We will be able to select X-frames and its boundaries\n # will be stored in the given list\n\n def onselect(xmin, xmax):\n# indmin, indmax = np.searchsorted(x, (xmin, xmax))\n# indmax = min(len(x)-1, indmax)\n indmin = xmin\n indmax = xmax\n onselect.listing.append([indmin, indmax])\n print (onselect.listing)\n \n onselect.listing = listing\n \n # set useblit True on gtkagg for enhanced performance\n ax = self.axes\n span = SpanSelector(ax, onselect, 'horizontal', useblit=True,\n rectprops=dict(alpha=0.5, facecolor='red') )\n \n self.widget_list.append(span)", "def build_options(self, identifier: Optional[str]) -> BuildOptions:\n\n with self.reader.identifier(identifier):\n before_all = self.reader.get(\"before-all\", sep=\" && \")\n\n build_frontend_str = self.reader.get(\"build-frontend\", env_plat=False)\n environment_config = self.reader.get(\n \"environment\", table={\"item\": '{k}=\"{v}\"', \"sep\": \" \"}\n )\n environment_pass = self.reader.get(\"environment-pass\", sep=\" \").split()\n before_build = self.reader.get(\"before-build\", sep=\" && \")\n repair_command = self.reader.get(\"repair-wheel-command\", sep=\" && \")\n\n dependency_versions = self.reader.get(\"dependency-versions\")\n test_command = self.reader.get(\"test-command\", sep=\" && \")\n before_test = self.reader.get(\"before-test\", sep=\" && \")\n test_requires = self.reader.get(\"test-requires\", sep=\" \").split()\n test_extras = self.reader.get(\"test-extras\", sep=\",\")\n build_verbosity_str = self.reader.get(\"build-verbosity\")\n\n build_frontend: BuildFrontend\n if build_frontend_str == \"build\":\n build_frontend = \"build\"\n elif build_frontend_str == \"pip\":\n build_frontend = \"pip\"\n else:\n msg = f\"cibuildwheel: Unrecognised build frontend '{build_frontend_str}', only 'pip' and 'build' are supported\"\n print(msg, file=sys.stderr)\n sys.exit(2)\n\n try:\n environment = parse_environment(environment_config)\n except (EnvironmentParseError, ValueError):\n print(\n f'cibuildwheel: Malformed environment option \"{environment_config}\"',\n file=sys.stderr,\n )\n traceback.print_exc(None, sys.stderr)\n sys.exit(2)\n\n # Pass through environment variables\n if 
self.platform == \"linux\":\n for env_var_name in environment_pass:\n try:\n environment.add(env_var_name, os.environ[env_var_name])\n except KeyError:\n pass\n\n if dependency_versions == \"pinned\":\n dependency_constraints: Optional[\n DependencyConstraints\n ] = DependencyConstraints.with_defaults()\n elif dependency_versions == \"latest\":\n dependency_constraints = None\n else:\n dependency_versions_path = Path(dependency_versions)\n dependency_constraints = DependencyConstraints(dependency_versions_path)\n\n if test_extras:\n test_extras = f\"[{test_extras}]\"\n\n try:\n build_verbosity = min(3, max(-3, int(build_verbosity_str)))\n except ValueError:\n build_verbosity = 0\n\n manylinux_images: Dict[str, str] = {}\n musllinux_images: Dict[str, str] = {}\n if self.platform == \"linux\":\n all_pinned_docker_images = _get_pinned_docker_images()\n\n for build_platform in MANYLINUX_ARCHS:\n pinned_images = all_pinned_docker_images[build_platform]\n\n config_value = self.reader.get(\n f\"manylinux-{build_platform}-image\", ignore_empty=True\n )\n\n if not config_value:\n # default to manylinux2014\n image = pinned_images.get(\"manylinux2014\")\n elif config_value in pinned_images:\n image = pinned_images[config_value]\n else:\n image = config_value\n\n assert image is not None\n manylinux_images[build_platform] = image\n\n for build_platform in MUSLLINUX_ARCHS:\n pinned_images = all_pinned_docker_images[build_platform]\n\n config_value = self.reader.get(f\"musllinux-{build_platform}-image\")\n\n if config_value is None:\n image = pinned_images[\"musllinux_1_1\"]\n elif config_value in pinned_images:\n image = pinned_images[config_value]\n else:\n image = config_value\n\n musllinux_images[build_platform] = image\n\n return BuildOptions(\n globals=self.globals,\n test_command=test_command,\n test_requires=test_requires,\n test_extras=test_extras,\n before_test=before_test,\n before_build=before_build,\n before_all=before_all,\n build_verbosity=build_verbosity,\n repair_command=repair_command,\n environment=environment,\n dependency_constraints=dependency_constraints,\n manylinux_images=manylinux_images or None,\n musllinux_images=musllinux_images or None,\n build_frontend=build_frontend,\n )", "def build(self):", "def build(self):", "def build(self):", "def build (self):\n raise NotImplementedError", "def _create_features_dropdown(self, name=_features_dropdown):\n fts = sorted(self.features)\n d = Select(options=fts, css_classes=[self._features_dropdown], name=name)\n return d", "def build(self):\n raise NotImplementedError", "def build():", "def build(self):\n with self.set_master(sticky=\"nsew\", row_weights=[1], column_weights=[0, 1], auto_columns=0):\n self.build_category_canvas()\n with self.set_master(sticky=\"nsew\", row_weights=[0, 1, 0], column_weights=[1, 1]):\n self.build_previous_range_button(row=0, column=0)\n self.build_hidden_fields_checkbutton(row=0, column=1)\n with self.set_master(sticky=\"nsew\", row=1, column=0, row_weights=[1], column_weights=[1]):\n self.build_entry_frame()\n with self.set_master(sticky=\"nsew\", row=1, column=1, row_weights=[1], column_weights=[1]):\n self.build_field_frame()\n self.build_next_range_button(row=2, column=0)", "def build(self, spec, prefix):\n make()", "def build_traversal_spec(client_factory, name, spec_type, path, skip,\r\n select_set):\r\n traversal_spec = client_factory.create('ns0:TraversalSpec')\r\n traversal_spec.name = name\r\n traversal_spec.type = spec_type\r\n traversal_spec.path = path\r\n traversal_spec.skip = skip\r\n 
traversal_spec.selectSet = select_set\r\n return traversal_spec", "def generate_specs_build(self):\n from django_swagger_utils.drf_server.generators.swagger_generator import SwaggerGenerator\n\n swagger_gen = SwaggerGenerator(self.parser, self.paths, self.app_name)\n # generating request_response files\n swagger_gen.generate_request_response()\n # testing properties\n swagger_gen.generate_definitions()\n # generating global parameters\n swagger_gen.generate_parameters()\n # generating global response\n swagger_gen.generate_responses()\n # generating urls\n swagger_gen.generate_urls()", "def create_selections_column(self):\n\n self.selections = dict(\n city=tk.StringVar(None, \"chicago\"),\n day=tk.StringVar(None, \"All\"),\n month=tk.StringVar(None, \"All\"),\n )\n\n self.city_frame = tk.LabelFrame(self, text=\"City\", padx=5, pady=5)\n self.city_frame.grid(row=0, column=0, padx=5, pady=5, sticky=\"we\")\n radio_buttons_from_list(\n self.city_frame,\n CITY_DATA.keys(),\n self.selections[\"city\"],\n self.prompt_load_data,\n )\n\n self.day_frame = tk.LabelFrame(self, text=\"Week Day\", padx=5, pady=5)\n self.day_frame.grid(row=1, column=0, padx=5, pady=5, sticky=\"we\")\n radio_buttons_from_list(\n self.day_frame, DAY_OPTIONS, self.selections[\"day\"], self.prompt_load_data\n )\n\n self.month_frame = tk.LabelFrame(self, text=\"Month\", padx=5, pady=5)\n self.month_frame.grid(row=2, column=0, padx=5, pady=5, sticky=\"we\")\n radio_buttons_from_list(\n self.month_frame,\n MONTH_OPTIONS,\n self.selections[\"month\"],\n self.prompt_load_data,\n )", "def get_select_directive_dict(self, request):\n return {\n 'preview_container_css_selector': '.cradmin-legacy-multiselect2-itemvalue',\n 'preview_css_selector': '.cradmin-legacy-multiselect2-selected-item',\n 'item_wrapper_css_selector': 'li',\n 'target_dom_id': self.get_target_dom_id(),\n 'custom_data': self.get_custom_data(),\n 'is_selected': self.kwargs.get('is_selected', False),\n }", "def build_subsets(self):\n\t\tself.all = h.SectionList()\n\t\tself.all.wholetree(sec=self.soma)\n\n\t\t# morphological section lists\n\t\tself.axon_list = []\n\t\tself.axosomatic_list = []\n\t\tself.apicalshaftoblique_list = []\n\t\tself.apicaltree_list = []\n\t\tself.tuft_list = []\n\t\tself.soma_list = []\n\t\tself.basal_list = []\n\n\t\tself.axon_list.append(hillock)\n\t\tself.axon_list.append(iseg)\n\t\tself.axon_list.append(axon)\n\n\t\tself.axosomatic_list.append(soma)\n\t\tself.axosomatic_list.append(basal)\n\t\tself.axosomatic_list.append(hillock)\n\t\tself.axosomatic_list.append(iseg)\n\t\tself.axosomatic_list.append(axon)\n\n\t\tself.apicalshaftoblique_list.append(apical)\n\n\t\tself.apicaltree_list.append(apical)\n\t\tself.apicaltree_list.append(tuft)\n\n\t\tself.tuft_list.append(tuft)\n\n\t\tself.soma_list.append(soma)\n\n\t\tself.basal_list.append(basal)\n\n\t# Create lists of cell parts that contain each ion channel type\n\t\tself.nat_list = []\n\t\tself.kslow_list = []\n\t\tself.kfast_list = []\n\t\tself.ih_list = 
[]\n\n\t\tself.ih_list.append(basal)\n\t\tself.ih_list.append(apical)\n\t\tself.ih_list.append(tuft)\n\n\t\tself.excsyn_list.append(basal)\n\t\tself.excsyn_list.append(apical)\n\t\tself.excsyn_list.append(tuft)\n\n\t\tself.inhdendsyn_list.append(basal)\n\t\tself.inhdendsyn_list.append(apical)\n\n\t\tself.inhsomasyn_list.append(soma)\n\n\t\tself.nat_list.append(soma)\n\t\tself.nat_list.append(hillock)\n\t\tself.nat_list.append(iseg)\n\t\tself.nat_list.append(apical)\n\t\tself.nat_list.append(tuft)\n\n\t\tself.kfast_list.append(soma)\n\t\tself.kfast_list.append(apical)\n\t\tself.kfast_list.append(tuft)\n\n\t\tself.kslow_list.append(soma)\n\t\tself.kslow_list.append(apical)\n\t\tself.kslow_list.append(tuft)", "def build(self):\n pass", "def build(self):\n pass", "def build(_):", "def _assemble(self):\n selectop = self._headopt and f'{self._headopt}' or ''\n select = f'{selectop} ' + ', '.join(self._head)\n froms = 'from ' + ', '.join(self._tables)\n joins = ' '.join(self._joins)\n wheres, wkw = self._build_where()\n\n order = ''\n if self._order:\n order = f'order by {self._order[0]} {self._order[1]}'\n limit = ''\n if self._limit:\n limit = f'limit {self._limit}'\n\n kw = self._kw.copy()\n kw.update(wkw)\n return (f'select {select} '\n f'{froms} '\n f'{joins} '\n f'{wheres} '\n f'{order} '\n f'{limit}'\n ), kw", "def build_document(self):\n pass", "def get_specification(self) -> Dict:\n specification = {\n 'version': VERSION,\n 'metadata': {\n 'twoColumn': True,\n 'layout': self.autolayout\n },\n 'nodes': []\n }\n\n def strip_io(io_list: list, direction) -> list:\n \"\"\"\n Strips every input/output from metadata and leaves only\n `name` and `type` keys.\n \"\"\"\n return [\n {\n 'name': io['name'],\n 'type': io['type'],\n 'direction': direction\n }\n for io in io_list\n ]\n\n toremove = set()\n for key, node in self.nodes.items():\n try:\n node_cls = load_class(node.cls_name)\n except (ModuleNotFoundError, ImportError, Exception) as err:\n msg = f'Could not add {node_cls}. 
Reason:'\n _LOGGER.warn('-' * len(msg))\n _LOGGER.warn(msg)\n _LOGGER.warn(err)\n _LOGGER.warn('-' * len(msg))\n toremove.add(key)\n continue\n parameterschema = node_cls.form_parameterschema()\n\n properties = []\n for name, props in parameterschema['properties'].items():\n new_property = {'name': name}\n\n if 'default' in props:\n new_property['default'] = props['default']\n\n if 'description' in props:\n new_property['description'] = props['description']\n\n def add_default(default_val):\n if new_property.get('default') is None:\n new_property['default'] = default_val\n\n # Case for an input with range defined\n if 'enum' in props:\n new_property['type'] = 'select'\n new_property['values'] = list(map(str, props['enum']))\n add_default(new_property['values'][0])\n # Case for a single value input\n elif 'type' in props:\n if 'array' in props['type']:\n new_property['type'] = 'list'\n if 'items' in props and 'type' in props['items']:\n dtype = props['items']['type']\n new_property['dtype'] = dtype\n add_default([])\n elif 'boolean' in props['type']:\n new_property['type'] = 'checkbox'\n add_default(False)\n elif 'string' in props['type']:\n new_property['type'] = 'text'\n add_default('')\n elif 'integer' in props['type']:\n new_property['type'] = 'integer'\n add_default(0)\n elif 'number' in props['type']:\n new_property['type'] = 'number'\n add_default(0)\n elif 'object' in props['type']:\n # Object arguments should be defined in specification\n # as node inputs, rather than properties\n new_property = None\n else:\n new_property['type'] = 'text'\n add_default('')\n # If no type is specified then text is used\n else:\n new_property['type'] = 'text'\n add_default('')\n\n if new_property is not None:\n properties.append(new_property)\n\n specification['nodes'].append({\n 'name': node.name,\n 'type': node.type,\n 'category': node.category,\n 'properties': properties,\n 'interfaces': strip_io(\n self.io_mapping[node.type]['inputs'],\n 'input'\n ) + strip_io(\n self.io_mapping[node.type]['outputs'],\n 'output'\n )\n })\n\n for key in toremove:\n del self.nodes[key]\n return specification", "def __getSelectionFilter(self):\n \n selectionPairs = []\n selectionPairs.append(('field','field'))\n selectionPairs.append(('spw','spw'))\n selectionPairs.append(('polarization','correlation'))\n selectionPairs.append(('baseline','antenna'))\n selectionPairs.append(('time','timerange'))\n selectionPairs.append(('scan','scan'))\n selectionPairs.append(('uvdist','uvrange'))\n selectionPairs.append(('scanintent','intent'))\n selectionPairs.append(('observation','observation'))\n return self.__generateFilter(selectionPairs)", "def generate_filtergroup_selector(table, evaluation):\n table = table.copy()\n gtindex = [c for c in table.columns if c[-1] == \"groupTags\"][0]\n table[\"Filtergroups\"] = list(map(\"|{}|\".format, table[gtindex]))\n\n out = '<div id=\"ipet-long-table-filter col\"><label class=\"col-form-label text-left\">Select filtergroups:<select id=\"ipet-long-filter-select\" class=\"custom-select\">' # noqa\n\n for fg in evaluation.getActiveFilterGroups():\n fg_name = fg.getName()\n fg_data = evaluation.getInstanceGroupData(fg)\n\n # don't show empty filtergroups\n if len(fg_data) == 0:\n continue\n\n # construct new option string\n newoption = '<option value=\"' + fg_name + '\">' + fg_name + '</option>' # noqa\n\n # update selector strin\n out = out + newoption\n\n maxfgstr = \",\".join([\"|{}|\".format(fg.getName()) for fg in evaluation.getActiveFilterGroups()])\n maxlen = len(maxfgstr)\n\n 
pd.set_option('max_colwidth', max(pd.get_option('display.max_colwidth'), maxlen))\n out = out + '</select></label></div>'\n return out, table[\"Filtergroups\"]", "def selection(self):\r\n if not self._selection:\r\n print(\"not working\")\r\n return None\r\n\r\n year, month = self._date.year, self._date.month\r\n if len(str(month))==1:\r\n month = \"0{}\".format(month)\r\n return (\"{}{}{}\".format(year, month, self._selection[0]), \r\n \"{} / {} / {}\".format(year, month, self._selection[0]))", "def create_gui(self):\n\n selectors_widgets = list()\n\n for n in range(4):\n\n selectors_widgets.append(wd.Dropdown(\n options={'': 0, 'Orange': 1, 'Red': 2, 'Blue': 3, 'Green': 4},\n value=0,\n disabled=False,\n layout={'width': '148px'}\n ))\n\n self.confirm_button.on_click(\n self.create_combination_and_rate_function())\n self.user_interact.children = [self.selectors, self.confirm_button]\n\n self.selectors.children = selectors_widgets", "def _create_selector_widget(self,frame,name,widget_options):\n #param = self.get_parameter_object(name)\n #self._update_translator(name,param)\n\n ## sort the range for display\n # CEBALERT: extend OptionMenu so that it\n # (a) supports changing its option list (subject of a previous ALERT)\n # (b) supports sorting of its option list\n # (c) supports selecting a new default\n new_range,widget_options = self._X(name,widget_options)\n tkvar = self._tkvars[name]\n\n # Combobox looks bad with standard theme on my ubuntu\n # (and 'changed' marker - blue text - not visible).\n w = Combobox(frame,textvariable=tkvar,\n values=new_range,state='readonly',\n **widget_options)\n\n # Combobox (along with Checkbutton?) somehow sets its\n # associated textvariable without calling that textvariable's\n # set() method. Therefore, to update the Selector's help text\n # when an item is selected, we bind to the\n # <<ComboboxSelected>> event.\n def _combobox_updated(event,name=name):\n w = self.representations[name]['widget']\n help_text = getdoc(\n self._string2object(\n name,\n self._tkvars[name]._original_get()))\n\n self.balloon.bind(w,help_text)\n\n w.bind(\"<<ComboboxSelected>>\",_combobox_updated)\n\n help_text = getdoc(self._string2object(name,tkvar._original_get()))\n self.balloon.bind(w,help_text)\n return w", "def build_opt(self):\n student_taken = self.student.student_hist[['FULL','DESCR.y']]\n\n requiments = self.major_map.cleaned_major_data[['REQID','L','MA','CS','HU','SB','SQ','SG','C','G','H','Honor']]\n\n requiments.is_copy = False\n requiments['REQID'] = requiments['REQID'].astype('str')\n\n requirements_to = pd.merge(self.graph, requiments, how='inner', left_on='REQ', right_on='REQID')\n \n # requirements_to = pd.merge(self.graph, requiments, how='left', left_on='REQ', right_on='REQID')\n classes_from = pd.merge(self.graph, student_taken, how='left', left_on='CLS', right_on='FULL')\n\n return classes_from, requirements_to", "def _generateSortBySelector(self, dParams, sPreamble, sPostamble):\n from testmanager.core.testresults import TestResultLogic;\n\n if self.ksParamTestResultsSortBy in dParams:\n enmResultSortBy = dParams[self.ksParamTestResultsSortBy];\n del dParams[self.ksParamTestResultsSortBy];\n else:\n enmResultSortBy = TestResultLogic.ksResultsSortByRunningAndStart;\n\n sHtmlSortBy = '<form name=\"TimeForm\" method=\"GET\"> Sort by\\n';\n sHtmlSortBy += sPreamble;\n sHtmlSortBy += '\\n <select name=\"%s\" onchange=\"window.location=' % (self.ksParamTestResultsSortBy,);\n sHtmlSortBy += '\\'?%s&%s=\\' + ' % (webutils.encodeUrlParams(dParams), 
self.ksParamTestResultsSortBy)\n sHtmlSortBy += 'this.options[this.selectedIndex].value;\" title=\"Sorting by\">\\n'\n\n fSelected = False;\n for enmCode, sTitle in TestResultLogic.kaasResultsSortByTitles:\n if enmCode == enmResultSortBy:\n fSelected = True;\n sHtmlSortBy += ' <option value=\"%s\"%s>%s</option>\\n' \\\n % (enmCode, ' selected=\"selected\"' if enmCode == enmResultSortBy else '', sTitle,);\n assert fSelected;\n sHtmlSortBy += ' </select>\\n';\n sHtmlSortBy += sPostamble;\n sHtmlSortBy += '\\n</form>\\n'\n return sHtmlSortBy;", "def _build_dropdown(options):\n return [(x, x) for x in options]", "def from_selection(cls):\n guid = compas_rhino.select_point()\n return cls.from_guid(guid)", "def build_path(self):\r\n return self.selmgr.select_path()", "def _build_pod_spec(self):\n logger.debug(\"Building Pod Spec\")\n crds = []\n try:\n crds = [\n yaml.load(Path(f).read_text())\n for f in [\n \"files/configs.config.gatekeeper.sh.yaml\",\n \"files/constrainttemplates.templates.gatekeeper.sh.yaml\",\n \"files/constraintpodstatuses.status.gatekeeper.sh.yaml\",\n \"files/constrainttemplatepodstatuses.status.gatekeeper.sh.yaml\",\n ]\n ]\n except yaml.YAMLError as exc:\n logger.error(\"Error in configuration file:\", exc)\n\n crd_objects = [\n CustomResourceDefintion(crd[\"metadata\"][\"name\"], crd[\"spec\"])\n for crd in crds\n ]\n\n config = self.model.config\n spec_template = {}\n with open(\"files/pod-spec.yaml.jinja2\") as fh:\n spec_template = Template(fh.read())\n\n try:\n image_details = self.image.fetch()\n except OCIImageResourceError as e:\n self.model.unit.status = e.status\n return\n\n template_args = {\n \"crds\": crd_objects,\n \"image_details\": image_details,\n \"imagePullPolicy\": config[\"imagePullPolicy\"],\n \"app_name\": self.app.name,\n \"audit_cli_args\": self._audit_cli_args(),\n \"namespace\": os.environ[\"JUJU_MODEL_NAME\"],\n }\n\n spec = yaml.load(spec_template.render(**template_args))\n\n print(f\"Pod spec: {spec}\")\n return spec", "def _build_impl(self):", "def createSelector(self,type='select',speed=2.0):\n self.selector = self.loadObject(type, scale=2, parent=render, transparency=True, pos=Point2(0,0), glow=1)\n self.selector.hide()\n ival = self.selector.hprInterval((speed), Vec3(0, 0, 360))\n ival.loop()", "def update_selection(self):\n raise NotImplementedError", "def create(cls, selection):\n\t\t\n\t\treturn cls({ true_selector: selection, false_selector: Selection.invert(selection) })", "def build(self):\n\n raise NotImplementedError(\"Implement build() method\")", "def __buildOptionString ( self ):\n\n #-- 1 --\n result = []\n\n #-- 2 --\n # [ result +:= strings defining each element of self.switchSpecs\n # as getopt.getopt requires it ]\n for sw in self.switchSpecs:\n #-- 2 body --\n # [ if sw is a SwitchArg ->\n # result +:= a string defining sw as getopt.getopt\n # requires it ]\n if sw.takesValue:\n result.append ( \"%s:\" % sw.letter )\n else:\n result.append ( sw.letter )\n\n #-- 3 --\n # [ return the strings in result, concatenated ]\n return \"\".join ( result )", "def _write_selec(parameters):\n # Load data\n from ._common import selections\n\n data = deepcopy(selections)\n if parameters[\"selections\"][\"integers\"]:\n data[\"integers\"].update(parameters[\"selections\"][\"integers\"])\n if len(parameters[\"selections\"][\"floats\"]):\n data[\"floats\"] = parameters[\"selections\"][\"floats\"]\n\n # Check floats and overwrite IE(1)\n if data[\"floats\"] is not None and len(data[\"floats\"]):\n if isinstance(data[\"floats\"][0], (list, 
tuple, numpy.ndarray)):\n for x in data[\"floats\"]:\n if len(x) > 8:\n raise ValueError()\n\n data[\"integers\"][1] = len(data[\"floats\"])\n ndim = 2\n\n else:\n if len(data[\"floats\"]) > 8:\n raise ValueError()\n\n data[\"integers\"][1] = 1\n ndim = 1\n else:\n ndim = None\n\n # Formats\n fmt = block_to_format[\"SELEC\"]\n fmt1 = str2format(fmt[1])\n fmt2 = str2format(fmt[2])\n\n # Record 1\n values = [data[\"integers\"][k] for k in sorted(data[\"integers\"].keys())]\n out = write_record(values, fmt1)\n\n # Record 2\n if ndim == 1:\n out += write_record(data[\"floats\"], fmt2)\n elif ndim == 2:\n for x in data[\"floats\"]:\n out += write_record(x, fmt2)\n\n return out", "def build_selection_menu(category, optional=False, headeradd=\"\"):\n menu = FilterSprite._build_menu(category)\n if menu is None:\n return None\n\n # add title part\n menu.insert(\n 0,\n (\"Select \" + category.title() + headeradd, \"Option: \")\n )\n\n # add optional\n if optional:\n menu.append((\"No \" + category.title(), FilterSprite.OPTIONAL))\n\n return menu", "def create_widgets( self ):\n\n self.selectionView = SelectionView()\n self.selectionView.setModel( self.proxyPhotosModel )\n self.selectionView.activated.connect( self.selectionActivation )\n self.selectionView.selectionModel().selectionChanged.connect( self.selectionChange )\n self.selectionView.setColumnHidden( self.ID_COLUMN, True ) # hide the ID\n\n self.selectionBox = QComboBox()\n\n self.selectionBox.addItem( \"all\", \"all\" )\n for state in self.db.get_processing_states():\n self.selectionBox.addItem( state, state )\n\n self.selectionBox.activated.connect( self.selectionTypeActivation )\n\n self.selectionBoxLabel = QLabel( \"&Processing Type:\" )\n self.selectionBoxLabel.setBuddy( self.selectionBox )\n\n self.previewArea = grafwidgets.PhotoPreviewArea()\n\n # informational labels for the photo record.\n self.infoStateLabel = QLabel()\n self.infoSummaryLabel = QLabel()\n self.infoLocationLabel = QLabel()\n self.infoTakenLabel = QLabel()\n self.infoTagsLabel = QLabel()\n\n # dock widget which will hold the selection layout once created\n # in create_layout, for now it gets an empty widget.\n self.selection_dock = QDockWidget()\n self.selection_dock.setFeatures( QDockWidget.DockWidgetMovable )\n self.selection_dock.setWidget( QWidget() )", "def test_option_selection(self):\r\n\r\n # Create options 0-4 and select option 2\r\n self.context['options_value'] = [2]\r\n self.context['options'] = [\r\n {'id': id_num,\r\n 'choice': 'correct',\r\n 'description': '<p>Unescaped <b>HTML {0}</b></p>'.format(id_num)}\r\n for id_num in range(0, 5)]\r\n\r\n xml = self.render_to_xml(self.context)\r\n\r\n # Expect that each option description is visible\r\n # with unescaped HTML.\r\n # Since the HTML is unescaped, we can traverse the XML tree\r\n for id_num in range(0, 5):\r\n xpath = \"//span[@data-id='{0}']/p/b\".format(id_num)\r\n self.assert_has_text(xml, xpath, 'HTML {0}'.format(id_num), exact=False)\r\n\r\n # Expect that the correct option is selected\r\n xpath = \"//span[contains(@class,'selected')]/p/b\"\r\n self.assert_has_text(xml, xpath, 'HTML 2', exact=False)", "def build(self):\n raise NotImplementedError(\"This is an interface method. 
Implement it in subclass.\")", "def gen_widgets(df):\n w = {\n \"a\": widgets.Dropdown(options=[(col, list(df[col])) for col in list(df)]),\n \"alpha\": widgets.FloatSlider(min=0.0, max=1.0, step=0.05),\n \"annot\": widgets.Checkbox(),\n \"aspect\": widgets.FloatText(value=1, step=0.05),\n \"axlabel\": widgets.Text(),\n \"bins\": widgets.IntText(value=10),\n \"bw\": widgets.Dropdown(options=[\"scott\", \"silverman\"]),\n \"capsize\": widgets.FloatText(value=1.0),\n \"cbar\": widgets.Checkbox(),\n \"cbar\": widgets.Checkbox(),\n \"cbar_ax\": widgets.Checkbox(),\n \"center\": widgets.FloatText(value=1.0),\n \"ci\": widgets.FloatSlider(min=0, max=100, value=95, step=0.1),\n \"cmap\": widgets.Text(value=\"viridis\"),\n \"col\": widgets.Dropdown(options=list(df)),\n \"col_wrap\": widgets.IntText(value=10),\n \"color\": widgets.Text(value=\"g\"),\n \"cumulative\": widgets.Checkbox(),\n \"cut\": widgets.FloatText(value=1.0),\n \"data\": widgets.Dropdown(options=[(col, list(df[col])) for col in list(df)]),\n \"data2\": widgets.Dropdown(options=[(col, list(df[col])) for col in list(df)]),\n \"diag_kind\": widgets.Dropdown(options=[\"auto\", \"hist\", \"kde\"]),\n \"dropna\": widgets.Checkbox(),\n \"edgecolor\": widgets.Text(value=\"gray\"),\n \"err_style\": widgets.Dropdown(options=[\"band\", \"bars\"]),\n \"errwidth\": widgets.FloatText(value=1.0),\n \"fit_reg\": widgets.Checkbox(),\n \"fliersize\": widgets.FloatText(value=1.0),\n \"fmt\": widgets.Text(value=\".2g\"),\n \"gridsize\": widgets.IntText(value=100),\n \"height\": widgets.FloatText(value=5),\n \"hist\": widgets.Checkbox(),\n \"hue\": widgets.Dropdown(options=list(df)),\n \"inner\": widgets.Dropdown(options=[\"box\", \"quartile\", \"point\", \"stick\"]),\n \"jitter\": widgets.Checkbox(),\n \"join\": widgets.Checkbox(),\n \"k_depth\": widgets.Dropdown(options=[\"proportion\", \"tukey\", \"trustworthy\"]),\n \"kde\": widgets.Checkbox(),\n \"kernel\": widgets.Dropdown(options=['gau', 'cos', 'biw', 'epa', 'tri', 'triw']),\n \"kind_catplot\": widgets.Dropdown(options=[\"point\", \"bar\", \"strip\", \"swarm\", \"box\", \"violin\", \"boxen\"]),\n \"kind_jointplot\": widgets.Dropdown(options=[\"scatter\", \"reg\", \"resid\", \"kde\", \"hex\"]),\n \"kind_pairplot\": widgets.Dropdown(options=[\"scatter\", \"reg\"]),\n \"kind_relplot\": widgets.Dropdown(options=[\"scatter\", \"line\"]),\n \"label\": widgets.Text(),\n \"legend\": widgets.Dropdown(options={\"brief\": \"brief\", \"full\":\"full\", \"False\": False}),\n \"legend_out\": widgets.Checkbox(),\n \"linecolor\": widgets.Text(\"white\"),\n \"linewidth\": widgets.FloatText(value=1.0),\n \"linewidths\": widgets.FloatText(value=0.0, step=0.01),\n \"logistic\": widgets.Checkbox(),\n \"logx\": widgets.Checkbox(),\n \"lowess\": widgets.Checkbox(),\n \"margin_titles\": widgets.Checkbox(),\n \"marker\": widgets.Dropdown(options=['o', 'v', '^', '<', '>', '8', 's', 'p', '*', 'h', 'H', 'D', 'd', 'P', 'X']),\n \"n_boot\": widgets.IntText(value=1000),\n \"norm_hist\": widgets.Checkbox(),\n \"notch\": widgets.Checkbox(),\n \"order_regression\": widgets.IntText(value=1),\n \"orient\": widgets.Dropdown(options=[\"v\", \"h\"]),\n \"outlier_prop\": widgets.FloatSlider(min=0.0, max=1.0, step=0.001, value=0.007),\n \"palette\": widgets.Text(value='viridis'),\n \"ratio\": widgets.IntText(value=5),\n \"robust\": widgets.Checkbox(),\n \"row\": widgets.Dropdown(options=list(df)),\n \"rug\": widgets.Checkbox(),\n \"saturation\": widgets.FloatSlider(min=0.0, max=1.0, step=0.05, value=1.0),\n \"scale_boxenplot\": 
widgets.Dropdown(options=[\"linear\", \"exponential\", \"area\"]),\n \"scale_float\": widgets.FloatText(value=1.0),\n \"scale_hue\": widgets.Checkbox(),\n \"scale_violinplot\": widgets.Dropdown(options=[\"area\", \"count\", \"width\"]),\n \"scatter\": widgets.Checkbox(),\n \"shade\": widgets.Checkbox(),\n \"shade_lowest\": widgets.Checkbox(),\n \"sharex\": widgets.Dropdown(options={\"True\": True, \"col\": \"col\", \"row\": \"row\"}),\n \"sharey\": widgets.Dropdown(options={\"True\": True, \"col\": \"col\", \"row\": \"row\"}),\n \"size_float\": widgets.FloatText(value=1.0),\n \"size_vector\": widgets.Dropdown(options=list(df)),\n \"sort\": widgets.Checkbox(),\n \"space\": widgets.FloatText(value=.2),\n \"split\": widgets.Checkbox(),\n \"square\": widgets.Checkbox(),\n \"style\": widgets.Dropdown(options=list(df)),\n \"truncate\": widgets.Checkbox(),\n \"units\": widgets.Dropdown(options=list(df)),\n \"vertical\": widgets.Checkbox(),\n \"vmax\": widgets.FloatText(value=1.0, step=0.1),\n \"vmin\": widgets.FloatText(value=1.0, step=0.1),\n \"whis\": widgets.FloatText(value=1.0),\n \"width\": widgets.FloatText(value=1.0),\n \"x\": widgets.Dropdown(options=list(df)),\n \"x_bins\": widgets.IntText(value=10),\n \"x_ci\": widgets.IntSlider(min=0, max=100, value=95),\n \"x_jitter\": widgets.FloatText(value=.1),\n \"x_partial\": widgets.Dropdown(options=list(df)),\n \"y\": widgets.Dropdown(options=list(df)),\n \"y_jitter\": widgets.FloatText(value=.1),\n \"y_partial\": widgets.Dropdown(options=list(df)),\n }\n relplot = {\n \"x\": w[\"x\"],\n \"y\": w[\"y\"],\n \"hue\": w[\"hue\"],\n \"size\": w[\"size_vector\"],\n \"style\": w[\"style\"],\n \"row\": w[\"row\"],\n \"col\": w[\"col\"],\n \"col_wrap\": w[\"col_wrap\"],\n #\"row_order\":\n #\"col_order\":\n \"palette\": w[\"palette\"],\n #\"hue_order\":\n #\"hue_norm\":\n #\"sizes\"\n #\"size_order\":\n #\"size_norm\":\n \"legend\": w[\"legend\"],\n \"kind\": w[\"kind_relplot\"],\n \"height\": w[\"height\"],\n \"aspect\": w[\"aspect\"],\n }\n scatterplot = {\n \"x\": w[\"x\"],\n \"y\": w[\"y\"],\n \"hue\": w[\"hue\"],\n \"size\": w[\"size_vector\"],\n \"style\": w[\"style\"],\n \"palette\": w[\"palette\"],\n #\"hue_order\":\n #\"hue_norm\":\n #\"sizes\": w[\"sizes\"],\n #\"size_order\":\n #\"size_norm\":\n #\"markers\":\n #\"style_order\":\n #\"{x,y}_bins\": (non functional)\n #\"units\": (non functional)\n #\"estimator\": (non functional)\n #\"ci\": (non functional)\n #\"n_boot\": (non functional)\n \"alpha\": w[\"alpha\"],\n #\"{x,y}_jitter\": (non functional)\n \"legend\": w[\"legend\"],\n }\n lineplot = {\n \"x\": w[\"x\"],\n \"y\": w[\"y\"],\n \"hue\": w[\"hue\"],\n \"size\": w[\"size_vector\"],\n \"style\": w[\"style\"],\n \"palette\": w[\"palette\"],\n #\"hue_order\":\n #\"hue_norm\":\n #\"sizes\",\n #\"dashes\":,\n #\"markers\"\n #\"style_order\":\n \"units\": w[\"units\"],\n #\"estimator\"\n \"ci\": w[\"ci\"],\n \"n_boot\": w[\"n_boot\"],\n \"sort\": w[\"sort\"],\n \"err_style\": w[\"err_style\"],\n \"legend\": w[\"legend\"],\n }\n catplot = {\n \"x\": w[\"x\"],\n \"y\": w[\"y\"],\n \"row\": w[\"row\"],\n \"col\": w[\"col\"],\n \"col_wrap\": w[\"col_wrap\"],\n #\"estimator\"\n \"ci\": w[\"ci\"],\n \"n_boot\": w[\"n_boot\"],\n \"units\": w[\"units\"],\n #\"order\",\"hue_order\"\n #\"row_order\",\"col_order\"\n \"kind\": w[\"kind_catplot\"],\n \"height\": w[\"height\"],\n \"aspect\": w[\"aspect\"],\n \"orient\": w[\"orient\"],\n \"color\": w[\"color\"],\n \"palette\": w[\"palette\"],\n \"legend\": w[\"legend\"],\n \"legend_out\": 
w[\"legend_out\"],\n \"sharex\": w[\"sharex\"],\n \"sharey\": w[\"sharey\"],\n \"margin_titles\": w[\"margin_titles\"],\n }\n stripplot = {\n \"x\": w[\"x\"],\n \"y\": w[\"y\"],\n #\"order\",\"hue_order\"\n \"jitter\": w[\"jitter\"],\n #\"dodge\"\n \"orient\": w[\"orient\"],\n \"color\": w[\"color\"],\n \"palette\": w[\"palette\"],\n \"size\": w[\"size_float\"],\n \"edgecolor\": w[\"edgecolor\"],\n \"linewidth\": w[\"linewidth\"],\n }\n swarmplot = {\n \"x\": w[\"x\"],\n \"y\": w[\"y\"],\n #\"order\",\"hue_order\"\n #\"dodge\"\n \"orient\": w[\"orient\"],\n \"color\": w[\"color\"],\n \"palette\": w[\"palette\"],\n \"size\": w[\"size_float\"],\n \"edgecolor\": w[\"edgecolor\"],\n \"linewidth\": w[\"linewidth\"],\n }\n boxplot = {\n \"x\": w[\"x\"],\n \"y\": w[\"y\"],\n #\"order\",\"hue_order\"\n \"orient\": w[\"orient\"],\n \"color\": w[\"color\"],\n \"palette\": w[\"palette\"],\n \"saturation\": w[\"saturation\"],\n \"width\": w[\"width\"],\n #\"dodge\"\n \"fliersize\": w[\"fliersize\"],\n \"linewidth\": w[\"linewidth\"],\n \"whis\": w[\"whis\"],\n \"notch\": w[\"notch\"],\n }\n violinplot = {\n \"x\": w[\"x\"],\n \"y\": w[\"y\"],\n \"hue\": w[\"hue\"],\n #\"order\",\"hue_order\"\n \"bw\": w[\"bw\"],\n \"cut\": w[\"cut\"],\n \"scale\": w[\"scale_violinplot\"],\n \"scale_hue\": w[\"scale_hue\"],\n \"gridsize\": w[\"gridsize\"],\n \"width\": w[\"width\"],\n \"inner\": w[\"inner\"],\n \"split\": w[\"split\"],\n #\"dodge\"\n \"orient\": w[\"orient\"],\n \"linewidth\": w[\"linewidth\"],\n \"color\": w[\"color\"],\n \"palette\": w[\"palette\"],\n \"saturation\": w[\"saturation\"],\n }\n boxenplot = {\n \"x\": w[\"x\"],\n \"y\": w[\"y\"],\n \"hue\": w[\"hue\"],\n #\"order\",\"hue_order\"\n \"orient\": w[\"orient\"],\n \"color\": w[\"color\"],\n \"palette\": w[\"palette\"],\n \"saturation\": w[\"saturation\"],\n \"width\": w[\"width\"],\n #\"dodge\"\n \"k_depth\": w[\"k_depth\"],\n \"linewidth\": w[\"linewidth\"],\n \"scale\": w[\"scale_boxenplot\"],\n \"outlier_prop\": w[\"outlier_prop\"],\n }\n pointplot = {\n \"x\": w[\"x\"],\n \"y\": w[\"y\"],\n \"hue\": w[\"hue\"],\n #\"order\",\"hue_order\"\n #\"estimator\"\n \"ci\": w[\"ci\"],\n \"n_boot\": w[\"n_boot\"],\n \"units\": w[\"units\"],\n #\"markers\"\n #linestyles\n #\"dodge\"\n \"join\": w[\"join\"],\n \"scale\": w[\"scale_float\"],\n \"orient\": w[\"orient\"],\n \"color\": w[\"color\"],\n \"palette\": w[\"palette\"],\n \"errwidth\": w[\"errwidth\"],\n \"capsize\": w[\"capsize\"],\n }\n barplot = {\n \"x\": w[\"x\"],\n \"y\": w[\"y\"],\n \"hue\": w[\"hue\"],\n #\"order\",\"hue_order\"\n #\"estimator\"\n \"ci\": w[\"ci\"],\n \"n_boot\": w[\"n_boot\"],\n \"units\": w[\"units\"],\n \"orient\": w[\"orient\"],\n \"color\": w[\"color\"],\n \"palette\": w[\"palette\"],\n \"saturation\": w[\"saturation\"],\n #\"errcolor\"\n \"errwidth\": w[\"errwidth\"],\n \"capsize\": w[\"capsize\"],\n #\"dodge\"\n }\n countplot = {\n \"x\": w[\"x\"],\n \"y\": w[\"y\"],\n \"hue\": w[\"hue\"],\n #\"order\",\"hue_order\"\n \"orient\": w[\"orient\"],\n \"color\": w[\"color\"],\n \"palette\": w[\"palette\"],\n \"saturation\": w[\"saturation\"],\n #\"dodge\"\n }\n jointplot = {\n \"x\": w[\"x\"],\n \"y\": w[\"y\"],\n \"kind\": w[\"kind_jointplot\"],\n #stat_func\n \"color\": w[\"color\"],\n \"height\": w[\"height\"],\n \"ratio\": w[\"ratio\"],\n \"space\": w[\"space\"],\n \"dropna\": w[\"dropna\"],\n #\"xlim\"\n #\"ylim\"\n }\n pairplot = {\n \"hue\": w[\"hue\"],\n #hue_order\n \"palette\": w[\"palette\"],\n #vars\n #x_vars\n #y_vars\n \"kind\": w[\"kind_pairplot\"],\n 
\"diag_kind\": w[\"diag_kind\"],\n #\"markers\"\n \"height\": w[\"height\"],\n \"aspect\": w[\"aspect\"],\n \"dropna\": w[\"dropna\"],\n }\n distplot = {\n \"a\": w[\"a\"],\n \"bins\": w[\"bins\"],\n \"hist\": w[\"hist\"],\n \"kde\": w[\"kde\"],\n \"rug\": w[\"rug\"],\n #\"fit\"\n #{hist, kde, rug, fit}_kws\n \"color\": w[\"color\"],\n \"vertical\": w[\"vertical\"],\n \"norm_hist\": w[\"norm_hist\"],\n \"axlabel\": w[\"axlabel\"],\n \"label\": w[\"label\"],\n }\n kdeplot = {\n \"data\": w[\"data\"],\n \"data2\": w[\"data2\"],\n \"shade\": w[\"shade\"],\n \"vertical\": w[\"vertical\"],\n \"kernel\": w[\"kernel\"],\n \"bw\": w[\"bw\"],\n \"gridsize\": w[\"gridsize\"],\n \"cut\": w[\"cut\"],\n #\"clip\":\n \"legend\": w[\"legend\"],\n \"cumulative\": w[\"cumulative\"],\n \"shade_lowest\": w[\"shade_lowest\"],\n \"cbar\": w[\"cbar\"],\n \"cbar_ax\": w[\"cbar_ax\"],\n }\n lmplot = {\n \"x\": w[\"x\"],\n \"y\": w[\"y\"],\n \"hue\": w[\"hue\"],\n \"col\": w[\"col\"],\n \"row\": w[\"row\"],\n \"palette\": w[\"palette\"],\n \"col_wrap\": w[\"col_wrap\"],\n \"height\": w[\"height\"],\n \"aspect\": w[\"aspect\"],\n #\"markers\",\n \"sharex\": w[\"sharex\"],\n \"sharey\": w[\"sharey\"],\n \"legend\": w[\"legend\"],\n \"legend_out\": w[\"legend_out\"],\n #x_estimator\n \"x_bins\": w[\"x_bins\"],\n \"x_ci\": w[\"x_ci\"],\n \"scatter\": w[\"scatter\"],\n \"fit_reg\": w[\"fit_reg\"],\n \"ci\": w[\"ci\"],\n \"n_boot\": w[\"n_boot\"],\n \"units\": w[\"units\"],\n \"order\": w[\"order_regression\"],\n \"logistic\": w[\"logistic\"],\n \"lowess\": w[\"lowess\"],\n \"robust\": w[\"robust\"],\n \"logx\": w[\"logx\"],\n \"x_partial\": w[\"x_partial\"],\n \"y_partial\": w[\"y_partial\"],\n \"truncate\": w[\"truncate\"],\n \"x_jitter\": w[\"x_jitter\"],\n \"y_jitter\": w[\"y_jitter\"],\n }\n regplot = {\n \"x\": w[\"x\"],\n \"y\": w[\"y\"],\n #x_estimator\n \"x_bins\": w[\"x_bins\"],\n \"x_ci\": w[\"x_ci\"],\n \"scatter\": w[\"scatter\"],\n \"fit_reg\": w[\"fit_reg\"],\n \"ci\": w[\"ci\"],\n \"n_boot\": w[\"n_boot\"],\n \"units\": w[\"units\"],\n \"order\": w[\"order_regression\"],\n \"logistic\": w[\"logistic\"],\n \"lowess\": w[\"lowess\"],\n \"robust\": w[\"robust\"],\n \"logx\": w[\"logx\"],\n \"x_partial\": w[\"x_partial\"],\n \"y_partial\": w[\"y_partial\"],\n \"truncate\": w[\"truncate\"],\n \"x_jitter\": w[\"x_jitter\"],\n \"y_jitter\": w[\"y_jitter\"],\n \"label\": w[\"label\"],\n \"color\": w[\"color\"],\n \"marker\": w[\"marker\"],\n }\n residplot = {\n \"x\": w[\"x\"],\n \"y\": w[\"y\"],\n \"lowess\": w[\"lowess\"],\n \"x_partial\": w[\"x_partial\"],\n \"y_partial\": w[\"y_partial\"],\n \"order\": w[\"order_regression\"],\n \"robust\": w[\"robust\"],\n \"dropna\": w[\"dropna\"],\n \"label\": w[\"label\"],\n \"color\": w[\"color\"],\n }\n heatmap = {\n \"vmin\": w[\"vmin\"],\n \"vmax\": w[\"vmax\"],\n \"cmap\": w[\"cmap\"],\n \"center\": w[\"center\"],\n \"robust\": w[\"robust\"],\n \"annot\": w[\"annot\"],\n \"fmt\": w[\"fmt\"],\n \"linewidths\": w[\"linewidths\"],\n \"linecolor\": w[\"linecolor\"],\n \"cbar\": w[\"cbar\"],\n \"square\": w[\"square\"],\n #xticklabels, yticklabels\n #\"mask\"\n }\n #clustermap = {}\n return {\n \"*\": w,\n \"relplot\": relplot,\n \"scatterplot\": scatterplot,\n \"lineplot\": lineplot,\n \"catplot\": catplot,\n \"stripplot\": stripplot,\n \"swarmplot\": swarmplot,\n \"boxplot\": boxplot,\n \"violinplot\": violinplot,\n \"boxenplot\": boxenplot,\n \"pointplot\": pointplot,\n \"barplot\": barplot,\n \"countplot\": countplot,\n \"jointplot\": jointplot,\n \"pairplot\": 
pairplot,\n \"distplot\": distplot,\n \"kdeplot\": kdeplot,\n #rugplot -> not interesting\n \"lmplot\": lmplot,\n \"regplot\": regplot,\n \"residplot\": residplot,\n \"heatmap\": heatmap,\n #\"clustermap\": clustermap,\n }", "def build_basic_frame():\r\n param_frame = T.LabelFrame(ROOT, text = \"Set Basic Experiment Parameters\")\r\n pop_frame = ExperimentFrame(param_frame, \"Population Size: \",\r\n T.LEFT, EXP.pop_size, \"pop_size\")\r\n PARAM.append(pop_frame)\r\n gen_frame = ExperimentFrame(param_frame, \"Number of Generations: \",\r\n T.LEFT, EXP.num_generations, \"num_generations\")\r\n PARAM.append(gen_frame)\r\n max_depth_frame = ExperimentFrame(param_frame, \"Max Depth of Tree: \", \r\n T.LEFT, EXP.max_depth, \"max_depth\")\r\n PARAM.append(max_depth_frame)\r\n \r\n selection_frame = T.LabelFrame(param_frame, text = \"Selection Method\")\r\n select_var = {\"s_selection\" : \"greedy\"}\r\n PARAM.append(select_var)\r\n def select():\r\n \"\"\" SelectionMethod Radio Button \"\"\"\r\n PARAM.remove(select_var)\r\n select_var[\"s_selection\"] = textv.get()\r\n PARAM.append(select_var)\r\n modes = [\r\n (\"Greedy Selection\", \"greedy\")\r\n #(\"Tournament Selection\", \"tournament\"),\r\n ]\r\n textv = T.StringVar()\r\n for text, mode in modes:\r\n select_b = T.Radiobutton(selection_frame, text = text,\r\n variable = textv, value = mode, command = select)\r\n select_b.pack(anchor=T.W)\r\n textv.set(\"greedy\") # initialize\r\n selection_frame.pack(side = T.RIGHT)\r\n \r\n \r\n selection_frame2 = T.LabelFrame(param_frame, text = \"Tree Type\")\r\n \r\n tree_var = {\"t_type\" : \"grow\"}\r\n PARAM.append(tree_var)\r\n def treeselect():\r\n \"\"\" SelectionMethod Radio Button \"\"\"\r\n tree_var[\"t_type\"] = treev.get()\r\n modes = [\r\n (\"Grow Tree\", \"grow\"),\r\n (\"Full Tree\", \"full\"),\r\n ]\r\n treev = T.StringVar()\r\n for text, mode in modes:\r\n select_b = T.Radiobutton(selection_frame2, text = text,\r\n variable = treev, value = mode, command = treeselect)\r\n select_b.pack(anchor=T.W)\r\n treev.set(\"grow\") # initialize\r\n \r\n selection_frame2.pack(side = T.RIGHT)\r\n \r\n param_frame.pack(side = T.TOP, ipadx = INTHORIZONTALPADDING,\r\n ipady = INTVERTICALPADDING, padx = HORIZONTALPADDING,\r\n pady = VERTICALPADDING)", "def _build(specs_dict: dict, **kwargs: bool):\n return [\n Card(face, suit, value=specs_dict.get(face).get(suit), **kwargs)\n for face in specs_dict.keys()\n for suit in specs_dict.get(face).keys()\n ]", "def _build(self):\n raise NotImplementedError()", "def generate_table(confignames):\n selectrow = np.ones(len(confignames))\n\n selectpd = pd.DataFrame({\"configname\": confignames, \"selected\": selectrow})\n return selectpd", "def test_read_selection():\n # test one channel for each selection\n ch_names = ['MEG 2211', 'MEG 0223', 'MEG 1312', 'MEG 0412', 'MEG 1043',\n 'MEG 2042', 'MEG 2032', 'MEG 0522', 'MEG 1031']\n sel_names = ['Vertex', 'Left-temporal', 'Right-temporal', 'Left-parietal',\n 'Right-parietal', 'Left-occipital', 'Right-occipital',\n 'Left-frontal', 'Right-frontal']\n\n raw = read_raw_fif(raw_fname)\n for i, name in enumerate(sel_names):\n sel = read_selection(name)\n assert_true(ch_names[i] in sel)\n sel_info = read_selection(name, info=raw.info)\n assert_equal(sel, sel_info)\n\n # test some combinations\n all_ch = read_selection(['L', 'R'])\n left = read_selection('L')\n right = read_selection('R')\n\n assert_true(len(all_ch) == len(left) + len(right))\n assert_true(len(set(left).intersection(set(right))) == 0)\n\n frontal = 
read_selection('frontal')\n occipital = read_selection('Right-occipital')\n assert_true(len(set(frontal).intersection(set(occipital))) == 0)\n\n ch_names_new = [ch.replace(' ', '') for ch in ch_names]\n raw_new = read_raw_fif(raw_new_fname)\n for i, name in enumerate(sel_names):\n sel = read_selection(name, info=raw_new.info)\n assert_true(ch_names_new[i] in sel)\n\n assert_raises(TypeError, read_selection, name, info='foo')", "def __init__(self, *args, **kw):\n\n # Extract options from source strings.\n\n self.code_line = kw.get(\"code_line\")\n if self.code_line:\n self.code_line = self.code_line.rstrip(\"\\n\")\n else:\n return\n\n self.prop_line = kw.get(\"prop_line\")\n if self.prop_line:\n self.prop_line = self.prop_line.rstrip(\"\\n\")\n else:\n return\n\n self.ast_dict = kw.get(\"ast_dict\")\n if self.ast_dict:\n # name │ lineno │ value │ namespace\n\n # Collection will check if namespace exists,\n # and create new collection if necessary.\n pass\n else:\n pass\n\n # Set instance attribute values.\n self.prop_args, self.prop_kw = eval(self.prop_line, globals())\n # Ensure keywords are case matched.\n self.prop_kw = {CasePicker.to_snake(k):v for k, v in self.prop_kw.items()}\n\n self.field_name = self.ast_dict[\"name\"]\n self.default_val = self.ast_dict[\"value\"]\n\n # Set options from DPROPERTY specifiers.\n self.data_type = self.prop_kw.get(\"type\", type(self.default_val))\n ##self.default_val = self.data_type(self.default_val)\n self.category = self.prop_kw.get(\"category\", \"\")\n #self.category = self.category.split(\"|\") # Category|Subcategory\n self.display_name = self.prop_kw.get(\"display_name\", CasePicker.to_pascal(self.field_name))\n\n # Set input ranges.\n self.from_ = self.prop_kw.get(\"from\",\n self.prop_kw.get(\"min\", None))\n self.to = self.prop_kw.get(\"to\",\n self.prop_kw.get(\"max\", None))\n try:\n self.from_, self.to = self.prop_kw.get(\"range\")\n except:\n pass", "def _generate_options(self, **kwargs: Any) -> dict:\n raise NotImplementedError", "def _build_range(self) -> str:\n return build_sequence(filter(None, (self.uids, self.sequence)))", "def _build(self):\n tab = self._tab\n tab.setModel(self._model)\n tab.horizontalHeader().setClickable(False)\n sig = SIG(\"sectionClicked(int)\")\n connect(tab.verticalHeader(), sig, self.remove_cond)\n if self._grp_colm==\"New_coln\":\n tab.setItemDelegate(TextDelegate(self))\n tab.setItemDelegateForColumn(1, AstGroupDelegate(self,self._grp_names1))\n\n else :\n tab.setItemDelegate(ValueDelegate(self))\n tab.setItemDelegateForColumn(0, AstGroupDelegate(self, self._grp_names))", "def get_selection():\n\n selected = Gui.Selection.getSelectionEx()[0].SubObjects\n sel_len = len(selected)\n result = SelectionContainer()\n\n for _x in range(0, sel_len):\n\n shape_type = selected[_x].ShapeType\n\n if shape_type == 'Vertex':\n result.vertices.append(selected[_x])\n\n elif shape_type == 'Edge':\n\n if 'Line' in str(selected[_x].Curve):\n result.lines.append(selected[_x])\n else:\n result.curves.append(selected[_x])", "def getBuilder():", "def todict(self, recursive=10):\n # dict required here for subsequent update.\n selections = dict([(key, val.todict(recursive-1)) if recursive-1\n else (val.basename, val.header)\n for (key,val) in self.selections.normal_items()])\n selections.update(dict([(key, {\"name\": val, \"text_descr\": self.obs_package.TEXT_DESCR[key]})\n for (key,val) in self.selections.special_items()]))\n # selections.items() critical below for prior interface, web context display\n return {\n \"header\" : { 
key: self.header[key] for key in self.header },\n \"name\" : self.header[\"name\"],\n \"parameters\" : tuple(self.parkey),\n \"selections\" : list(sorted(selections.items())),\n }", "def create_options(self, saving=False):\n self.get_filters(saving)\n\n options = {}\n if(self.calibration_points):\n options['begin_calibration_index'] = self.begin_ind_calibration_entry.get()\n options['end_calibration_index'] = self.end_ind_calibration_entry.get()\n options['known_distance'] = self.known_distance_entry.get()\n else:\n options['begin_calibration_index']=''\n options['end_calibration_index']=''\n options['known_distance']=''\n options['unit_type'] = (re.sub(r'[^A-Za-z0-9_]', '', self.unit_type_entry.get())).capitalize()\n options['begin_index'] = self.begin_ind_entry.get()\n options['end_index'] = self.end_ind_entry.get()\n options['names_list'] = self.names_list_entry.get()\n options['name_column'] = self.name_column_var.get()\n options['x_column'] = self.x_column_var.get()\n options['y_column'] = self.y_column_var.get()\n options['z_column'] = self.z_column_var.get()\n options['filters'] = self.filter_entry_dict\n options['habitat_image'] = self.habitat_image\n options['x_ratio']=self.x_ratio_entry.get()\n options['y_ratio']=self.y_ratio_entry.get()\n options['z_ratio']=self.z_ratio_entry.get()\n options['sheet_name']=self.sheet_name_var.get()\n\n return options", "def make_selection_menu(\n cls,\n selections: list[str],\n title: str = \"\",\n subtitle: str = \"\",\n *,\n show_exit_item: bool = False,\n ) -> CursesMenu:\n menu = cls(title=title, subtitle=subtitle, show_exit_item=show_exit_item)\n from cursesmenu.items.selection_item import SelectionItem\n\n for index, selection in enumerate(selections):\n menu.items.append(\n SelectionItem(text=selection, index=index, should_exit=True),\n )\n return menu", "def build_recursive_traversal_spec(client_factory):\r\n visit_folders_select_spec = build_selection_spec(client_factory,\r\n \"visitFolders\")\r\n # For getting to hostFolder from datacenter\r\n dc_to_hf = build_traversal_spec(client_factory, \"dc_to_hf\", \"Datacenter\",\r\n \"hostFolder\", False,\r\n [visit_folders_select_spec])\r\n # For getting to vmFolder from datacenter\r\n dc_to_vmf = build_traversal_spec(client_factory, \"dc_to_vmf\", \"Datacenter\",\r\n \"vmFolder\", False,\r\n [visit_folders_select_spec])\r\n # For getting Host System to virtual machine\r\n h_to_vm = build_traversal_spec(client_factory, \"h_to_vm\", \"HostSystem\",\r\n \"vm\", False,\r\n [visit_folders_select_spec])\r\n\r\n # For getting to Host System from Compute Resource\r\n cr_to_h = build_traversal_spec(client_factory, \"cr_to_h\",\r\n \"ComputeResource\", \"host\", False, [])\r\n\r\n # For getting to datastore from Compute Resource\r\n cr_to_ds = build_traversal_spec(client_factory, \"cr_to_ds\",\r\n \"ComputeResource\", \"datastore\", False, [])\r\n\r\n rp_to_rp_select_spec = build_selection_spec(client_factory, \"rp_to_rp\")\r\n rp_to_vm_select_spec = build_selection_spec(client_factory, \"rp_to_vm\")\r\n # For getting to resource pool from Compute Resource\r\n cr_to_rp = build_traversal_spec(client_factory, \"cr_to_rp\",\r\n \"ComputeResource\", \"resourcePool\", False,\r\n [rp_to_rp_select_spec, rp_to_vm_select_spec])\r\n\r\n # For getting to child res pool from the parent res pool\r\n rp_to_rp = build_traversal_spec(client_factory, \"rp_to_rp\", \"ResourcePool\",\r\n \"resourcePool\", False,\r\n [rp_to_rp_select_spec, rp_to_vm_select_spec])\r\n\r\n # For getting to Virtual Machine from the Resource 
Pool\r\n rp_to_vm = build_traversal_spec(client_factory, \"rp_to_vm\", \"ResourcePool\",\r\n \"vm\", False,\r\n [rp_to_rp_select_spec, rp_to_vm_select_spec])\r\n\r\n # Get the assorted traversal spec which takes care of the objects to\r\n # be searched for from the root folder\r\n traversal_spec = build_traversal_spec(client_factory, \"visitFolders\",\r\n \"Folder\", \"childEntity\", False,\r\n [visit_folders_select_spec, dc_to_hf,\r\n dc_to_vmf, cr_to_ds, cr_to_h, cr_to_rp,\r\n rp_to_rp, h_to_vm, rp_to_vm])\r\n return traversal_spec", "def build_object_spec(client_factory, root_folder, traversal_specs):\r\n object_spec = client_factory.create('ns0:ObjectSpec')\r\n object_spec.obj = root_folder\r\n object_spec.skip = False\r\n object_spec.selectSet = traversal_specs\r\n return object_spec", "def make_select_fields(self, product, field_names, custom_offsets):\n\n assert product and field_names\n\n dataset_fields = product.metadata_type.dataset_fields\n dataset_section = product.metadata_type.definition['dataset']\n\n select_fields = []\n for field_name in field_names:\n if dataset_fields.get(field_name):\n select_fields.append(dataset_fields[field_name])\n else:\n # try to construct the field\n if field_name in {'transform', 'extent', 'crs', 'bounds'}:\n grid_spatial = dataset_section.get('grid_spatial')\n if grid_spatial:\n select_fields.append(SimpleDocField(\n 'grid_spatial', 'grid_spatial', DATASET.c.metadata,\n False,\n offset=grid_spatial\n ))\n elif custom_offsets and field_name in custom_offsets:\n select_fields.append(SimpleDocField(\n field_name, field_name, DATASET.c.metadata,\n False,\n offset=custom_offsets[field_name]\n ))\n elif field_name == 'uris':\n select_fields.append(Field('uris', 'uris'))\n\n return select_fields", "def generate_completion_spec(\n parser,\n hooks=None,\n arg_filter=None,\n tool=\"argparse_complete_fig\"\n):\n spec = construct_subcommand(parser, hooks, arg_filter)\n return f\"\"\"// Autogenerated by {tool}\nconst completionSpec: Fig.Spec = {json.dumps(spec, indent=2)}\nexport default completionSpec;\"\"\"", "def getInputSpecification(cls):\n inputSpecification = super(Custom1D, cls).getInputSpecification()\n inputSpecification.addSub(InputData.parameterInputFactory(\"workingDir\", contentType=InputTypes.StringType))\n inputSpecification.addSub(InputData.parameterInputFactory(\"functionType\", contentType=InputTypes.StringType))\n inputSpecification.addSub(InputData.parameterInputFactory(\"dataFilename\", contentType=InputTypes.StringType))\n inputSpecification.addSub(InputData.parameterInputFactory(\"functionID\", contentType=InputTypes.StringType))\n inputSpecification.addSub(InputData.parameterInputFactory(\"variableID\", contentType=InputTypes.StringType))\n\n return inputSpecification", "def build_rspec(cls, slice_urn, resources, leases, flowspace, vms, rspec_version='GENI 3 request'):\n rspec = RSpec(version=rspec_version)\n\n nodes = []\n channels = []\n links = []\n\n # XXX Here it is only about mappings and hooks between ontologies\n\n for urn in resources:\n # XXX TO BE CORRECTED, this handles None values\n if not urn:\n continue\n resource = dict()\n # TODO: take into account the case where we send a dict of URNs without keys\n #resource['component_id'] = resource.pop('urn')\n resource['component_id'] = urn\n resource_hrn, resource_type = urn_to_hrn(urn) # resource['component_id'])\n # build component_manager_id\n top_auth = resource_hrn.split('.')[0]\n cm = urn.split(\"+\")\n resource['component_manager_id'] = \"%s+%s+authority+cm\" % 
(cm[0],top_auth)\n\n #print \"resource_type\", resource_type\n if resource_type == 'node':\n #print \"NODE\", resource, cls\n resource = cls.on_build_resource_hook(resource)\n nodes.append(resource)\n elif resource_type == 'link':\n links.append(resource)\n elif resource_type == 'channel':\n channels.append(resource)\n else:\n raise Exception, \"Not supported type of resource\" \n\n #for node in nodes:\n # print \"NODE:\", node\n\n rspec.version.add_nodes(nodes, rspec_content_type=\"request\")\n\n #rspec.version.add_links(links)\n #rspec.version.add_channels(channels)\n\n sfa_leases = cls.manifold_to_sfa_leases(leases, slice_urn)\n #print \"sfa_leases\", sfa_leases\n if sfa_leases:\n # SFAWRAP BUG ???\n # rspec.version.add_leases bugs with an empty set of leases\n # slice_id = leases[0]['slice_id']\n # TypeError: list indices must be integers, not str\n rspec.version.add_leases(sfa_leases, []) # XXX Empty channels for now\n \n return rspec.toxml()", "def __init__(self, spec):\n self.spec = spec", "def build(self, *args, **kwargs):\n return", "def buildOptions(self):\n ZenModeler.buildOptions(self)\n self.parser.add_option(\n \"--net\",\n dest=\"net\",\n action=\"append\",\n help=\"Discover all device on this network\",\n )\n self.parser.add_option(\n \"--range\",\n dest=\"range\",\n action=\"append\",\n help=\"Discover all IPs in this range\",\n )\n self.parser.add_option(\n \"--deviceclass\",\n dest=\"deviceclass\",\n default=\"/Discovered\",\n help=\"Default device class for discovered devices\",\n )\n self.parser.add_option(\n \"--prod_state\",\n dest=\"productionState\",\n default=1000,\n type=\"int\",\n help=\"Initial production state for discovered devices\",\n )\n self.parser.add_option(\n \"--prod-state\",\n dest=\"productionState\",\n default=1000,\n type=\"int\",\n help=\"Initial production state for discovered devices\",\n )\n self.parser.add_option(\n \"--location\",\n dest=\"location\",\n default=None,\n help=\"Initial location for discovered devices\",\n )\n self.parser.add_option(\n \"--group\",\n dest=\"groups\",\n action=\"append\",\n help=\"Group to which discovered devices should be added\",\n )\n self.parser.add_option(\n \"--system\",\n dest=\"systems\",\n action=\"append\",\n help=\"System to which discovered devices should be added\",\n )\n self.parser.add_option(\n \"--remodel\",\n dest=\"remodel\",\n action=\"store_true\",\n default=False,\n help=\"Remodel existing objects\",\n )\n self.parser.add_option(\n \"--routers\",\n dest=\"routersonly\",\n action=\"store_true\",\n default=False,\n help=\"Only discover routers\",\n )\n self.parser.add_option(\n \"--tries\",\n dest=\"tries\",\n default=1,\n type=\"int\",\n help=\"How many ping tries\",\n )\n self.parser.add_option(\n \"--timeout\",\n dest=\"timeout\",\n default=2,\n type=\"float\",\n help=\"ping timeout in seconds\",\n )\n self.parser.add_option(\n \"--chunk\",\n dest=\"chunkSize\",\n default=10,\n type=\"int\",\n help=\"Number of in-flight ping packets\",\n )\n self.parser.add_option(\n \"--snmp-missing\",\n dest=\"snmpMissing\",\n action=\"store_true\",\n default=False,\n help=\"Send an event if SNMP is not found on the device\",\n )\n self.parser.add_option(\n \"--add-inactive\",\n dest=\"addInactive\",\n action=\"store_true\",\n default=False,\n help=\"Add all IPs found, even if they are unresponsive\",\n )\n self.parser.add_option(\n \"--reset-ptr\",\n dest=\"resetPtr\",\n action=\"store_true\",\n default=False,\n help=\"Reset all IP PTR records\",\n )\n self.parser.add_option(\n \"--no-snmp\",\n 
dest=\"nosnmp\",\n action=\"store_true\",\n default=False,\n help=\"Skip SNMP discovery on found IP addresses\",\n )\n self.parser.add_option(\n \"--subnets\",\n dest=\"subnets\",\n action=\"store_true\",\n default=False,\n help=\"Recurse into subnets for discovery\",\n )\n self.parser.add_option(\n \"--walk\",\n dest=\"walk\",\n action=\"store_true\",\n default=False,\n help=\"Walk the route tree, performing discovery on all networks\",\n )\n self.parser.add_option(\n \"--max-devices\",\n dest=\"maxdevices\",\n default=0,\n type=\"int\",\n help=\"Collect a maximum number of devices. Default is no limit.\",\n )\n self.parser.add_option(\n \"--snmp-strict-discovery\",\n dest=\"zSnmpStrictDiscovery\",\n action=\"store_true\",\n default=False,\n help=\"Only add devices that can be modeled via SNMP.\",\n )\n self.parser.add_option(\n \"--prefer-snmp-naming\",\n dest=\"zPreferSnmpNaming\",\n action=\"store_true\",\n default=False,\n help=\"Prefer SNMP name to DNS name when modeling via SNMP.\",\n )\n # --job: a development-only option that jobs will use to communicate\n # their existence to zendisc. Not for users, so help is suppressed.\n self.parser.add_option(\"--job\", dest=\"job\", help=SUPPRESS_HELP)", "def build(self):\n # Clean all fields.\n self._clean_fields()\n\n # Build", "def buildCommandModel ( switchSpecs, posSpecs ):\n\n #-- 1 --\n result = []\n\n #-- 2 --\n # [ result +:= strings representing the options in switchSpecs ]\n for switch in switchSpecs:\n result.append ( \"-%s\" % switch.letter )\n\n #-- 3 --\n # [ result +:= strings representing the keys in posSpecs ]\n for pos in posSpecs:\n if pos.optional:\n result.append ( \"[%s]\" % pos.key )\n else:\n result.append ( pos.key )\n if pos.repeated:\n result.append ( \"...\" )\n\n #-- 4 --\n # [ return the concatenation of the strings in result with single\n # spaces between them ]\n return \" \".join ( result )", "def __init__(self, parent=None):\n super(RobotSelection, self).__init__(parent)\n self.parent = parent\n self.initUI()", "def assemble_widget(self) -> widgets.Widget:\n graph_selection = self._create_layer_selection(layer_type=\"graphs\")\n map_selection = self._create_layer_selection(layer_type=\"maps\")\n view_buttons = self.create_visibility_buttons()\n\n widget = widgets.VBox(\n [\n widget_utils.create_html_header(\"Graph Selection\"),\n graph_selection,\n widget_utils.HRULE,\n widget_utils.create_html_header(\"Map Selection\"),\n map_selection,\n widget_utils.HRULE,\n widget_utils.create_html_header(\"View Selection\"),\n view_buttons,\n ]\n )\n\n return widget", "def __chanSelection(self, spwsel):\n \n # Split to get each spw in a list\n if spwsel.__contains__(','):\n spwlist = spwsel.split(',') \n else:\n spwlist = spwsel.split(';')\n \n spwid=[]\n chanlist=[]\n # Split to create two lists, one with channels, the other with spwIDs\n for isel in spwlist:\n # Get tail, colon and head\n (s, c, ch) = isel.rpartition(\":\")\n # Remove any blanks\n s = s.strip(' ')\n c = c.strip(' ')\n ch = ch.strip(' ')\n # If no tail, there was no colon to split. 
In this case, add the spwID\n if s == \"\":\n spwid.append(ch)\n chanlist.append('')\n else:\n spwid.append(s)\n chanlist.append(ch)\n \n # Create a dictionary\n seldict = {}\n for ns in xrange(len(spwid)):\n sel = {}\n sel['spw'] = spwid[ns]\n sel['channels'] = chanlist[ns]\n seldict[ns] = sel\n\n\n return seldict", "def parse(self):\n opt = self.gather_options()\n opt.isTrain = self.isTrain # train or test\n\n # process opt.suffix\n if opt.suffix:\n suffix = ('_' + opt.suffix.format(**vars(opt))) if opt.suffix != '' else ''\n opt.name = opt.name + suffix\n\n opt.f_map = [opt.crop_size, opt.crop_size * 2, opt.crop_size * 4, opt.crop_size * 8]\n self.print_options(opt)\n\n # set gpu ids\n str_ids = opt.gpu_ids.split(',')\n opt.gpu_ids = []\n for str_id in str_ids:\n id = int(str_id)\n if id >= 0:\n opt.gpu_ids.append(id)\n if len(opt.gpu_ids) > 0:\n torch.cuda.set_device(opt.gpu_ids[0])\n\n self.opt = opt\n return self.opt", "def build_specfile_sections(spec):\n str = \"\"\n\n mandatory_sections = {\n 'DESCRIPTION' : '\\n%%description\\n%s\\n\\n', }\n\n str = str + SimpleTagCompiler(mandatory_sections).compile( spec )\n\n optional_sections = {\n 'DESCRIPTION_' : '%%description -l %s\\n%s\\n\\n',\n 'CHANGELOG' : '%%changelog\\n%s\\n\\n',\n 'X_RPM_PREINSTALL' : '%%pre\\n%s\\n\\n',\n 'X_RPM_POSTINSTALL' : '%%post\\n%s\\n\\n',\n 'X_RPM_PREUNINSTALL' : '%%preun\\n%s\\n\\n',\n 'X_RPM_POSTUNINSTALL' : '%%postun\\n%s\\n\\n',\n 'X_RPM_VERIFY' : '%%verify\\n%s\\n\\n',\n\n # These are for internal use but could possibly be overridden\n 'X_RPM_PREP' : '%%prep\\n%s\\n\\n',\n 'X_RPM_BUILD' : '%%build\\n%s\\n\\n',\n 'X_RPM_INSTALL' : '%%install\\n%s\\n\\n',\n 'X_RPM_CLEAN' : '%%clean\\n%s\\n\\n',\n }\n\n # Default prep, build, install and clean rules\n # TODO: optimize those build steps, to not compile the project a second time\n if 'X_RPM_PREP' not in spec:\n spec['X_RPM_PREP'] = '[ -n \"$RPM_BUILD_ROOT\" -a \"$RPM_BUILD_ROOT\" != / ] && rm -rf \"$RPM_BUILD_ROOT\"' + '\\n%setup -q'\n\n if 'X_RPM_BUILD' not in spec:\n spec['X_RPM_BUILD'] = '[ ! 
-e \"$RPM_BUILD_ROOT\" -a \"$RPM_BUILD_ROOT\" != / ] && mkdir \"$RPM_BUILD_ROOT\"'\n\n if 'X_RPM_INSTALL' not in spec:\n spec['X_RPM_INSTALL'] = 'scons --install-sandbox=\"$RPM_BUILD_ROOT\" \"$RPM_BUILD_ROOT\"'\n\n if 'X_RPM_CLEAN' not in spec:\n spec['X_RPM_CLEAN'] = '[ -n \"$RPM_BUILD_ROOT\" -a \"$RPM_BUILD_ROOT\" != / ] && rm -rf \"$RPM_BUILD_ROOT\"'\n\n str = str + SimpleTagCompiler(optional_sections, mandatory=0).compile( spec )\n\n return str", "def __init__(self, **kwargs):\n super(BaseAG, self).__init__()\n self.domains = None\n self._info = {}\n\n mutation_name = kwargs.get(\"mutation\", self._args.mutation)\n representation_name = kwargs.get(\"representation\",\n self._args.representation)\n crossover_name = kwargs.get(\"crossover\", self._args.crossover)\n selection_name = kwargs.get(\"selection\", self._args.selection)\n\n self._population = kwargs.get(\"population\", self._args.population)\n self._selection_crossover = kwargs.get(\"selection_crossover\",\n self._args.selection_crossover)\n self._selection_mutation = kwargs.get(\"selection_mutation\",\n self._args.selection_mutation)\n self._generations = kwargs.get(\"generations\",\n self._args.generations)\n dimension = kwargs.get(\"dimension\", self._args.dimensions)\n precision = kwargs.get(\"precision\", self._args.precision)\n\n crossovers = cros_factory.crossover_factory()\n mutations = mut_factory.mutation_factory()\n representations = repr_factory.representations_factory()\n selections = selection_factory.selection_factory()\n\n for item in crossovers:\n if crossover_name == item.name():\n self._crossover = item()\n for item in representations:\n if representation_name == item.name():\n # NOTE(mmicu): the dimension is know when we get the function\n # eliminate this requirement\n self._representation = item(dimension,\n precision)\n\n for item in mutations:\n if mutation_name == item.name():\n self._mutation = item()\n\n for item in selections:\n if selection_name == item.name():\n self._selection = item(self._representation)", "def make_subset(self, samples=None, name=None):\n if isinstance(samples, str):\n samples = [samples]\n\n if not hasattr(self, 'subsets'):\n self.subsets = {}\n self.subsets['All_Analyses'] = self.samples\n self.subsets[self.srm_identifier] = [s for s in self.samples if self.srm_identifier in s]\n self.subsets['All_Samples'] = [s for s in self.samples if self.srm_identifier not in s]\n\n # self.subsets = {s: 'All_Analyses' for s in self.samples if self.srm_identifier not in s}\n # self.subsets.update({s: 'STD' for s in self.samples if self.srm_identifier in s})\n # self.subsets = {}\n\n if name is None:\n name = max([-1] + [x for x in self.subsets.keys() if isinstance(x, int)]) + 1\n\n if samples is not None:\n self.subsets[name] = samples\n\n # for subset in np.unique(list(self.subsets.values())):\n # self.subsets[subset] = sorted([k for k, v in self.subsets.items() if str(v) == subset])\n\n return name", "def build(self, **other_kwargs):\n raise NotImplementedError()", "def _get_select_options(self, d):\n\t\top = d.options.split('\\n')\n\t\tif len(op) > 1 and op[1][:4].lower() == 'sql:':\n\t\t\t# Execute the sql query\n\t\t\tquery = op[1][4:].replace('__user',\n\t\t\t\t\t\twebnotes.session.get('user'))\n\t\telse:\n\t\t\t# Extract DocType and Conditions\n\t\t\t# and execute the resulting query\n\t\t\tdt = op[0][5:].strip()\n\t\t\tcond_list = [cond.replace('__user',\n\t\t\t\twebnotes.session.get('user')) for cond in op[1:]]\n\t\t\tquery = \"\"\"\\\n\t\t\t\tSELECT name FROM `tab%s`\n\t\t\t\tWHERE 
%s docstatus!=2\n\t\t\t\tORDER BY name ASC\"\"\" % (dt,\n\t\t\t\tcond_list and (\" AND \".join(cond_list) + \" AND \") or \"\")\n\t\ttry:\n\t\t\topt_list = webnotes.conn.sql(query)\n\t\texcept:\n\t\t\t# WARNING: Exception suppressed\n\t\t\topt_list = []\n\n\t\treturn opt_list", "def create_combination_and_rate_function(self):\n\n def create_combination_and_rate(_):\n\n self.console.children = []\n\n selection = wd.HBox()\n\n selection_widgets = list()\n if not 0 in (dropdown.value for dropdown in self.selectors.children):\n\n self.turn += 1\n\n user_try = list()\n\n for selector in self.selectors.children:\n\n color = wd.Button(\n disabled=True, button_style=self.mapping_colors[selector.value])\n color.icon = 'user-o'\n\n selection_widgets.append(color)\n\n user_try.append(selector.value)\n selector.value = 0\n\n self.duplicate = []\n self.avoid = []\n self.already_checked = []\n\n self.check_combination(combination=user_try)\n\n info = wd.Button(description='Turn {0} - Well placed: {1} \\n - Misplaced: {2}'.format(self.turn,\n self.try_return['well_placed'], self.try_return['misplaced']),\n disabled=True,\n layout={'width': 'auto',\n 'margin': '0px 0px 0px 50px'}\n )\n\n info.icon = 'hashtag'\n\n selection_widgets.append(info)\n\n # see if the player wins.\n if self.try_return == {'well_placed': 4, 'misplaced': 0}:\n\n self.console.children = [\n wd.Label(value='You won !'), self.new_game_button]\n\n self.display_correct_answer(failed=False)\n\n for selector in self.selectors.children:\n selector.disabled = True\n self.confirm_button.disabled = True\n\n else: # if the player has not won, we reset the counter of well/misplaced checkers and wait 7 seconds\n # before asking for another attempt.\n if self.turn == 12:\n\n self.console.children = [\n wd.Label(value='You loose !'), self.new_game_button]\n\n self.display_correct_answer()\n\n for selector in self.selectors.children:\n selector.disabled = True\n self.confirm_button.disabled = True\n\n else:\n self.try_return = {'well_placed': 0, 'misplaced': 0}\n\n else:\n self.console.children = [\n wd.Label(value='Please choose a color for every positions !')]\n\n selection.children = selection_widgets\n\n trials_children = list(self.trials.children)\n trials_children.insert(1, selection)\n\n self.trials.children = trials_children\n\n return create_combination_and_rate", "def __init__(self, *specs: Specification) -> None:\n self.specs = specs" ]
[ "0.6188935", "0.59599125", "0.5944895", "0.55326456", "0.5366692", "0.5328713", "0.53118664", "0.53038955", "0.5270639", "0.5265871", "0.5262976", "0.5192852", "0.518418", "0.51645607", "0.51583874", "0.5150827", "0.5110556", "0.5098783", "0.5098783", "0.50898474", "0.50657874", "0.504695", "0.504695", "0.504695", "0.50376", "0.5031194", "0.5024653", "0.5015964", "0.5003452", "0.49741095", "0.497377", "0.49639472", "0.4959592", "0.49264863", "0.49171576", "0.4898372", "0.4898372", "0.4897905", "0.4888906", "0.48564085", "0.48543665", "0.4840678", "0.48180988", "0.48049223", "0.4802947", "0.47997254", "0.47680932", "0.47678024", "0.47666025", "0.4764813", "0.4760585", "0.476002", "0.47504684", "0.47493884", "0.47482747", "0.47438142", "0.4742374", "0.47390383", "0.47265074", "0.47247276", "0.4720427", "0.4718998", "0.47164428", "0.46880105", "0.467875", "0.46679002", "0.46403915", "0.46250108", "0.46200418", "0.46128124", "0.4604551", "0.45932773", "0.45907834", "0.45885766", "0.45877367", "0.45876834", "0.45863503", "0.45862207", "0.45753735", "0.45745364", "0.4559251", "0.455543", "0.45549303", "0.45545855", "0.45543778", "0.4552645", "0.45511308", "0.4547701", "0.45459017", "0.45440194", "0.4543656", "0.45258898", "0.45229", "0.45228365", "0.45192304", "0.4518706", "0.45180166", "0.45154008", "0.4512508", "0.45111772" ]
0.72015435
0
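The two values above close out the first row: an overall document score of 0.72015435 and a document rank of 0, following the list of per-negative scores. The sketch below is a hypothetical consumer of one such row, not part of the dataset itself: it assumes the rows are stored one JSON object per line under the field names visible in this dump (query, document, negatives, negative_scores, document_score, document_rank), and the file path is a placeholder.

```python
import json

def iter_rows(path="retrieval_rows.jsonl"):
    """Yield one dict per row, pairing the query with its scored document
    and scored negatives. Field names follow this excerpt; the JSONL
    storage format and file name are assumptions."""
    with open(path, "r", encoding="utf-8") as handle:
        for line in handle:
            row = json.loads(line)
            # Scores appear as strings in this dump, so cast before pairing.
            scored_negatives = list(zip(row["negatives"],
                                        (float(s) for s in row["negative_scores"])))
            yield {
                "query": row["query"],
                "positive": (row["document"], float(row["document_score"])),
                "negatives": scored_negatives,
                "rank": int(row["document_rank"]),
            }
```

Read this way, a rank of 0 would mean the annotated document outscored every listed negative (0.72015435 against a top negative score of roughly 0.62), but that interpretation is inferred from the field names and values alone, not from any accompanying documentation.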
Builds the traversal spec object.
def build_traversal_spec(client_factory, name, spec_type, path, skip, select_set): traversal_spec = client_factory.create('ns0:TraversalSpec') traversal_spec.name = name traversal_spec.type = spec_type traversal_spec.path = path traversal_spec.skip = skip traversal_spec.selectSet = select_set return traversal_spec
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_recursive_traversal_spec(client_factory):\r\n visit_folders_select_spec = build_selection_spec(client_factory,\r\n \"visitFolders\")\r\n # For getting to hostFolder from datacenter\r\n dc_to_hf = build_traversal_spec(client_factory, \"dc_to_hf\", \"Datacenter\",\r\n \"hostFolder\", False,\r\n [visit_folders_select_spec])\r\n # For getting to vmFolder from datacenter\r\n dc_to_vmf = build_traversal_spec(client_factory, \"dc_to_vmf\", \"Datacenter\",\r\n \"vmFolder\", False,\r\n [visit_folders_select_spec])\r\n # For getting Host System to virtual machine\r\n h_to_vm = build_traversal_spec(client_factory, \"h_to_vm\", \"HostSystem\",\r\n \"vm\", False,\r\n [visit_folders_select_spec])\r\n\r\n # For getting to Host System from Compute Resource\r\n cr_to_h = build_traversal_spec(client_factory, \"cr_to_h\",\r\n \"ComputeResource\", \"host\", False, [])\r\n\r\n # For getting to datastore from Compute Resource\r\n cr_to_ds = build_traversal_spec(client_factory, \"cr_to_ds\",\r\n \"ComputeResource\", \"datastore\", False, [])\r\n\r\n rp_to_rp_select_spec = build_selection_spec(client_factory, \"rp_to_rp\")\r\n rp_to_vm_select_spec = build_selection_spec(client_factory, \"rp_to_vm\")\r\n # For getting to resource pool from Compute Resource\r\n cr_to_rp = build_traversal_spec(client_factory, \"cr_to_rp\",\r\n \"ComputeResource\", \"resourcePool\", False,\r\n [rp_to_rp_select_spec, rp_to_vm_select_spec])\r\n\r\n # For getting to child res pool from the parent res pool\r\n rp_to_rp = build_traversal_spec(client_factory, \"rp_to_rp\", \"ResourcePool\",\r\n \"resourcePool\", False,\r\n [rp_to_rp_select_spec, rp_to_vm_select_spec])\r\n\r\n # For getting to Virtual Machine from the Resource Pool\r\n rp_to_vm = build_traversal_spec(client_factory, \"rp_to_vm\", \"ResourcePool\",\r\n \"vm\", False,\r\n [rp_to_rp_select_spec, rp_to_vm_select_spec])\r\n\r\n # Get the assorted traversal spec which takes care of the objects to\r\n # be searched for from the root folder\r\n traversal_spec = build_traversal_spec(client_factory, \"visitFolders\",\r\n \"Folder\", \"childEntity\", False,\r\n [visit_folders_select_spec, dc_to_hf,\r\n dc_to_vmf, cr_to_ds, cr_to_h, cr_to_rp,\r\n rp_to_rp, h_to_vm, rp_to_vm])\r\n return traversal_spec", "def build_object_spec(client_factory, root_folder, traversal_specs):\r\n object_spec = client_factory.create('ns0:ObjectSpec')\r\n object_spec.obj = root_folder\r\n object_spec.skip = False\r\n object_spec.selectSet = traversal_specs\r\n return object_spec", "def generate_specs_build(self):\n from django_swagger_utils.drf_server.generators.swagger_generator import SwaggerGenerator\n\n swagger_gen = SwaggerGenerator(self.parser, self.paths, self.app_name)\n # generating request_response files\n swagger_gen.generate_request_response()\n # testing properties\n swagger_gen.generate_definitions()\n # generating global parameters\n swagger_gen.generate_parameters()\n # generating global response\n swagger_gen.generate_responses()\n # generating urls\n swagger_gen.generate_urls()", "def build(self, spec, prefix):\n make()", "def _build(self, prefilt=None):\n self.make_filiation()\n if prefilt is not None:\n self.prefilter(filt=prefilt)\n self.make_trees()\n return", "def build_tree(self, prefix, depth):\n for count, function in [[self.n_files, self.make_file],\n [self.n_children, self.make_child_recurse],\n [self.n_symlinks, self.make_symlink]]:\n for i in range(count):\n if not self.can_continue():\n return\n name = os.path.join(prefix, self.name_gen.next())\n function(name, 
depth)", "def build(self):\n self.computation_graph = tf.Graph()\n with self.computation_graph.as_default():\n\n self.walker_layer = DeepWalker(self.args, self.vocab_size, self.degrees)\n self.regularizer_layer = Regularization(self.args)\n\n self.gamma = tf.placeholder(\"float\")\n self.loss = self.walker_layer()+self.regularizer_layer(self.walker_layer)\n\n self.batch = tf.Variable(0)\n self.step = tf.placeholder(\"float\")\n\n self.learning_rate_new = tf.train.polynomial_decay(self.args.initial_learning_rate,\n self.batch,\n self.true_step_size,\n self.args.minimal_learning_rate,\n self.args.annealing_factor)\n\n self.train_op = tf.train.AdamOptimizer(self.learning_rate_new).minimize(self.loss,\n global_step=self.batch)\n\n self.init = tf.global_variables_initializer()\n\n self.weights = overlap_generator(self.args, self.graph)", "def build(self):\n self.computation_graph = tf.Graph()\n with self.computation_graph.as_default():\n\n self.walker_layer = DeepWalker(self.args, self.vocab_size, self.degrees)\n\n self.gamma = tf.placeholder(\"float\")\n self.loss = self.walker_layer()\n\n self.batch = tf.Variable(0)\n self.step = tf.placeholder(\"float\")\n\n self.learning_rate_new = tf.train.polynomial_decay(self.args.initial_learning_rate,\n self.batch,\n self.true_step_size,\n self.args.minimal_learning_rate,\n self.args.annealing_factor)\n\n self.train_op = tf.train.AdamOptimizer(self.learning_rate_new).minimize(self.loss,\n global_step=self.batch)\n\n self.init = tf.global_variables_initializer()", "def build(self):\n self.computation_graph = tf.Graph()\n with self.computation_graph.as_default():\n\n self.walker_layer = DeepWalker(self.args, self.vocab_size, self.degrees)\n self.cluster_layer = Clustering(self.args)\n self.regularizer_layer = Regularization(self.args)\n\n self.gamma = tf.placeholder(\"float\")\n self.loss = self.walker_layer()\n self.loss = self.loss + self.gamma*self.cluster_layer(self.walker_layer)\n self.loss = self.loss + self.regularizer_layer(self.walker_layer)\n\n self.batch = tf.Variable(0)\n self.step = tf.placeholder(\"float\")\n\n self.learning_rate_new = tf.train.polynomial_decay(self.args.initial_learning_rate,\n self.batch,\n self.true_step_size,\n self.args.minimal_learning_rate,\n self.args.annealing_factor)\n\n self.train_op = tf.train.AdamOptimizer(self.learning_rate_new).minimize(self.loss,\n global_step=self.batch)\n\n self.init = tf.global_variables_initializer()\n\n self.weights = overlap_generator(self.args, self.graph)", "def build_rspec(cls, slice_urn, resources, leases, flowspace, vms, rspec_version='GENI 3 request'):\n rspec = RSpec(version=rspec_version)\n\n nodes = []\n channels = []\n links = []\n\n # XXX Here it is only about mappings and hooks between ontologies\n\n for urn in resources:\n # XXX TO BE CORRECTED, this handles None values\n if not urn:\n continue\n resource = dict()\n # TODO: take into account the case where we send a dict of URNs without keys\n #resource['component_id'] = resource.pop('urn')\n resource['component_id'] = urn\n resource_hrn, resource_type = urn_to_hrn(urn) # resource['component_id'])\n # build component_manager_id\n top_auth = resource_hrn.split('.')[0]\n cm = urn.split(\"+\")\n resource['component_manager_id'] = \"%s+%s+authority+cm\" % (cm[0],top_auth)\n\n #print \"resource_type\", resource_type\n if resource_type == 'node':\n #print \"NODE\", resource, cls\n resource = cls.on_build_resource_hook(resource)\n nodes.append(resource)\n elif resource_type == 'link':\n links.append(resource)\n elif resource_type 
== 'channel':\n channels.append(resource)\n else:\n raise Exception, \"Not supported type of resource\" \n\n #for node in nodes:\n # print \"NODE:\", node\n\n rspec.version.add_nodes(nodes, rspec_content_type=\"request\")\n\n #rspec.version.add_links(links)\n #rspec.version.add_channels(channels)\n\n sfa_leases = cls.manifold_to_sfa_leases(leases, slice_urn)\n #print \"sfa_leases\", sfa_leases\n if sfa_leases:\n # SFAWRAP BUG ???\n # rspec.version.add_leases bugs with an empty set of leases\n # slice_id = leases[0]['slice_id']\n # TypeError: list indices must be integers, not str\n rspec.version.add_leases(sfa_leases, []) # XXX Empty channels for now\n \n return rspec.toxml()", "def _build_impl(self):", "def _build(self):", "def _build(self):", "def build(root):", "def path_builder(self) -> CaseEvent.TestPathBuilder:\n return self._path_builder", "def build(self) -> None:", "def build_tree(v, pop_obs_spec, stat_vars, vertical_idx):\n\n # vertical as the root\n root = {\n 'sv': ['top'],\n 'l': text_format.format_title(v),\n 't': 'p',\n 'c': 0, # count of child nodes\n 'cd': [],\n 'sv_set': set(), # used for counting child nodes\n }\n # specs with 0 constaints are of type \"value\",\n # as the level 1 cd of root\n for pos in pop_obs_spec[0]:\n ui_node = util.UiNode(pos, {}, False)\n childStatsVars = []\n # find all the statsvars belong to the node\n for sv in stat_vars[pos.key]:\n if pos.cpv == sv.pv:\n childStatsVars.append(sv.dcid)\n root['c'] += 1\n if len(childStatsVars) > 0:\n root['cd'].append({\n 'populationType': ui_node.pop_type,\n 'sv': childStatsVars,\n 'l': text_format.format_title(ui_node.text),\n 't': 'v',\n 'c': len(childStatsVars),\n 'mprop': ui_node.mprop,\n })\n\n # build specs with >= 1 constraints recursively\n for pos in pop_obs_spec[1]:\n child = build_tree_recursive(pos, 1, pop_obs_spec, stat_vars,\n )\n # For certain branch, we would like to put them under 0 pv nodes:\n if (pos.pop_type in ['EarthquakeEvent', 'CycloneEvent',\n 'MortalityEvent']):\n for pv0 in root['cd']:\n # hoist logic will break if multiple 0 pv\n if (pv0['populationType'] == pos.pop_type and pv0['mprop'] == 'count'):\n if 'cd' not in pv0:\n pv0['cd'] = []\n pv0['cd'].append(child)\n if 'sv_set' not in pv0:\n pv0['sv_set'] = set()\n pv0['sv_set'] |= child['sv_set']\n break\n else:\n root['cd'].append(child)\n root['sv_set'] |= child['sv_set']\n del child['sv_set']\n\n # update the count\n for pv0 in root['cd']:\n if 'sv_set' in pv0:\n pv0['c'] += len(pv0['sv_set'])\n del pv0['sv_set']\n root['c'] += len(root['sv_set'])\n del root['sv_set']\n statsvar_path = {}\n return traverseTree(root, [vertical_idx], statsvar_path)", "def build(_):", "def build():", "def build(self):\n self.computation_graph = tf.Graph()\n with self.computation_graph.as_default():\n\n self.walker_layer = DeepWalker(self.args, self.vocab_size, self.degrees)\n self.cluster_layer = Clustering(self.args)\n\n self.gamma = tf.placeholder(\"float\")\n self.loss = self.walker_layer()+self.gamma*self.cluster_layer(self.walker_layer)\n\n self.batch = tf.Variable(0)\n self.step = tf.placeholder(\"float\")\n\n self.learning_rate_new = tf.train.polynomial_decay(self.args.initial_learning_rate,\n self.batch,\n self.true_step_size,\n self.args.minimal_learning_rate,\n self.args.annealing_factor)\n\n self.train_op = tf.train.AdamOptimizer(self.learning_rate_new).minimize(self.loss,\n global_step=self.batch)\n\n self.init = tf.global_variables_initializer()", "def build(self):\n raise NotImplementedError(\"This should have been implemented.\")", 
"def startElement(self, name, attrs): # creating the node along the path being tracked\n name = clean_node_name(name)\n p_attrs = process_attrs(attrs)\n\n if name == \"\":\n raise ValueError, \"XML Node name cannot be empty\"\n\n elif name == \"requirement\":\n self.obj_depth.append(requirement_q2class(p_attrs))\n\n elif name == \"prompt\":\n self.obj_depth.append(prompt_q2class(p_attrs))\n\n elif name == \"house\":\n self.obj_depth.append(house_q2class(p_attrs))\n\n elif name == \"visible\":\n self.obj_depth.append(visible_q2class(p_attrs))\n\n elif name == \"computer\":\n self.obj_depth.append(computer_q2class(p_attrs))\n\n elif name == \"special\":\n self.obj_depth.append(special_q2class(p_attrs))\n\n elif name == \"title\":\n self.obj_depth.append(title_q2class(p_attrs))\n\n elif name == \"memories\":\n self.obj_depth.append(memories_q2class(p_attrs))\n\n elif name == \"tip\":\n self.obj_depth.append(tip_q2class(p_attrs))\n\n elif name == \"score\":\n self.obj_depth.append(score_q2class(p_attrs))\n\n elif name == \"exit\":\n self.obj_depth.append(exit_q2class(p_attrs))\n\n elif name == \"inventory\":\n self.obj_depth.append(inventory_q2class(p_attrs))\n\n elif name == \"memory\":\n self.obj_depth.append(memory_q2class(p_attrs))\n\n elif name == \"prereq\":\n self.obj_depth.append(prereq_q2class(p_attrs))\n\n elif name == \"desc\":\n self.obj_depth.append(desc_q2class(p_attrs))\n\n elif name == \"room\":\n self.obj_depth.append(room_q2class(p_attrs))\n\n elif name == \"player\":\n self.obj_depth.append(player_q2class(p_attrs))\n\n elif name == \"l\":\n self.obj_depth.append(l_q2class(p_attrs))\n\n elif name == \"o\":\n self.obj_depth.append(o_q2class(p_attrs))\n\n elif name == \"item\":\n self.obj_depth.append(item_q2class(p_attrs))\n\n elif name == \"intro\":\n self.obj_depth.append(intro_q2class(p_attrs))\n\n elif name == \"t\":\n self.obj_depth.append(t_q2class(p_attrs))\n\n self.char_buffer = []\n self.last_processed = \"start\"", "def build_rspec(cls, slice_urn, resources, leases, flowspace, vms, rspec_version='GENI 3 request'):\n import time\n start_time = None\n end_time = None\n\n # Default duration for WiLab is 2 hours\n duration_default = 120\n for lease in leases:\n if 'end_time' in lease:\n end_time = lease['end_time']\n start_time = lease['start_time']\n break\n\n if start_time is None:\n # start_time = Now\n start_time = time.time()\n\n if end_time is None:\n end_time = int(start_time + duration_default*60)\n #raise Exception, \"end_time is mandatory in leases\"\n\n # duration in seconds from now till end_time\n duration = end_time - start_time\n # duration in minutes\n duration = duration / 60\n duration = int(duration)\n if duration < duration_default:\n duration = duration_default\n Log.tmp(\"start_time = \",start_time)\n Log.tmp(\"end_time = \",end_time)\n Log.tmp(\"duration = \",duration)\n # RSpec will have expires date = now + duration\n rspec = RSpec(version=rspec_version, ttl=duration, expires=end_time)\n\n nodes = []\n channels = []\n links = []\n\n # XXX Here it is only about mappings and hooks between ontologies\n i = 0\n for urn in resources:\n # XXX TO BE CORRECTED, this handles None values\n if not urn:\n continue\n resource = dict()\n # TODO: take into account the case where we send a dict of URNs without keys\n #resource['component_id'] = resource.pop('urn')\n resource['component_id'] = urn\n resource_hrn, resource_type = urn_to_hrn(urn) # resource['component_id'])\n # build component_manager_id\n\n # The only change for WiLab compared to Generic 
SFAWrapParser\n cm = urn.split(\"+\")\n resource['component_manager_id'] = \"%s+%s+authority+cm\" % (cm[0],cm[1])\n\n #print \"resource_type\", resource_type\n if resource_type == 'node':\n #print \"NODE\", resource, cls\n resource['client_id'] = \"PC\" + str(i)\n resource = cls.on_build_resource_hook(resource)\n nodes.append(resource)\n elif resource_type == 'link':\n links.append(resource)\n elif resource_type == 'channel':\n channels.append(resource)\n else:\n raise Exception, \"Not supported type of resource\" \n\n i = i + 1\n #for node in nodes:\n # print \"NODE:\", node\n\n rspec.version.add_nodes(nodes, rspec_content_type=\"request\")\n #rspec.version.add_links(links)\n #rspec.version.add_channels(channels)\n\n #sfa_leases = cls.manifold_to_sfa_leases(leases, slice_urn)\n ##print \"sfa_leases\", sfa_leases\n #if sfa_leases:\n # # SFAWRAP BUG ???\n # # rspec.version.add_leases bugs with an empty set of leases\n # # slice_id = leases[0]['slice_id']\n # # TypeError: list indices must be integers, not str\n # rspec.version.add_leases(sfa_leases, []) # XXX Empty channels for now\n return rspec.toxml()", "def build(self):\n raise NotImplementedError", "def _internal_build(self):\n self.nodes = self.__tree.Nodes()\n self.edges = self.__tree.Edges()\n self.augmentedEdges = {}\n for key, val in self.__tree.AugmentedEdges().items():\n self.augmentedEdges[key] = list(val)\n self.root = self.__tree.Root()\n\n seen = set()\n self.branches = set()\n\n # Find all of the branching nodes in the tree, degree > 1\n # That is, they appear in more than one edge\n for e1, e2 in self.edges:\n if e1 not in seen:\n seen.add(e1)\n else:\n self.branches.add(e1)\n\n if e2 not in seen:\n seen.add(e2)\n else:\n self.branches.add(e2)\n\n # The nodes that are not branches are leaves\n self.leaves = set(self.nodes.keys()) - self.branches\n self.leaves.remove(self.root)", "def build(self): \n self.computation_graph = tf.Graph()\n with self.computation_graph.as_default():\n\n self.factorization_layer = Factorization(self.args, self.vocab_size)\n self.regularizer_layer = Regularization(self.args)\n\n self.gamma = tf.placeholder(\"float\")\n self.loss = self.factorization_layer()\n\n self.batch = tf.Variable(0)\n self.step = tf.placeholder(\"float\")\n \n self.learning_rate_new = tf.train.polynomial_decay(self.args.initial_learning_rate,\n self.batch,\n self.true_step_size,\n self.args.minimal_learning_rate,\n self.args.annealing_factor)\n \n self.train_op = tf.train.AdamOptimizer(self.learning_rate_new).minimize(self.loss, global_step = self.batch)\n \n self.init = tf.global_variables_initializer()\n\n self.weights = overlap_generator(self.args, self.graph)", "def _build_octree(self):\n\n # cleanup old tree\n self._nodes_positions = []\n self._nodes_mass = []\n self._nodes_sizes = []\n self._nodes_children_types = []\n self._nodes_children_ids = []\n\n min_pos = np.min(self._positions)\n max_pos = np.max(self._positions)\n\n self._build_octree_branch(\n bodies=list(range(self.bodies)),\n coords_min=np.array([min_pos] * 3),\n coords_max=np.array([max_pos] * 3)\n )", "def __build_test_model_children_tree_1(self) -> Model:\n self.model_builder.clear()\n\n r_a = SystemFile(\"a\", 1024, True)\n r_aa = SystemFile(\"aa\", 512, False)\n r_a.add_child(r_aa)\n r_ab = SystemFile(\"ab\", 512, False)\n r_a.add_child(r_ab)\n r_b = SystemFile(\"b\", 3090, True)\n r_ba = SystemFile(\"ba\", 2048, True)\n r_b.add_child(r_ba)\n r_baa = SystemFile(\"baa\", 2048, False)\n r_ba.add_child(r_baa)\n r_bb = SystemFile(\"bb\", 42, True) # only 
in remote\n r_b.add_child(r_bb)\n r_bba = SystemFile(\"bba\", 42, False) # only in remote\n r_bb.add_child(r_bba)\n r_bd = SystemFile(\"bd\", 1000, False)\n r_b.add_child(r_bd)\n r_c = SystemFile(\"c\", 1234, False) # only in remote\n r_d = SystemFile(\"d\", 5678, True) # only in remote\n r_da = SystemFile(\"da\", 5678, False) # only in remote\n r_d.add_child(r_da)\n\n l_a = SystemFile(\"a\", 1024, True)\n l_aa = SystemFile(\"aa\", 512, False)\n l_a.add_child(l_aa)\n l_ab = SystemFile(\"ab\", 512, False)\n l_a.add_child(l_ab)\n l_b = SystemFile(\"b\", 1611, True)\n l_ba = SystemFile(\"ba\", 512, True)\n l_b.add_child(l_ba)\n l_baa = SystemFile(\"baa\", 512, False)\n l_ba.add_child(l_baa)\n l_bc = SystemFile(\"bc\", 99, True) # only in local\n l_b.add_child(l_bc)\n l_bca = SystemFile(\"bca\", 99, False) # only in local\n l_bc.add_child(l_bca)\n l_bd = SystemFile(\"bd\", 1000, False)\n l_b.add_child(l_bd)\n\n s_b = LftpJobStatus(0, LftpJobStatus.Type.MIRROR, LftpJobStatus.State.RUNNING, \"b\", \"\")\n s_b.total_transfer_state = LftpJobStatus.TransferState(1611, 3090, 52, 10, 1000)\n s_b.add_active_file_transfer_state(\"ba/baa\", LftpJobStatus.TransferState(512, 2048, 25, 5, 500))\n s_c = LftpJobStatus(0, LftpJobStatus.Type.PGET, LftpJobStatus.State.QUEUED, \"c\", \"\")\n s_d = LftpJobStatus(0, LftpJobStatus.Type.MIRROR, LftpJobStatus.State.QUEUED, \"d\", \"\")\n\n self.model_builder.set_remote_files([r_a, r_b, r_c, r_d])\n self.model_builder.set_local_files([l_a, l_b])\n self.model_builder.set_lftp_statuses([s_b, s_c, s_d])\n return self.model_builder.build_model()", "def build(self): \n self.computation_graph = tf.Graph()\n with self.computation_graph.as_default():\n\n self.factorization_layer = Factorization(self.args, self.vocab_size)\n self.regularizer_layer = Regularization(self.args)\n\n self.gamma = tf.placeholder(\"float\")\n self.loss = self.factorization_layer()+self.regularizer_layer(self.factorization_layer)\n\n self.batch = tf.Variable(0)\n self.step = tf.placeholder(\"float\")\n \n self.learning_rate_new = tf.train.polynomial_decay(self.args.initial_learning_rate,\n self.batch,\n self.true_step_size,\n self.args.minimal_learning_rate,\n self.args.annealing_factor)\n \n self.train_op = tf.train.AdamOptimizer(self.learning_rate_new).minimize(self.loss, global_step = self.batch)\n \n self.init = tf.global_variables_initializer()\n\n self.weights = overlap_generator(self.args, self.graph)", "def _buildtree(self):\n self.pricetree = np.zeros((self.steps+1,self.steps+1))\n self.pricetree[0][0] = self.p\n for j in range(self.steps):\n for i in range(j+1):\n self.pricetree[j+1][i+1] = self.pricetree[j][i]*self.down\n self.pricetree[j+1][0] = self.pricetree[j][0]*self.up", "def _build_graph(self):\n pass", "def build (self):\n raise NotImplementedError", "def build(self, graph, name_scopes, training):\n raise NotImplementedError('Must be overridden by concrete subclass')", "def tree_construct(self, *args, **kwargs):\n l_files = []\n d_constructCallback = {}\n fn_constructCallback = None\n d_probe = {}\n l_range = []\n\n for k, v in kwargs.items():\n if k == 'l_files': l_files = v\n if k == 'constructCallback': fn_constructCallback = v\n if k == 'd_probe': d_probe = v\n\n if d_probe: l_files = d_probe['l_files']\n index = 0\n total = len(l_files)\n if int(self.verbosityLevel) and self.toConsole():\n l_range = tqdm(l_files, desc = ' Constructing tree')\n else:\n l_range = l_files\n for l_series in l_range:\n if len(l_series):\n str_path = os.path.dirname(l_series[0])\n l_series = [ 
os.path.basename(i) for i in l_series]\n # self.simpleProgress_show(index, total)\n self.d_inputTree[str_path] = l_series\n if fn_constructCallback:\n kwargs['path'] = str_path\n d_constructCallback = fn_constructCallback(l_series, **kwargs)\n self.d_inputTreeCallback[str_path] = d_constructCallback\n self.d_outputTree[str_path] = \"\"\n index += 1\n return {\n 'status': True,\n 'd_constructCallback': d_constructCallback,\n 'totalNumberOfAllSeries': index,\n 'd_probe': d_probe\n }", "def build(self, datas):\n\t\t# Browse the list of files\n\t\tfor data in datas:\n\t\t\tif isString(data):\n\t\t\t\tdata = Data(data)\n\t\t\telif isList(data):\n\t\t\t\tstate = None\n\t\t\t\tname = \"\"\n\t\t\t\tif len(data) >= 1:\n\t\t\t\t\tname = data[0]\n\t\t\t\tif len(data) >= 2:\n\t\t\t\t\tstate = data[1]\n\t\t\t\tdata = Data(name, state)\n\t\t\t# Cut the path of the file folder and piece\n\t\t\tself.addNode(self.tree,data.path(),data)", "def _build(self, **kwargs):", "def build(self): \n self.computation_graph = tf.Graph()\n with self.computation_graph.as_default():\n\n self.factorization_layer = Factorization(self.args, self.vocab_size)\n self.cluster_layer = Clustering(self.args)\n self.regularizer_layer = Regularization(self.args)\n\n self.gamma = tf.placeholder(\"float\")\n self.loss = self.factorization_layer()+self.gamma*self.cluster_layer(self.factorization_layer)+self.regularizer_layer(self.factorization_layer)\n\n self.batch = tf.Variable(0)\n self.step = tf.placeholder(\"float\")\n \n self.learning_rate_new = tf.train.polynomial_decay(self.args.initial_learning_rate,\n self.batch,\n self.true_step_size,\n self.args.minimal_learning_rate,\n self.args.annealing_factor)\n \n self.train_op = tf.train.AdamOptimizer(self.learning_rate_new).minimize(self.loss, global_step = self.batch)\n \n self.init = tf.global_variables_initializer()\n\n self.weights = overlap_generator(self.args, self.graph)", "def get_specification(self) -> Dict:\n specification = {\n 'version': VERSION,\n 'metadata': {\n 'twoColumn': True,\n 'layout': self.autolayout\n },\n 'nodes': []\n }\n\n def strip_io(io_list: list, direction) -> list:\n \"\"\"\n Strips every input/output from metadata and leaves only\n `name` and `type` keys.\n \"\"\"\n return [\n {\n 'name': io['name'],\n 'type': io['type'],\n 'direction': direction\n }\n for io in io_list\n ]\n\n toremove = set()\n for key, node in self.nodes.items():\n try:\n node_cls = load_class(node.cls_name)\n except (ModuleNotFoundError, ImportError, Exception) as err:\n msg = f'Could not add {node_cls}. 
Reason:'\n _LOGGER.warn('-' * len(msg))\n _LOGGER.warn(msg)\n _LOGGER.warn(err)\n _LOGGER.warn('-' * len(msg))\n toremove.add(key)\n continue\n parameterschema = node_cls.form_parameterschema()\n\n properties = []\n for name, props in parameterschema['properties'].items():\n new_property = {'name': name}\n\n if 'default' in props:\n new_property['default'] = props['default']\n\n if 'description' in props:\n new_property['description'] = props['description']\n\n def add_default(default_val):\n if new_property.get('default') is None:\n new_property['default'] = default_val\n\n # Case for an input with range defined\n if 'enum' in props:\n new_property['type'] = 'select'\n new_property['values'] = list(map(str, props['enum']))\n add_default(new_property['values'][0])\n # Case for a single value input\n elif 'type' in props:\n if 'array' in props['type']:\n new_property['type'] = 'list'\n if 'items' in props and 'type' in props['items']:\n dtype = props['items']['type']\n new_property['dtype'] = dtype\n add_default([])\n elif 'boolean' in props['type']:\n new_property['type'] = 'checkbox'\n add_default(False)\n elif 'string' in props['type']:\n new_property['type'] = 'text'\n add_default('')\n elif 'integer' in props['type']:\n new_property['type'] = 'integer'\n add_default(0)\n elif 'number' in props['type']:\n new_property['type'] = 'number'\n add_default(0)\n elif 'object' in props['type']:\n # Object arguments should be defined in specification\n # as node inputs, rather than properties\n new_property = None\n else:\n new_property['type'] = 'text'\n add_default('')\n # If no type is specified then text is used\n else:\n new_property['type'] = 'text'\n add_default('')\n\n if new_property is not None:\n properties.append(new_property)\n\n specification['nodes'].append({\n 'name': node.name,\n 'type': node.type,\n 'category': node.category,\n 'properties': properties,\n 'interfaces': strip_io(\n self.io_mapping[node.type]['inputs'],\n 'input'\n ) + strip_io(\n self.io_mapping[node.type]['outputs'],\n 'output'\n )\n })\n\n for key in toremove:\n del self.nodes[key]\n return specification", "def __init__(self, options, build_revision):\n\n self.options = options\n self._src_dir = os.path.abspath(options.src_dir)\n self._chrome_dir = os.path.join(self._src_dir, 'chrome')\n # TODO: This scode should not be grabbing so deeply into WebKit.\n # Worse, this code ends up looking at top-of-tree WebKit\n # instead of the revision in DEPS.\n self._webkit_dir = os.path.join(self._src_dir, 'third_party', 'WebKit',\n 'Source', 'WebCore')\n self._v8_dir = os.path.join(self._src_dir, 'v8')\n # TODO: need to get the build *output* directory passed in instead so Linux\n # and Mac don't have to walk up a directory to get to the right directory.\n if chromium_utils.IsWindows():\n self._build_dir = os.path.join(options.build_dir, options.target)\n self._tool_dir = os.path.join(self._chrome_dir, 'tools', 'build', 'win')\n elif chromium_utils.IsLinux():\n self._build_dir = os.path.join(os.path.dirname(options.build_dir),\n 'out', options.target)\n self._tool_dir = os.path.join(self._chrome_dir, 'tools', 'build', 'linux')\n elif chromium_utils.IsMac():\n self._build_dir = os.path.join(os.path.dirname(options.build_dir),\n 'xcodebuild', options.target)\n self._tool_dir = os.path.join(self._chrome_dir, 'tools', 'build', 'mac')\n else:\n raise NotImplementedError(\n 'Platform \"%s\" is not currently supported.' 
% sys.platform)\n self._staging_dir = slave_utils.GetStagingDir(self._src_dir)\n\n self._symbol_dir_base = options.dirs['symbol_dir_base']\n self._www_dir_base = options.dirs['www_dir_base']\n self._build_name = slave_utils.SlaveBuildName(self._src_dir)\n self._symbol_dir_base = os.path.join(self._symbol_dir_base,\n self._build_name)\n self._www_dir_base = os.path.join(self._www_dir_base, self._build_name)\n\n self._version_file = os.path.join(self._chrome_dir, 'VERSION')\n\n if options.default_chromium_revision:\n self._chromium_revision = options.default_chromium_revision\n else:\n self._chromium_revision = slave_utils.SubversionRevision(self._chrome_dir)\n if options.default_webkit_revision:\n self._webkit_revision = options.default_webkit_revision\n else:\n self._webkit_revision = slave_utils.SubversionRevision(self._webkit_dir)\n if options.default_v8_revision:\n self._v8_revision = options.default_v8_revision\n else:\n self._v8_revision = slave_utils.SubversionRevision(self._v8_dir)\n self.last_change_file = os.path.join(self._staging_dir, 'LAST_CHANGE')\n # The REVISIONS file will record the revisions information of the main\n # components Chromium/WebKit/V8.\n self.revisions_path = os.path.join(self._staging_dir, 'REVISIONS')\n self._build_revision = build_revision\n # Will be initialized in GetLastBuildRevision.\n self.last_chromium_revision = None\n self.last_webkit_revision = None\n self.last_v8_revision = None\n\n self._files_file = os.path.join(self._tool_dir,\n archive_utils.FILES_FILENAME)\n self._test_files = self.BuildOldFilesList(TEST_FILE_NAME)\n\n self._dual_upload = options.factory_properties.get('dual_upload', False)\n self._archive_files = None", "def build(self):", "def build(self):", "def build(self):", "def build(self):\n level_dict = {'method': '',\n 'basis': None,\n 'auxiliary_basis': None,\n 'dispersion': None,\n 'cabs': None,\n 'method_type': None,\n 'software': None,\n 'software_version': None,\n 'compatible_ess': None,\n 'solvation_method': None,\n 'solvent': None,\n 'solvation_scheme_level': None,\n 'args': None}\n allowed_keys = list(level_dict.keys())\n\n if isinstance(self.repr, str):\n if ' ' in self.repr:\n # illegal inputs like 'dlpno-ccsd(t)/def2-svp def2-svp/c' or 'b3 lyp'\n raise ValueError(f'{self.repr} has empty spaces. Please use a dictionary format '\n f'to clearly specify method, basis, auxiliary basis, and dispersion in this case. '\n f'See documentation for more details.')\n if self.repr.count('/') >= 2:\n # illegal inputs like 'dlpno-ccsd(t)/def2-svp/def2-svp/c'\n raise ValueError(f'{self.repr} has multiple slashes. Please use a dictionary format '\n f'to specify method, basis, auxiliary basis, and dispersion in this case. 
'\n f'See documentation for more details.')\n if '/' not in self.repr:\n # e.g., 'AM1', 'XTB', 'CBS-QB3'\n # Note that this function is not designed to distinguish between composite and semi-empirical methods.\n level_dict['method'] = self.repr\n else:\n splits = self.repr.split('/')\n level_dict['method'] = splits[0]\n level_dict['basis'] = splits[1]\n\n elif isinstance(self.repr, dict):\n # also treats representations of LevelOfTheory.as_dict from a restart file\n if 'method' not in self.repr.keys():\n raise ValueError(f'The repr dictionary argument must at least have a \"method\" key, got:\\n{self.repr}')\n for key, value in self.repr.items():\n if key in allowed_keys and value:\n level_dict[key] = value\n elif key not in allowed_keys:\n raise ValueError(f'Got an illegal key \"{key}\" in level of theory dictionary representation'\n f'\\n{self.repr}')\n\n elif isinstance(self.repr, Level):\n level_dict = self.repr.as_dict()\n\n else:\n raise ValueError(f'The repr argument must be either a string, a dictionary or a Level type.\\n'\n f'Got {self.repr} which is a {type(self.repr)}.')\n\n self.repr = None # reset\n self.__init__(**level_dict)", "def __init__(self, **kwargs):\n self.swagger_types = {\n 'key': 'str',\n 'display_name': 'str',\n 'description': 'str',\n 'glossary_key': 'str',\n 'parent_term_key': 'str',\n 'is_allowed_to_have_child_terms': 'bool',\n 'path': 'str',\n 'lifecycle_state': 'str',\n 'time_created': 'datetime',\n 'time_updated': 'datetime',\n 'created_by_id': 'str',\n 'updated_by_id': 'str',\n 'owner': 'str',\n 'workflow_status': 'str',\n 'uri': 'str',\n 'associated_object_count': 'int',\n 'associated_objects': 'list[TermAssociatedObject]'\n }\n\n self.attribute_map = {\n 'key': 'key',\n 'display_name': 'displayName',\n 'description': 'description',\n 'glossary_key': 'glossaryKey',\n 'parent_term_key': 'parentTermKey',\n 'is_allowed_to_have_child_terms': 'isAllowedToHaveChildTerms',\n 'path': 'path',\n 'lifecycle_state': 'lifecycleState',\n 'time_created': 'timeCreated',\n 'time_updated': 'timeUpdated',\n 'created_by_id': 'createdById',\n 'updated_by_id': 'updatedById',\n 'owner': 'owner',\n 'workflow_status': 'workflowStatus',\n 'uri': 'uri',\n 'associated_object_count': 'associatedObjectCount',\n 'associated_objects': 'associatedObjects'\n }\n\n self._key = None\n self._display_name = None\n self._description = None\n self._glossary_key = None\n self._parent_term_key = None\n self._is_allowed_to_have_child_terms = None\n self._path = None\n self._lifecycle_state = None\n self._time_created = None\n self._time_updated = None\n self._created_by_id = None\n self._updated_by_id = None\n self._owner = None\n self._workflow_status = None\n self._uri = None\n self._associated_object_count = None\n self._associated_objects = None", "def makeTree(self):\n return makeTree(self.events,self.outTree)", "def _build_dependency_graph(self):\n\n #\n # Find the binary roles\n #\n nodes, roles = self._find_roles()\n\n #\n # Build the graph\n #\n working_list = list(set(nodes.keys()))\n\n setters = [b for b, r in roles.items() if Role.SETTER in r or Role.SETTER_GETTER in r]\n\n while working_list:\n b = working_list[0]\n working_list = working_list[1:]\n\n if nodes[b] not in self._graph:\n self._graph[nodes[b]] = []\n\n # it's a root node\n if Role.GETTER not in roles[b] and Role.SETTER_GETTER not in roles[b]:\n nodes[b].set_root()\n\n # takes params from some other binary\n else:\n is_orphan = True\n for setter in setters:\n setter_strings_set = set(nodes[setter].role_strings)\n 
node_strings_set = set(nodes[b].role_strings)\n if setter_strings_set.intersection(node_strings_set):\n if nodes[setter] not in self._graph:\n self._graph[nodes[setter]] = []\n self._graph[nodes[setter]].append(nodes[b])\n is_orphan = False\n\n # mark orphans\n if is_orphan:\n nodes[b].set_orphan()\n\n # Clean up\n for k, childs in self._graph.iteritems():\n self._graph[k] = list(set(childs))\n\n # set leaves\n for k, c in self._graph.iteritems():\n if not c:\n k.set_leaf()\n\n # post processing:\n # remove those nodes that are not orphans\n # and are not network parsers\n\n nodes = self.nodes\n children = [c for x in self._graph.values() for c in x if x]\n leafs_non_orphan = [n for n in nodes if n.leaf and not n.orphan]\n seed_names = [x.split('/')[-1] for x in self._seed_bins]\n spurious_nodes = [n for n in leafs_non_orphan if n not in children and n.bin.split('/')[-1] not in seed_names]\n for to_rem in spurious_nodes:\n del self._graph[to_rem]", "def build(self): \n self.computation_graph = tf.Graph()\n with self.computation_graph.as_default():\n\n self.factorization_layer = Factorization(self.args, self.vocab_size)\n self.cluster_layer = Clustering(self.args)\n\n self.gamma = tf.placeholder(\"float\")\n self.loss = self.factorization_layer()+self.gamma*self.cluster_layer(self.factorization_layer)\n\n self.batch = tf.Variable(0)\n self.step = tf.placeholder(\"float\")\n \n self.learning_rate_new = tf.train.polynomial_decay(self.args.initial_learning_rate,\n self.batch,\n self.true_step_size,\n self.args.minimal_learning_rate,\n self.args.annealing_factor)\n \n self.train_op = tf.train.AdamOptimizer(self.learning_rate_new).minimize(self.loss, global_step = self.batch)\n \n self.init = tf.global_variables_initializer()\n\n self.weights = overlap_generator(self.args, self.graph)", "def __init__( # pylint: disable=too-many-statements\n self,\n step: Optional[Step], # pylint: disable=redefined-outer-name\n goal: Optional[str],\n **kwargs: Any,\n ) -> None:\n #: The parent invocation, if any.\n self.parent: Optional[Invocation] = Invocation.current\n\n #: The step being invoked.\n self.step = step\n\n #: The goal being built.\n self.goal = goal\n\n #: The arguments to the invocation.\n self.kwargs = kwargs\n\n #: The full name (including parameters) of the invocation.\n self.name = \"make\"\n if self.step is not None:\n self.name = self.step.name\n args_string = _dict_to_str(kwargs)\n if args_string:\n self.name += \"/\"\n self.name += args_string\n\n assert (self.parent is None) == (step is None)\n\n #: How many sub-invocations were created so far.\n self.sub_count = 0\n\n if self.parent is None:\n #: A short unique stack to identify invocations in the log.\n self.stack: str = \"#0\"\n else:\n self.parent.sub_count += 1\n if self.parent.stack == \"#0\":\n self.stack = \"#\" + str(self.parent.sub_count)\n else:\n self.stack = self.parent.stack + \".\" + str(self.parent.sub_count)\n\n #: The full name used for logging.\n self.log = self.name\n global jobs # pylint: disable=invalid-name\n if _is_test or jobs.value > 1:\n self.log = self.stack + \" - \" + self.name\n\n self._verify_no_loop()\n\n #: A condition variable to wait on for this invocation.\n self.condition: Optional[asyncio.Condition] = None\n\n #: The required input targets (phony or files) the invocations depends on.\n self.required: List[str] = []\n\n #: The required locked names (and whether to lock them for read or write).\n self.required_locks: Dict[str, bool] = {}\n\n #: Whether the required locks have been obtained.\n 
self.has_locks: bool = False\n\n #: The newest input file, if any.\n self.newest_input_path: Optional[str] = None\n\n #: The modification time of the newest input file, if any.\n self.newest_input_mtime_ns = 0\n\n #: The queued async actions for creating the input files.\n self.async_actions: List[Coroutine] = []\n\n #: The expanded outputs for access by the step function.\n self.expanded_outputs: List[str] = []\n\n #: The output files that existed prior to the invocation.\n self.initial_outputs: List[str] = []\n\n #: The phony outputs, if any.\n self.phony_outputs: List[str] = []\n\n #: The built outputs, if any.\n self.built_outputs: List[str] = []\n\n #: A pattern for some missing output file(s), if any.\n self.missing_output: Optional[str] = None\n\n #: A path for some missing old built output file, if any.\n self.abandoned_output: Optional[str] = None\n\n #: The oldest existing output file path, or None if some output files are missing.\n self.oldest_output_path: Optional[str] = None\n\n #: The modification time of the oldest existing output path.\n self.oldest_output_mtime_ns = 0\n\n #: The reason to abort this invocation, if any.\n self.exception: Optional[StepException] = None\n\n #: The old persistent actions (from the disk) for ensuring rebuild when actions change.\n self.old_persistent_actions: List[PersistentAction] = []\n\n #: The old list of outputs (from the disk) for ensuring complete dynamic outputs.\n self.old_persistent_outputs: List[str] = []\n\n #: The new persistent actions (from the code) for ensuring rebuild when actions change.\n self.new_persistent_actions: List[PersistentAction] = []\n\n #: Whether we already decided to run actions.\n self.must_run_action = False\n\n #: Whether we actually skipped all actions so far.\n self.did_skip_actions = False\n\n #: Whether we actually run any actions.\n self.did_run_actions = False\n\n #: Whether we should remove stale outputs before running the next action.\n global remove_stale_outputs # pylint: disable=invalid-name\n self.should_remove_stale_outputs = remove_stale_outputs.value", "def build(self):\n pass", "def build(self):\n pass", "def build_docs():\n docs = []\n for base_id in range(DOCUMENTS_PER_LEVEL):\n d = jina_pb2.Document()\n d.granularity = 0\n d.adjacency = 0\n d.id = base_id\n docs.append(d)\n iterate_build(d, 0, 2, 0, 2)\n return docs", "def build_graph(self):\n pass", "def build_document(self):\n pass", "def __init__(self, source_root, build_root, reporter):\n self._source_root = os.path.abspath(source_root)\n self._build_root = os.path.abspath(build_root)\n self._reporter = reporter\n self._docset = None\n self._docmap = dict()\n self._dirs = dict()\n self._files = dict()\n self._modules = dict()\n self._classes = set()\n self._namespaces = set()\n self._members = set()\n self._walk_dir(os.path.join(self._source_root, 'src'))\n for fileobj in self.get_files():\n if fileobj and fileobj.is_source_file() and not fileobj.is_external():\n (basedir, name) = os.path.split(fileobj.get_abspath())\n (basename, ext) = os.path.splitext(name)\n header = self.get_file(os.path.join(basedir, basename + '.h'))\n if not header and ext == '.cu':\n header = self.get_file(os.path.join(basedir, basename + '.cuh'))\n if not header and fileobj.is_test_file():\n basedir = os.path.dirname(basedir)\n header = self.get_file(os.path.join(basedir, basename + '.h'))\n if not header:\n # Somewhat of a hack; currently, the tests for\n # analysisdata/modules/ and trajectoryanalysis/modules/\n # is at the top-level tests directory.\n # 
TODO: It could be clearer to split the tests so that\n # there would be a separate modules/tests/.\n header = self.get_file(os.path.join(basedir, 'modules', basename + '.h'))\n if not header and basename.endswith('_tests'):\n header = self.get_file(os.path.join(basedir, basename[:-6] + '.h'))\n if header:\n fileobj.set_main_header(header)\n rootdir = self._get_dir(os.path.join('src', 'gromacs'))\n for subdir in rootdir.get_subdirectories():\n self._create_module(subdir)\n rootdir = self._get_dir(os.path.join('src', 'testutils'))\n self._create_module(rootdir)", "def _build(self):\n raise NotImplementedError()", "def build(self, new_sorted_scope: SortedSet, new_sorted_scope_vids: SortedSet, new_table: np.array):\n f = Factor()\n f.scope_vars = new_sorted_scope\n # f.scope_vids = SortedSet(reduce_tuples(f.scope_vars))\n f.scope_vids = new_sorted_scope_vids\n f.table = new_table\n f.type = self.type # this is a string 'P', 'U'\n return f", "def _build(self, # pylint: disable=arguments-differ\n features, labels, params=None, config=None):\n # Pre-process features and labels\n features, labels = self._preprocess(features, labels)\n results = self._call_graph_fn(features=features, labels=labels)\n\n loss = None\n train_op = None\n eval_metrics = None\n if Modes.is_infer(self.mode):\n predictions = self._build_predictions(results=results, features=features, labels=labels)\n extra_ops = self._build_extra_ops(results=results, features=features, labels=labels)\n else:\n _, loss = self._build_loss(results, features, labels)\n eval_metrics = self._build_eval_metrics(results, features, labels)\n\n if Modes.is_train(self.mode):\n train_op = self._build_train_op(loss)\n self._build_summary_op(results=results, features=features, labels=labels)\n\n predictions = self._build_predictions(results=results, features=features, labels=labels)\n extra_ops = self._build_extra_ops(results=results, features=features, labels=labels)\n\n track(predictions, tf.GraphKeys.PREDICTIONS)\n\n return EstimatorSpec(mode=self.mode,\n predictions=predictions,\n loss=loss,\n extra_ops=extra_ops,\n train_op=train_op,\n eval_metric_ops=eval_metrics)", "def _build_pod_spec(self):\n logger.debug(\"Building Pod Spec\")\n crds = []\n try:\n crds = [\n yaml.load(Path(f).read_text())\n for f in [\n \"files/configs.config.gatekeeper.sh.yaml\",\n \"files/constrainttemplates.templates.gatekeeper.sh.yaml\",\n \"files/constraintpodstatuses.status.gatekeeper.sh.yaml\",\n \"files/constrainttemplatepodstatuses.status.gatekeeper.sh.yaml\",\n ]\n ]\n except yaml.YAMLError as exc:\n logger.error(\"Error in configuration file:\", exc)\n\n crd_objects = [\n CustomResourceDefintion(crd[\"metadata\"][\"name\"], crd[\"spec\"])\n for crd in crds\n ]\n\n config = self.model.config\n spec_template = {}\n with open(\"files/pod-spec.yaml.jinja2\") as fh:\n spec_template = Template(fh.read())\n\n try:\n image_details = self.image.fetch()\n except OCIImageResourceError as e:\n self.model.unit.status = e.status\n return\n\n template_args = {\n \"crds\": crd_objects,\n \"image_details\": image_details,\n \"imagePullPolicy\": config[\"imagePullPolicy\"],\n \"app_name\": self.app.name,\n \"audit_cli_args\": self._audit_cli_args(),\n \"namespace\": os.environ[\"JUJU_MODEL_NAME\"],\n }\n\n spec = yaml.load(spec_template.render(**template_args))\n\n print(f\"Pod spec: {spec}\")\n return spec", "def build(self):\n if not hasattr(self, 'subtitle'):\n self.subtitle = self.data_code['subtitle']\n #print('ntimes=%s nelements=%s ntotal=%s subtitle=%s' % (\n #self.ntimes, 
self.nelements, self.ntotal, self.subtitle))\n if self.is_built:\n return\n nnodes = 1\n\n #self.names = []\n #self.nelements //= nnodes\n self.nelements //= self.ntimes\n self.ntotal = self.nelements * nnodes * 2\n #self.ntotal\n self.itime = 0\n self.ielement = 0\n self.itotal = 0\n #print('ntotal=%s ntimes=%s nelements=%s' % (self.ntotal, self.ntimes, self.nelements))\n\n #print(\"ntimes=%s nelements=%s ntotal=%s\" % (self.ntimes, self.nelements, self.ntotal))\n self._times = np.zeros(self.ntimes, 'float32')\n #self.ntotal = self.nelements * nnodes\n\n self.element_node = np.zeros((self.ntotal, 2), 'int32')\n\n # the number is messed up because of the offset for the element's properties\n if not self.nelements * nnodes * 2 == self.ntotal:\n msg = 'ntimes=%s nelements=%s nnodes=%s ne*nn=%s ntotal=%s' % (\n self.ntimes, self.nelements, nnodes, self.nelements * nnodes,\n self.ntotal)\n raise RuntimeError(msg)\n\n # [angle, sc, sd, se, sf]\n self.data = np.zeros((self.ntimes, self.ntotal, 5), 'complex64')", "def init(self):\n if not self.needs_build:\n return self.tree\n if self.new_values and self.tree:\n ixs = self.new_values\n search_tree = self.tree\n else:\n search_tree = {}\n ixs = range(len(self.vos))\n for ix in ixs:\n vo = self.vos[ix]\n for label, label_value in vo.items():\n if label == \"value\":\n continue\n if label not in search_tree:\n search_tree[label] = defaultdict(set)\n search_tree[label][label_value].add(ix)\n self.tree = search_tree\n self.needs_build = False\n self.new_values = None\n return self.tree", "def __init__(self, \r\n initial_concept_uri=None, \r\n lang=None, \r\n broader=True, \r\n narrower=False, \r\n verbose=False,\r\n refresh=False):\r\n def get_cached_skos_option_dict():\r\n '''\r\n Helper function to retrieve cached skos_option_dict\r\n '''\r\n cached_skos_option_dict_path = os.path.join(self.cache_dir, 'skos_options.yaml')\r\n try:\r\n cached_skos_option_dict_file = open(cached_skos_option_dict_path, 'r')\r\n cached_skos_option_dict = yaml.load(cached_skos_option_dict_file)\r\n cached_skos_option_dict_file.close()\r\n except:\r\n cached_skos_option_dict = {}\r\n \r\n return cached_skos_option_dict\r\n \r\n # Start of constructor\r\n assert narrower or broader, 'Need at least one of \"broader\" or \"narrower\" set to True in order to build concept trees'\r\n \r\n self.fcache_dir = os.path.join(tempfile.gettempdir(), 'concept_hierarchy')\r\n \r\n self.lang = lang or 'en'\r\n self.narrower = narrower\r\n self.broader = broader\r\n self.verbose = verbose\r\n \r\n self.skos_option_dict = {'altLabels': True, \r\n 'narrower': narrower, \r\n 'broader': broader,\r\n 'lang': lang\r\n } \r\n \r\n # Force refresh if SKOS options have changed\r\n self.refresh = refresh or (self.skos_option_dict != get_cached_skos_option_dict()) \r\n\r\n self.concept_fetcher = ConceptFetcher(self.skos_option_dict)\r\n \r\n self.concept_registry = {}\r\n \r\n if self.refresh:\r\n if self.verbose:\r\n print 'Refreshing disk cache'\r\n else:\r\n self.load() \r\n \r\n if initial_concept_uri:\r\n self.get_concept_from_uri(initial_concept_uri) # Build tree around initial URI if specified\r", "def build(c):", "def build_graph(self):\n raise NotImplementedError", "def build_step(self):\n\n pass", "def build_graph(self):\n self.import_tree(ZOO_PATH, self.import_zoo, self.verify_zoos)\n self.import_tree(WILD_PATH, self.import_wild, self.verify_wilds)\n self.import_tree(PANDA_PATH, self.import_redpanda, self.verify_pandas)\n self.import_tree(MEDIA_PATH, self.import_media, 
self.verify_media)", "def build_tree(self):\n active = self.get_active()\n family = self.dbstate.db.get_family_from_handle(active)\n self.goto_handle(handle=family)", "def build(self):\n if not hasattr(self, 'subtitle'):\n self.subtitle = self.data_code['subtitle']\n #print('ntimes=%s nelements=%s ntotal=%s subtitle=%s' % (\n #self.ntimes, self.nelements, self.ntotal, self.subtitle))\n nnodes = 1\n\n #self.names = []\n #self.nelements //= nnodes\n self.nelements //= self.ntimes\n #self.ntotal\n self.itime = 0\n self.ielement = 0\n self.itotal = 0\n #print('ntotal=%s ntimes=%s nelements=%s' % (self.ntotal, self.ntimes, self.nelements))\n\n self.ntotal = self.nelements * nnodes * 2\n if self.is_sort1:\n ntimes = self.ntimes\n ntotal = self.ntotal\n else:\n #print(\"ntimes=%s nelements=%s ntotal=%s nnodes=%s\" % (self.ntimes, self.nelements, self.ntotal, nnodes))\n ntimes = self.ntotal\n ntotal = self.nelements // 2\n #self.ntotal = ntotal\n #print(\"**BEND: ntimes=%s ntotal=%s\" % (ntimes, ntotal))\n #self.ntotal = nelements * nnodes * 2\n\n dtype, idtype, fdtype = get_times_dtype(self.nonlinear_factor, self.size, self.analysis_fmt)\n self._times = np.zeros(ntimes, dtype=dtype)\n #self.ntotal = self.nelements * nnodes\n\n self.element_node = np.zeros((ntotal, 2), dtype=idtype)\n\n # the number is messed up because of the offset for the element's properties\n if not self.nelements * nnodes * 2 == self.ntotal:\n msg = 'ntimes=%s nelements=%s nnodes=%s ne*nn=%s ntotal=%s' % (\n self.ntimes, self.nelements, nnodes, self.nelements * nnodes,\n self.ntotal)\n raise RuntimeError(msg)\n\n # [angle, sc, sd, se, sf, omax, omin, mst, msc]\n self.data = np.zeros((ntimes, ntotal, 9), dtype=fdtype)", "def _build_impl_impl(self, input):", "def build(req):\n\n errors = []\n v = {}\n data = {\"tab\": {}, \"expanded\": {}}\n tdir = \"/tmp/\" + f.get_tmp_file_name()\n\n index = get_html()\n\n if \"tab_zip\" not in req.files:\n return {\"errors\": [\"No tab file\"]}\n if \"expanded_zip\" not in req.files:\n return {\"errors\": [\"No expanded file\"]}\n\n ## tab\n data[\"tab\"][\"ext\"] = f.get_ext(req.files[\"tab_zip\"].filename)\n if data[\"tab\"][\"ext\"] == \"zip\":\n os.mkdir(tdir)\n if not f.extract_zip(req.files[\"tab_zip\"], tdir + \"/tab\"):\n return {\"errors\": [\"Wrong tab zip file\"]}\n\n file_name = \"index2.html\"\n try:\n os.rename(tdir + \"/tab/index.html\", tdir + \"/tab/\" + file_name)\n except os.FileNotFoundError:\n return {\"errors\": [\"No index.html in tab zip\"]}\n elif not data[\"tab\"][\"ext\"]:\n return {\"errors\": [\"No tab file\"]}\n else:\n f.save_file(req.files[\"tab_zip\"], tdir + \"/tab.\" + data[\"tab\"][\"ext\"])\n\n ## expanded\n data[\"expanded\"][\"ext\"] = f.get_ext(req.files[\"expanded_zip\"].filename)\n if data[\"expanded\"][\"ext\"] == \"zip\":\n if not f.extract_zip(req.files[\"expanded_zip\"], tdir + \"/expanded\"):\n return {\"errors\": [\"Wrong expanded zip file\"]}\n\n file_name = \"index2.html\"\n try:\n os.rename(tdir + \"/expanded/index.html\", tdir + \"/expanded/\" + file_name)\n except os.FileNotFoundError:\n return {\"errors\": [\"No index.html in expanded zip\"]}\n elif not data[\"expanded\"][\"ext\"]:\n return {\"errors\": [\"No expanded file\"]}\n else:\n f.save_file(req.files[\"expanded_zip\"], tdir + \"/expanded.\" + data[\"expanded\"][\"ext\"])\n\n v[\"expandMS\"] = str(f.get_int_param(\"expand_seconds\") * 1000)\n\n v[\"width\"] = f.strtoken(f.get_param(\"size\"), 1, \"x\")\n v[\"height\"] = f.strtoken(f.get_param(\"size\"), 2, \"x\")\n\n 
v[\"backgroundColor\"] = f.get_param(\"background_color\")\n\n v[\"clicktag_layer_select\"] = \"true\" if f.get_param(\"clicktag_layer\") else \"false\"\n\n v[\"tabURL\"] = \"\"\n v[\"tabImage\"] = \"\"\n\n if data[\"tab\"][\"ext\"] == \"zip\":\n v[\"tabUrl\"] = \"tab/index2.html\"\n else:\n v[\"tabImage\"] = \"tab.\"+data[\"tab\"][\"ext\"]\n\n v[\"expandedURL\"] = \"\"\n v[\"expandedImage\"] = \"\"\n if data[\"expanded\"][\"ext\"] == \"zip\":\n v[\"expandedURL\"] = \"expanded/index2.html\"\n else:\n v[\"expandedImage\"] = \"expanded.\"+data[\"expanded\"][\"ext\"]\n\n return {\"errors\": errors, \"dir\": tdir, \"index\": index, \"vars\": v}", "def build_serializer(self):\n self._add_child_elements_recursive(self.get_root_element())", "def __init__(self):\n self.drones = ZergUnit(UnitTypeId.DRONE, to_count=0)\n self.lings = ZergUnit(UnitTypeId.ZERGLING, to_count=999)\n self.queens = ZergUnit(UnitTypeId.QUEEN, to_count=3)\n self.roaches = ZergUnit(UnitTypeId.ROACH, to_count=100, priority=True)\n self.ravagers = ZergUnit(UnitTypeId.RAVAGER, to_count=0)\n self.defense_spines = DefensiveBuilding(\n unit_type=UnitTypeId.SPINECRAWLER, position_type=DefensePosition.Entrance, to_base_index=1, to_count=3\n )\n self.gas = StepBuildGas(to_count=3)\n\n unit_building = BuildOrder(\n [\n Step(None, self.drones, skip_until=self.should_build_drones),\n Step(UnitExists(UnitTypeId.SPAWNINGPOOL), self.defense_spines),\n Step(\n RequiredAll([UnitExists(UnitTypeId.ROACHWARREN), UnitExists(UnitTypeId.ROACH)]),\n self.ravagers,\n skip_until=self.should_build_ravagers,\n ),\n Step(UnitExists(UnitTypeId.ROACHWARREN), self.roaches),\n Step(\n RequiredAll(\n [\n UnitExists(UnitTypeId.SPAWNINGPOOL),\n UnitExists(\n UnitTypeId.ROACHWARREN,\n include_pending=True,\n include_not_ready=True,\n include_killed=True,\n ),\n ]\n ),\n self.lings,\n ),\n Step(UnitExists(UnitTypeId.SPAWNINGPOOL), self.queens),\n Step(UnitExists(UnitTypeId.SPAWNINGPOOL), self.lings),\n ]\n )\n\n buildings: BuildOrder = BuildOrder(\n [\n Step(None, ActBuilding(UnitTypeId.SPAWNINGPOOL, to_count=1)),\n Step(UnitExists(UnitTypeId.SPAWNINGPOOL), ActBuilding(UnitTypeId.ROACHWARREN, to_count=1)),\n Step(None, self.gas, skip_until=self.should_build_gas),\n ]\n )\n\n super().__init__(buildings, unit_building)", "def __init__(self, annotated_text):\n self.tokens = ['ROOT'] # initially has only root element\n self.spans = [None]\n self.heads = [None] # root has no head element\n self.labels = [None] # root has no head element => no label\n\n span_to_index = {} # maps token spans to indexes\n root_indexes = [] # to store indexes of root elements\n\n # get token spans and values from the Texterra-annotated document\n for i, an in enumerate(annotated_text['annotations']['syntax-relation']):\n span = (an['start'], an['end'])\n self.spans.append(span)\n span_to_index[span] = i + 1\n self.tokens.append(annotated_text['text'][an['start']: an['end']])\n\n # iterate over the document again to set heads and labels\n for i, an in enumerate(annotated_text['annotations']['syntax-relation']):\n if 'parent' in an['value']:\n self.heads.append(span_to_index[(an['value']['parent']['start'], an['value']['parent']['end'])])\n self.labels.append(an['value']['type'])\n else:\n self.heads.append(0)\n self.labels.append('ROOT')\n root_indexes.append(i + 1)\n\n # stores dependency structure of the sentence in dict, with\n # root elements as key and their child elements as value.\n # child elements that have their own children are stored as dicts\n # where they serve as key and 
their children as value.\n self.tree = {}\n self._visited = [] # stores elements visited during tree's building process\n self.to_string = ''\n\n # iterate over root elements and build their subtrees\n for root_index in root_indexes:\n # get the root's span\n root_span = self.spans[root_index]\n\n # indicate the root as visited\n self._visited.append(root_index)\n\n # build the roots subtree\n sub_tree, sub_tree_string = self._build_tree(root_index)\n sub_tree_key = (root_span[0], root_span[1], self.tokens[root_index], 'ROOT')\n self.tree[sub_tree_key] = sub_tree\n\n # attach the subtrees string to the sentence's parse string\n if len(root_indexes) > 0 and not sub_tree_string.startswith('('):\n format_string = '({0}) '\n else:\n format_string = '{0} '\n self.to_string += format_string.format(sub_tree_string)", "def build(self):\n if self.is_built:\n return\n #print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))\n self.itime = 0\n self.ielement = 0\n self.itotal = 0\n\n assert self.ntimes > 0, 'ntimes=%s' % self.ntimes\n assert self.nelements > 0, 'nelements=%s' % self.nelements\n assert self.ntotal > 0, 'ntotal=%s' % self.ntotal\n #self.names = []\n self.nelements //= self.ntimes\n\n dtype, idtype, fdtype = get_times_dtype(self.nonlinear_factor, self.size, self.analysis_fmt)\n self.node_element = np.zeros((self.ntotal, 2), dtype=idtype)\n #oxx, oyy, txy, angle, major, minor, ovm\n self.data = np.zeros((self.ntimes, self.nelements, 8), dtype=fdtype)\n self.location = np.empty(self.ntotal, dtype='U8')\n\n self._times = np.zeros(self.ntimes, dtype=dtype)", "def _build(self):\n xml = ET.parse(self.fn)\n root = xml.getroot()\n\n metadata = None\n trk = None\n self.prefix = root.tag[:-3]\n metadata = root.find(self._get_tag('metadata'))\n # print(metadata.find(self._get_tag('time')))\n trk = root.find(self._get_tag('trk'))\n\n trkseg = trk.find(self._get_tag('trkseg'))\n\n # I just wanted to flatten the track point and get the\n # fields that I am actually interested in.\n def walker(node):\n nonlocal data\n tags = {'lat': float,\n 'lon': float,\n 'ele': float,\n 'time': cvt_time,\n 'temp': float,\n 'hr': float}\n for tag in tags:\n if node.tag.find(tag) >= 0:\n data[tag] = tags[tag](node.text)\n for child in node:\n walker(child)\n\n for trkpt in trkseg.findall(self._get_tag('trkpt')):\n data = {}\n data['lat'] = trkpt.attrib['lat']\n data['lon'] = trkpt.attrib['lon']\n walker(trkpt)\n self.points.append(TrackPoint(**data))", "def build(self, **kwargs):\n if not self.built:\n with tf.name_scope(self.name):\n self._build(**kwargs)\n self.built = True\n return self", "def treeparser(self,spot):\n\n\t\tspot_sub = self.spots[spot]\n\t\trootdir = spot_sub['rootdir']\n\t\t#---start with all files under rootdir\n\t\tfns = [os.path.join(dirpath,fn) \n\t\t\tfor (dirpath, dirnames, filenames) \n\t\t\tin os.walk(rootdir,followlinks=True) for fn in filenames]\n\t\t#---regex combinator is the only place where we enforce a naming convention via top,step,part\n\t\t#---note that we may wish to generalize this depending upon whether it is wise to have three parts\n\t\tregex = ('^%s\\/'%re.escape(rootdir.rstrip('/'))+\n\t\t\t'\\/'.join([spot_sub['top'],spot_sub['step'],spot_sub['part']])\n\t\t\t+'$')\n\t\tmatches_raw = [i.groups() for fn in fns for i in [re.search(regex,fn)] if i]\n\t\tif not matches_raw: \n\t\t\tstatus('no matches found for spot: \"%s,%s\"'%spot,tag='warning')\n\t\t\treturn\n\t\t#---first we organize the top,step,part into tuples which serve as keys\n\t\t#---we 
organize the toc as a doubly-nested dictionary of trajectory parts\n\t\t#---the top two levels of the toc correspond to the top and step signifiers\n\t\t#---note that this procedure projects the top,step,part naming convention into the toc\n\t\tmatches = [self.spots[spot]['divy_keys'](i) for i in matches_raw]\n\t\tself.toc[spot] = collections.OrderedDict()\n\t\t#---sort the tops into an ordered dictionary\n\t\tfor top in sorted(set(zip(*matches)[0])): \n\t\t\tself.toc[spot][top] = collections.OrderedDict()\n\t\t#---collect unique steps for each top and load them with the parts\n\t\tfor top in self.toc[spot]:\n\t\t\t#---sort the steps into an ordered dictionary\n\t\t\tfor step in sorted(set([i[1] for i in matches if i[0]==top])):\n\t\t\t\t#---we sort the parts into an ordered dictionary\n\t\t\t\t#---this is the leaf of the toc tree and we use dictionaries\n\t\t\t\tparts = sorted([i[2] for i in matches if i[0]==top and i[1]==step])\n\t\t\t\tself.toc[spot][top][step] = collections.OrderedDict([(part,{}) for part in parts])\n\t\t#---now the toc is prepared with filenames but subsequent parsings will identify EDR files", "def build_order(self):\n seen = set()\n\n def _already_built(node):\n # visit only once\n if node in seen:\n return True\n seen.add(node)\n\n # prune if the result is already computed\n if node.output_ready:\n return True\n\n return False\n\n for target in self.targets:\n if target in seen:\n continue\n for node in target.postorder(prune_fn=_already_built):\n yield node", "def build(self):\n\t\tself.documents = self.get_items_to_index()\n\t\tself.build_index()", "def __init__(self, args, graph):\n self.args = args\n self.graph = graph\n if self.args.walker == \"first\":\n self.walker = RandomWalker(self.graph,\n self.args.num_of_walks,\n self.args.random_walk_length)\n\n self.degrees, self.walks = self.walker.do_walks()\n else:\n self.walker = SecondOrderRandomWalker(self.graph, False, self.args.P, self.args.Q)\n self.walker.preprocess_transition_probs()\n self.walks, self.degrees = self.walker.simulate_walks(self.args.num_of_walks,\n self.args.random_walk_length)\n self.nodes = [node for node in self.graph.nodes()]\n del self.walker\n self.vocab_size = len(self.degrees)\n self.true_step_size = self.args.num_of_walks*self.vocab_size\n self.build()", "def build_tree(self, w):\n w_abs = np.abs(w)\n if sum(w_abs) != 1.:\n w_abs = w_abs / sum(w_abs)\n self.w = w_abs\n self.tree = np.zeros(w.shape)\n self._build_node(w_abs, 1)\n self.w_apx = extract_distribution(self.tree)\n\n n_levels = np.ceil(np.log2(len(w)))\n self.lfsr = []\n for n in range(int(n_levels)):\n seed = np.random.randint(1, int(2**(self.lfsr_nbits-n)-1))\n self.lfsr.append(LFSR(self.lfsr_nbits-n, seed))", "def build(self):\n if not self.built:\n with tf.name_scope(self.name):\n self._build()\n self.built = True\n return self", "def _build_tree(self, index):\n\n children = []\n to_string = '({0}/{1}'.format(self.tokens[index], self.labels[index])\n\n for i in range(1, len(self.tokens)):\n\n if i not in self._visited and self.heads[i] == index:\n self._visited.append(i)\n child_tree = {}\n c, s = self._build_tree(i)\n child_tree[(self.spans[i][0], self.spans[i][1], self.tokens[i], self.labels[i])] = c\n children.append(child_tree)\n to_string += ' {0}'.format(s)\n\n if len(children) > 0:\n to_string += ')'\n return children, to_string\n else:\n return children, to_string[1:]", "def _build_tree_nodes(self):\n parent_node = self._create_node('Parent node', href='#', count=2)\n child1 = self._create_node('Child#1', 
href='#', count=1)\n self._add_node(parent_node, child1)\n child2 = self._create_node('Child#2', href='#', count=1)\n self._add_node(parent_node, child2)\n return [parent_node]", "def build_filesystem(program: list[list[str]]) -> Node:\n root = Node('/')\n current_node = root\n for line in program:\n if line[0] == '$':\n if line[1] == 'cd':\n param = line[2]\n if param == '/':\n # cd to filesystem root, '/'\n current_node = root\n elif param == '..':\n # go up one folder\n if current_node != root:\n current_node = current_node.parent\n else:\n # go into a child folder\n current_node = current_node.children[param]\n elif line[1] == 'ls':\n # No action required for the ls command\n continue\n elif line[0] == 'dir':\n # 'line' lists a child dir of the current folder\n child_name = line[1]\n if child_name not in current_node.children:\n child = Node(child_name, current_node)\n current_node.children[child_name] = child\n else:\n # 'line' lists a file in the current folder\n file_size = int(line[0])\n file_name = line[1]\n if file_name not in current_node.content:\n current_node.content[file_name] = file_size\n return root", "def __init__(self, root, branches=None):\n self.tree_dict = {}\n self.directory = Path(root)\n self.start = str(self.directory).rfind(os.sep) + 1\n self.branches = branches\n self.get()", "def _build_impl(self, input):\n self.out = self._build_impl_impl(input)\n return self.out # output", "def build_tree(elem, level = 1024, remove_root = 0):\n if level <= 0:\n return None\n level -= 1\n\n lista = elem.objectValues()\n node = {}\n children = []\n\n for i in lista:\n result = (build_tree(i, level))\n if result:\n children.append(result)\n\n if remove_root:\n return children\n else:\n node[\"title\"] = get_id(elem)\n node[\"children\"] = []\n\n if len(lista):\n node[\"key\"] = get_id(elem)\n node[\"isFolder\"] = True\n\n if not len(node[\"children\"]):\n node[\"isLazy\"] = True\n\n node[\"children\"] = children\n\n return node", "def test_list_namespaced_build(self):\n pass", "def build(cls, args, env):\n webnav_env = env._env\n return cls(args.beam_size, env, args.path_length,\n webnav_env.embedding_dim, gamma=args.gamma,\n keep_prob=args.rnn_keep_prob,\n sarsa=args.algorithm == \"sarsa\")", "def __init__(self, walk_length):\n self.feature_spec = {\n self._SOURCE_ID: tf.io.FixedLenFeature([], dtype=tf.int64),\n self._DESTINATION_ID: tf.io.FixedLenFeature([], dtype=tf.int64),\n self._FEATURES: tf.io.FixedLenFeature([walk_length], dtype=tf.float32),\n }", "def __init__(self, spec):\n self.spec = spec", "def build(self):\n self.build_inputs()\n self.build_image_embeddings()\n self.build_seq_embeddings()\n self.build_encoder()\n self.build_prediction_model()\n self.setup_encoder_initializer()\n self.setup_global_step()\n self.list_trainable_variables()", "def build(self):\n raise NotImplementedError(\"This is an interface method. 
Implement it in subclass.\")", "def build_toc(self) -> None:\n logger.info(__('writing toc.ncx file...'))\n\n if self.config.epub_tocscope == 'default':\n doctree = self.env.get_and_resolve_doctree(self.config.root_doc,\n self, prune_toctrees=False,\n includehidden=False)\n refnodes = self.get_refnodes(doctree, [])\n self.toc_add_files(refnodes)\n else:\n # 'includehidden'\n refnodes = self.refnodes\n self.check_refnodes(refnodes)\n navpoints = self.build_navpoints(refnodes)\n level = max(item['level'] for item in self.refnodes)\n level = min(level, self.config.epub_tocdepth)\n copy_asset_file(path.join(self.template_dir, 'toc.ncx_t'), self.outdir,\n self.toc_metadata(level, navpoints))", "def test_create_namespaced_build(self):\n pass", "def _build_components(self):\n\n # Build dataframes.\n dfs = {}\n for df_name, df_repr in self._repr.dataframes.items():\n cols = {}\n for i in range(len(df_repr.columns)):\n col = df_repr.columns[i]\n cols[col.name] = {}\n cols[col.name]['index'] = i\n cols[col.name]['type'] = col.vtype\n dfs[df_name] = DataFrame(df_name, cols)\n self._dataframes = dfs\n\n # Build operator contexts.\n self._op_ctxs = {}\n for _, op_repr in self._repr.operators.items():\n op_ctx = OperatorContext(op_repr, self.dataframes)\n\n if op_ctx.name == '_start_op':\n op_ctx.add_task_start_handler(self.graph_start_handlers)\n elif op_ctx.name == '_end_op':\n op_ctx.add_task_finish_handler(self.graph_finish_handlers)\n else:\n op_ctx.add_task_start_handler(self.task_start_handlers)\n op_ctx.add_task_ready_handler(self.task_ready_handlers)\n op_ctx.add_task_finish_handler(self.task_finish_handlers)\n\n self._op_ctxs[op_ctx.name] = op_ctx", "def __init__(self, specs, resources, properties=None):\n if not properties:\n properties = {}\n self.init_collections()\n self.properties = properties\n self.set_paths(specs, resources)\n self.parse_paths()\n self.find_resources()", "def _rebuild(self, *args, **kwargs):\n handle = self._args.copy() # Original constructor arguments\n argnames = [i for i in self._traversable if i not in kwargs]\n handle.update(OrderedDict([(k, v) for k, v in zip(argnames, args)]))\n handle.update(kwargs)\n return type(self)(**handle)", "def _buildindex( self ):\n try:\n import ROOT as rt\n except:\n print \"Could not load ROOT\"\n sys.exit(-1)\n \n # sigh. this is a mess\n self.producers = [] # all producer names found in ROOT files\n self.datatypes = [] # all data types\n self.flavors = [] # flavor = hash of string listing set of trees found in a given file\n self.flavor_def = {} # map from flavor to list of tree names\n self.rawdigits_entrymap = {} # only used if file type is raw digits. 
maps rse to (position,wfms) in data tree\n self.rawdigits_tpcindex = {}\n flavor_eventset = {}\n eventsets = []\n events_to_files = {}\n events_to_flavors = {}\n\n # this loop is going into each file in our list and\n # - taking the list of trees in the file and making a has out of their names\n # - this hash is used to define the 'flavor' of the file\n # - we also make a list of events in the tree, labeling each entry with (run,subrun,event) ID\n # - we keep track of such list of entries and group files (and flavors) with the same event list\n # - determine filetype: LArCV or LArLite\n self.filetype = None\n for f in self.larlitefilelist:\n r = rt.TFile(f)\n nfkeys = r.GetListOfKeys().GetEntries()\n\n # now here we parse the type of objects in the ROOT file\n # we are looking to determine three file types supported by pylard\n # (1) larlite (2) larcv (3) rawdigitreader\n trees = []\n for i in range(nfkeys):\n keyname = r.GetListOfKeys().At(i).GetName()\n if keyname==\"larlite_id_tree\":\n found_id_tree = True\n elif \"_tree\" in keyname:\n producer = keyname.split(\"_\")[1]\n dtype = keyname.split(\"_\")[0]\n if producer not in self.producers:\n self.producers.append( producer )\n if dtype not in self.datatypes:\n self.datatypes.append( dtype )\n elif \"rawdigitwriter\" in keyname:\n trees.append( \"rawdigitwriter/RawDigits\" )\n trees.append( \"rawdigitwriter/OpDetWaveforms\" )\n trees.append( \"rawdigitwriter/IndexRawDigits\" )\n trees.append( \"rawdigitwriter/IndexOpDetWfms\" )\n if keyname not in trees:\n trees.append(keyname)\n hashstr = \"\"\n trees.sort()\n for keyname in trees:\n hashstr += keyname +\";\"\n\n # determine filetype from type of keys we see\n is_supported_rootfile = False\n idtreename = None\n if \"larlite_id_tree\" in trees:\n thisfiletype = \"LARLITE\"\n is_supported_rootfile = True\n if \"image2d\" in self.datatypes:\n thisfiletype = \"LARCV\"\n is_supported_rootfile = True\n if \"partroi\" in self.datatypes:\n thisfiletype = \"LARCV\"\n is_supported_rootfile = True\n if \"rawdigitwriter/OpDetWaveforms\" in trees:\n thisfiletype = \"RAWDIGITS\"\n is_supported_rootfile = True\n if not is_supported_rootfile:\n continue\n\n if self.filetype is not None and self.filetype!=thisfiletype:\n print \"Error in parsing filelist: Cannot mix filetypes (LArCV/LArLite/RawDigitTree)\"\n return\n elif self.filetype is None:\n self.filetype = thisfiletype\n \n # now we determine the idtree to use\n if self.filetype==\"LARLITE\":\n idtreename = \"larlite_id_tree\"\n elif self.filetype==\"LARCV\":\n if self.loaded_larcv == False:\n s = time.time()\n import larcv as larcv\n print \"LOADING LARCV: \",time.time()-s,\"secs\"\n self.loaded_larcv = True\n for treename in trees:\n if \"image2d\" in treename:\n if idtreename is None:\n idtreename = treename\n else:\n pass # we only use this if we have to\n if \"partroi\" in treename:\n idtreename = treename # we prefer to use this tree for speed\n break\n elif self.filetype==\"RAWDIGITS\":\n idtreename = \"rawdigitwriter/IndexOpDetWfms\"\n\n if idtreename is None:\n print \"Error: Could not setup a proper ID tree for this file\"\n continue\n\n # now we parse the tree contents. 
define a flavor for it based on all the trees\n # we also get the (run,subrun,event) id for the event\n m = hashlib.md5()\n m.update(hashstr)\n flavor = m.digest()\n if flavor not in self.flavors:\n self.flavors.append( flavor )\n flavor_eventset[flavor] = []\n self.flavor_def[flavor] = hashstr\n if self.filetype==\"LARLITE\":\n idtree = r.Get(idtreename)\n elif self.filetype==\"LARCV\":\n idtree = r.Get(idtreename)\n elif self.filetype==\"RAWDIGITS\":\n idtree = r.Get(idtreename)\n \n eventset = [] # list of events\n for n in range(idtree.GetEntries()):\n idtree.GetEntry(n)\n if self.filetype==\"LARLITE\":\n rse = ( idtree._run_id, idtree._subrun_id, idtree._event_id )\n elif self.filetype==\"LARCV\":\n idbranchname = idtreename.replace(\"_tree\",\"_branch\")\n idbranch = None\n exec(\"idbranch=idtree.%s\"%(idbranchname))\n rse = ( idbranch.run(), idbranch.subrun(), idbranch.event() )\n elif self.filetype==\"RAWDIGITS\":\n rse = ( idtree.idx_run, idtree.idx_subrun, idtree.idx_event )\n self.rawdigits_entrymap[rse] = (idtree.entrystart, idtree.nentries )\n eventset.append(rse)\n if rse not in flavor_eventset[flavor]:\n flavor_eventset[flavor].append( rse )\n else:\n raise ValueError( \"found a repeated run/subrun/event index (%s). what?\"%( str(rse) ) )\n if self.filetype==\"RAWDIGITS\":\n # rawdigits has another tree index for the TPC\n tpcindex = r.Get(\"rawdigitwriter/IndexRawDigits\")\n for n in range(tpcindex.GetEntries()):\n tpcindex.GetEntry(n)\n rse = ( tpcindex.idx_run, tpcindex.idx_subrun, tpcindex.idx_event )\n self.rawdigits_tpcindex[rse] = (tpcindex.entrystart, tpcindex.nentries)\n \n eventset = tuple(eventset)\n if eventset not in events_to_files:\n events_to_files[eventset] = {}\n events_to_flavors[eventset] = []\n eventsets.append( eventset )\n events_to_files[eventset][flavor] = f\n events_to_flavors[eventset].append( flavor )\n del idtree\n r.Close()\n self.parsed = True\n\n # now we take our collection of event lists and\n # - sort the event lists\n # - make lists of files with the same set of events in the order of the sorted event list\n # - for each list we also make a dictionary between (run,subrun,event) index to the entry number\n # - we pick the list with the biggest number of events as the \"official\" file list\n eventsets.sort()\n flavorfiles = {}\n flavorsets = []\n\n flavorset_rse_dict = {}\n flavorset_entry_dict = {}\n for eventset in eventsets:\n events_to_flavors[eventset].sort() # sort the flavors with this event-set\n flavorset = tuple( events_to_flavors[eventset] )\n if flavorset not in flavorfiles:\n flavorfiles[flavorset] = []\n flavorsets.append(flavorset)\n flavorset_rse_dict[flavorset] = {}\n flavorset_entry_dict[flavorset] = {}\n for flavor in flavorset:\n flavorfiles[flavorset].append( events_to_files[eventset][flavor] )\n for rse in eventset:\n ientry = len( flavorset_rse_dict[flavorset] )\n flavorset_rse_dict[flavorset][rse] = ientry\n flavorset_entry_dict[flavorset][ientry] = rse\n\n # look for largest fileset\n maxset = None\n nfiles = 0\n for fset in flavorsets:\n n = len(flavorfiles[fset])\n if n>nfiles:\n nfiles = n\n maxset = fset\n # these are the final file list and event dictionary we want\n self.sorted_filelist = flavorfiles[maxset]\n self.rse_dict = flavorset_rse_dict[maxset]\n self.entry_dict = flavorset_entry_dict[maxset]\n\n # for rawdigits, we also build the entry to data map\n if self.filetype==\"RAWDIGITS\":\n treepos = 0\n treepos_tpc = 0\n for entry in range(len(self.entry_dict)):\n rse = self.entry_dict[entry] \n # update 
OPDET tree\n pos_entries = self.rawdigits_entrymap[rse] # pos is from start of file, nentries is for the event block\n merged_pos_entries = ( treepos, pos_entries[1] )\n treepos += pos_entries[1]\n self.rawdigits_entrymap[rse] = merged_pos_entries # update \n # update TPC tree\n pos_entries = self.rawdigits_tpcindex[rse]\n merged_pos_entries = ( treepos_tpc, pos_entries[1] )\n treepos_tpc += pos_entries[1]\n self.rawdigits_tpcindex[rse] = merged_pos_entries # update", "def build(self):\n if not self.built:\n logger.debug(f\"Building {self.name}...\")\n with tf.name_scope(self.name):\n self._build()\n self.built = True\n return self" ]
[ "0.6779343", "0.64210516", "0.57740533", "0.55809647", "0.5465794", "0.5431072", "0.5333568", "0.52883077", "0.5245698", "0.52180934", "0.5119556", "0.5117688", "0.5117688", "0.5085158", "0.504372", "0.5026567", "0.5005321", "0.4998705", "0.4986079", "0.49831903", "0.49652678", "0.49404657", "0.49230203", "0.49208888", "0.49124372", "0.49109963", "0.4906557", "0.4897252", "0.4896741", "0.48877546", "0.48778883", "0.48729622", "0.4860842", "0.4852873", "0.482914", "0.48186666", "0.4815133", "0.4813574", "0.48073763", "0.4803206", "0.4803206", "0.4803206", "0.47835416", "0.47826397", "0.47560254", "0.47559503", "0.47494298", "0.4709474", "0.47019732", "0.47019732", "0.46942997", "0.4679078", "0.46790177", "0.46631387", "0.4651181", "0.46480808", "0.46456894", "0.46367452", "0.46320143", "0.46285507", "0.46180278", "0.46145642", "0.46094984", "0.45922583", "0.45862436", "0.45849445", "0.45823497", "0.45759875", "0.4569598", "0.45664346", "0.45624197", "0.4546748", "0.4539141", "0.4535941", "0.45358318", "0.45308936", "0.45243046", "0.45239314", "0.45197156", "0.4518491", "0.45176315", "0.45150888", "0.45049784", "0.45046303", "0.4499899", "0.44994342", "0.44951162", "0.44942465", "0.44889015", "0.448676", "0.44862476", "0.448531", "0.44826335", "0.4480247", "0.44799373", "0.44760013", "0.44697443", "0.4464536", "0.4463082", "0.44620222" ]
0.70819336
0
Builds the Recursive Traversal Spec to traverse the managed object hierarchy.
def build_recursive_traversal_spec(client_factory):
    visit_folders_select_spec = build_selection_spec(client_factory, "visitFolders")
    # For getting to hostFolder from datacenter
    dc_to_hf = build_traversal_spec(client_factory, "dc_to_hf", "Datacenter", "hostFolder", False, [visit_folders_select_spec])
    # For getting to vmFolder from datacenter
    dc_to_vmf = build_traversal_spec(client_factory, "dc_to_vmf", "Datacenter", "vmFolder", False, [visit_folders_select_spec])
    # For getting Host System to virtual machine
    h_to_vm = build_traversal_spec(client_factory, "h_to_vm", "HostSystem", "vm", False, [visit_folders_select_spec])
    # For getting to Host System from Compute Resource
    cr_to_h = build_traversal_spec(client_factory, "cr_to_h", "ComputeResource", "host", False, [])
    # For getting to datastore from Compute Resource
    cr_to_ds = build_traversal_spec(client_factory, "cr_to_ds", "ComputeResource", "datastore", False, [])
    rp_to_rp_select_spec = build_selection_spec(client_factory, "rp_to_rp")
    rp_to_vm_select_spec = build_selection_spec(client_factory, "rp_to_vm")
    # For getting to resource pool from Compute Resource
    cr_to_rp = build_traversal_spec(client_factory, "cr_to_rp", "ComputeResource", "resourcePool", False, [rp_to_rp_select_spec, rp_to_vm_select_spec])
    # For getting to child res pool from the parent res pool
    rp_to_rp = build_traversal_spec(client_factory, "rp_to_rp", "ResourcePool", "resourcePool", False, [rp_to_rp_select_spec, rp_to_vm_select_spec])
    # For getting to Virtual Machine from the Resource Pool
    rp_to_vm = build_traversal_spec(client_factory, "rp_to_vm", "ResourcePool", "vm", False, [rp_to_rp_select_spec, rp_to_vm_select_spec])
    # Get the assorted traversal spec which takes care of the objects to
    # be searched for from the root folder
    traversal_spec = build_traversal_spec(client_factory, "visitFolders", "Folder", "childEntity", False, [visit_folders_select_spec, dc_to_hf, dc_to_vmf, cr_to_ds, cr_to_h, cr_to_rp, rp_to_rp, h_to_vm, rp_to_vm])
    return traversal_spec
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_object_spec(client_factory, root_folder, traversal_specs):\r\n object_spec = client_factory.create('ns0:ObjectSpec')\r\n object_spec.obj = root_folder\r\n object_spec.skip = False\r\n object_spec.selectSet = traversal_specs\r\n return object_spec", "def HierarchyIterator(obj):\n while obj:\n yield obj\n for opChild in SplineInputGeneratorHelper.HierarchyIterator(obj.GetDown()):\n yield opChild\n obj = obj.GetNext()", "def recurse(self):\n url = self._api + '?recursive=1'\n json = self._json(self._get(url), 200)\n return Tree(json, self._session) if json else None", "def _build_tree(self, root, obj):\n\n if obj is None:\n return\n\n for attr_name in obj.__class__.__ordered__:\n if attr_name.startswith('_'):\n continue\n\n attr = getattr(obj.__class__, attr_name)\n\n if isinstance(attr, XmlElementProperty):\n element = root.add_child(attr.name)\n self._build_tree(element, getattr(obj, attr_name))\n elif isinstance(attr, XmlAttributeProperty):\n value = getattr(obj, attr_name)\n if value is not None:\n root.add_attribute(attr.name, value)", "def build_tree(self, prefix, depth):\n for count, function in [[self.n_files, self.make_file],\n [self.n_children, self.make_child_recurse],\n [self.n_symlinks, self.make_symlink]]:\n for i in range(count):\n if not self.can_continue():\n return\n name = os.path.join(prefix, self.name_gen.next())\n function(name, depth)", "def _get_object_subtree(self):\n raise NotImplementedError", "def __build_test_model_children_tree_1(self) -> Model:\n self.model_builder.clear()\n\n r_a = SystemFile(\"a\", 1024, True)\n r_aa = SystemFile(\"aa\", 512, False)\n r_a.add_child(r_aa)\n r_ab = SystemFile(\"ab\", 512, False)\n r_a.add_child(r_ab)\n r_b = SystemFile(\"b\", 3090, True)\n r_ba = SystemFile(\"ba\", 2048, True)\n r_b.add_child(r_ba)\n r_baa = SystemFile(\"baa\", 2048, False)\n r_ba.add_child(r_baa)\n r_bb = SystemFile(\"bb\", 42, True) # only in remote\n r_b.add_child(r_bb)\n r_bba = SystemFile(\"bba\", 42, False) # only in remote\n r_bb.add_child(r_bba)\n r_bd = SystemFile(\"bd\", 1000, False)\n r_b.add_child(r_bd)\n r_c = SystemFile(\"c\", 1234, False) # only in remote\n r_d = SystemFile(\"d\", 5678, True) # only in remote\n r_da = SystemFile(\"da\", 5678, False) # only in remote\n r_d.add_child(r_da)\n\n l_a = SystemFile(\"a\", 1024, True)\n l_aa = SystemFile(\"aa\", 512, False)\n l_a.add_child(l_aa)\n l_ab = SystemFile(\"ab\", 512, False)\n l_a.add_child(l_ab)\n l_b = SystemFile(\"b\", 1611, True)\n l_ba = SystemFile(\"ba\", 512, True)\n l_b.add_child(l_ba)\n l_baa = SystemFile(\"baa\", 512, False)\n l_ba.add_child(l_baa)\n l_bc = SystemFile(\"bc\", 99, True) # only in local\n l_b.add_child(l_bc)\n l_bca = SystemFile(\"bca\", 99, False) # only in local\n l_bc.add_child(l_bca)\n l_bd = SystemFile(\"bd\", 1000, False)\n l_b.add_child(l_bd)\n\n s_b = LftpJobStatus(0, LftpJobStatus.Type.MIRROR, LftpJobStatus.State.RUNNING, \"b\", \"\")\n s_b.total_transfer_state = LftpJobStatus.TransferState(1611, 3090, 52, 10, 1000)\n s_b.add_active_file_transfer_state(\"ba/baa\", LftpJobStatus.TransferState(512, 2048, 25, 5, 500))\n s_c = LftpJobStatus(0, LftpJobStatus.Type.PGET, LftpJobStatus.State.QUEUED, \"c\", \"\")\n s_d = LftpJobStatus(0, LftpJobStatus.Type.MIRROR, LftpJobStatus.State.QUEUED, \"d\", \"\")\n\n self.model_builder.set_remote_files([r_a, r_b, r_c, r_d])\n self.model_builder.set_local_files([l_a, l_b])\n self.model_builder.set_lftp_statuses([s_b, s_c, s_d])\n return self.model_builder.build_model()", "def work_tree(obj, **kwargs):\n max_depth = 
0\n exclusions = kwargs.get('exclusions', {\"groups\": [], \"classes\": [], \"params\": []})\n groups_done = {}\n classes = {\"depths\": {}, \"content\": {}}\n params = {\"depths\": {}, \"content\": {}}\n if hasattr(obj, 'hostname') and not hasattr(obj, 'name'):\n obj.name = obj.hostname\n to_index = [(obj, 1)]\n\n while to_index:\n (obj, depth) = to_index.pop()\n if obj.name in groups_done and groups_done[obj.name] <= depth:\n continue\n\n objclasses = obj.classes.exclude(classname__in=exclusions['classes'])\n updated_classes = update_values(objclasses, \"classname\", \"classparams\", depth=depth, results=classes)\n\n objparams = obj.parameters.exclude(paramkey__in=exclusions['params'])\n updated_params = update_values(objparams, \"paramkey\", \"paramvalue\", depth=depth, results=params)\n\n if not updated_classes or not updated_params:\n return (\"Fail\", \"Fail\")\n\n groups_done[obj.name] = depth\n depth += 1\n for group in obj.groups.exclude(name__in=exclusions['groups']):\n to_index.append((group, depth))\n if max_depth < depth:\n max_depth = depth\n\n params[\"content\"]['max_depth'] = max_depth\n params[\"content\"]['done_count'] = len(groups_done)\n return (classes[\"content\"], params[\"content\"])", "def _fetchObjectChildren(self, obj, obj_path):\n obj_children = []\n path_strings = []\n tree_items = []\n\n is_attr_list = [False] * len(obj_children)\n\n # Object attributes\n # Needed to handle errors while getting object's attributes\n # Related with spyder-ide/spyder#6728 and spyder-ide/spyder#9959\n for attr_name in dir(obj):\n try:\n attr_value = getattr(obj, attr_name)\n obj_children.append((attr_name, attr_value))\n path_strings.append('{}.{}'.format(obj_path, attr_name)\n if obj_path else attr_name)\n is_attr_list.append(True)\n except Exception:\n # Attribute could not be get\n pass\n assert len(obj_children) == len(path_strings), \"sanity check\"\n\n for item, path_str, is_attr in zip(obj_children, path_strings,\n is_attr_list):\n name, child_obj = item\n tree_items.append(TreeItem(child_obj, name, path_str, is_attr))\n\n return tree_items", "def _traverse_tree(self):\n if not self.children:\n yield self\n for child in self.children:\n yield from child._traverse_tree()", "def make_drs_tree(self):\n pass", "def work_tree2(obj, **kwargs):\n if 'exclusions' in kwargs:\n exclusions = kwargs['exclusions']\n else:\n exclusions = Exclusions([], [], [])\n #groups_done = {}\n classes = NodeResults(nodetype='classes')\n params = NodeResults(nodetype='params')\n if hasattr(obj, 'hostname') and not hasattr(obj, 'name'):\n obj.name = obj.hostname\n to_index = [(obj, 1)]\n\n # loop opts\n index_pop = to_index.pop\n index_extend = to_index.extend\n egroups, eclasses, eparams = exclusions\n add_classes = classes.add_entries\n add_params = params.add_entries\n\n while to_index:\n (obj, depth) = index_pop()\n #objname = obj.name\n #if objname in groups_done and groups_done[objname] <= depth:\n #continue\n try:\n objclasses = obj.classes.exclude(classname__in=eclasses)\n add_classes(objclasses, \"classname\", \"classparams\", depth)\n objparams = obj.parameters.exclude(paramkey__in=eparams)\n add_params(objparams, \"paramkey\", \"paramvalue\", depth)\n except RuntimeError, e:\n return (\"Fail\", \"Fail\") # or just let it bubble up to the caller\n\n #groups_done[objname] = depth\n depth += 1\n children = [(group, depth) for group in obj.groups.exclude(name__in=egroups)]\n index_extend(children)\n\n return classes.as_dict(), params.as_dict() # or (classes.entries, params.entries)", "def 
asciitree(obj,depth=0,wide=2,last=[],recursed=False):\n\tcorner = u'\\u251C'\n\tcorner_end = u'\\u2514'\n\thorizo,horizo_bold = u'\\u2500',u'\\u2501'\n\tvertic,vertic_bold = u'\\u2502',u'\\u2503'\n\ttl,tr,bl,br = u'\\u250F',u'\\u2513',u'\\u2517',u'\\u251B'\n\tspacer_both = dict([(k,{\n\t\t0:'\\n',1:(' '*(wide+1)*(depth-1)+c+horizo*wide),\n\t\t2:' '*(wide+1)*(depth-1)}[depth] if depth <= 1 \n\t\telse (''.join([(vertic if d not in last else ' ')+\n\t\t' '*wide for d in range(1,depth)]))+c+horizo*wide) \n\t\tfor (k,c) in [('mid',corner),('end',corner_end)]])\n\tspacer = spacer_both['mid']\n\tif type(obj) in [float,int,bool]+str_types_list:\n\t\tif depth == 0: print(spacer+str(obj)+'\\n'+horizo*len(obj))\n\t\telse: print(spacer+str(obj))\n\telif isinstance(obj,dict) and all([type(i) in [str,float,int,bool] for i in obj.values()]) and depth==0:\n\t\tasciitree({'HASH':obj},depth=1,recursed=True)\n\telif type(obj) in [list,tuple]:\n\t\tfor ind,item in enumerate(obj):\n\t\t\tspacer_this = spacer_both['end'] if ind==len(obj)-1 else spacer\n\t\t\tif type(item) in [float,int,bool]+str_types_list: print(spacer_this+str(item))\n\t\t\telif item != {}:\n\t\t\t\tprint(spacer_this+'('+str(ind)+')')\n\t\t\t\tasciitree(item,depth=depth+1,\n\t\t\t\t\tlast=last+([depth] if ind==len(obj)-1 else []),\n\t\t\t\t\trecursed=True)\n\t\t\telse: print('unhandled tree object %s'%item)\n\telif isinstance(obj,dict) and obj != {}:\n\t\tfor ind,key in enumerate(obj.keys()):\n\t\t\tspacer_this = spacer_both['end'] if ind==len(obj)-1 else spacer\n\t\t\tif type(obj[key]) in [float,int,bool]+str_types_list: print(spacer_this+str(key)+' = '+str(obj[key]))\n\t\t\t# special: print single-item lists of strings on the same line as the key\n\t\t\telif type(obj[key])==list and len(obj[key])==1 and type(obj[key][0]) in [str,float,int,bool]:\n\t\t\t\tprint(spacer_this+key+' = '+str(obj[key]))\n\t\t\t# special: skip lists if blank dictionaries\n\t\t\telif type(obj[key])==list and all([i=={} for i in obj[key]]):\n\t\t\t\tprint(spacer_this+key+' = (empty)')\n\t\t\telif obj[key] != {}:\n\t\t\t\t# fancy border for top level\n\t\t\t\tif depth == 0:\n\t\t\t\t\tprint('\\n'+tl+horizo_bold*(len(key)+0)+\n\t\t\t\t\t\ttr+spacer_this+vertic_bold+str(key)+vertic_bold+'\\n'+\\\n\t\t\t\t\t\tbl+horizo_bold*len(key)+br+'\\n'+vertic)\n\t\t\t\telif obj[key]==None: print(spacer_this+key+' = None')\n\t\t\t\telse: print(spacer_this+key)\n\t\t\t\tif obj[key]!=None: \n\t\t\t\t\tasciitree(obj[key],depth=depth+1,\n\t\t\t\t\t\tlast=last+([depth] if ind==len(obj)-1 else []),\n\t\t\t\t\t\trecursed=True)\n\t\t\telif type(obj[key])==list and obj[key]==[]:\n\t\t\t\tprint(spacer_this+'(empty)')\n\t\t\telif obj[key]=={}: print(spacer_this+'%s = {}'%key)\n\t\t\telse: print('unhandled tree object %s'%key)\n\telse: print('unhandled tree object %s'%obj)\n\tif not recursed: print('\\n')", "async def test_async_browse_children() -> None:\n # pylint: disable=too-many-statements\n requester = UpnpTestRequester(RESPONSE_MAP)\n factory = UpnpFactory(requester)\n device = await factory.async_create_device(\"http://dlna_dms:1234/device.xml\")\n notify_server = UpnpTestNotifyServer(\n requester=requester,\n source=(\"192.168.1.2\", 8090),\n )\n event_handler = notify_server.event_handler\n profile = DmsDevice(device, event_handler=event_handler)\n\n # Object 0 is the root and must always exist\n requester.response_map[(\"POST\", \"http://dlna_dms:1234/upnp/control/ContentDir\")] = (\n 200,\n {},\n read_file(\"dlna/dms/action_Browse_children_0.xml\"),\n )\n result = await 
profile.async_browse_direct_children(\"0\")\n assert result.number_returned == 4\n assert result.total_matches == 4\n assert result.update_id == 2333\n children = result.result\n assert len(children) == 4\n assert children[0].title == \"Browse Folders\"\n assert children[0].id == \"64\"\n assert children[0].child_count == \"4\"\n assert children[1].title == \"Music\"\n assert children[1].id == \"1\"\n assert children[1].child_count == \"7\"\n assert children[2].title == \"Pictures\"\n assert children[2].id == \"3\"\n assert children[2].child_count == \"5\"\n assert children[3].title == \"Video\"\n assert children[3].id == \"2\"\n assert children[3].child_count == \"3\"\n\n # Object 2 will give some different results\n requester.response_map[(\"POST\", \"http://dlna_dms:1234/upnp/control/ContentDir\")] = (\n 200,\n {},\n read_file(\"dlna/dms/action_Browse_children_2.xml\"),\n )\n result = await profile.async_browse_direct_children(\"2\")\n assert result.number_returned == 3\n assert result.total_matches == 3\n assert result.update_id == 2333\n children = result.result\n assert len(children) == 3\n assert children[0].title == \"All Video\"\n assert children[0].id == \"2$8\"\n assert children[0].child_count == \"583\"\n assert children[1].title == \"Folders\"\n assert children[1].id == \"2$15\"\n assert children[1].child_count == \"2\"\n assert children[2].title == \"Recently Added\"\n assert children[2].id == \"2$FF0\"\n assert children[2].child_count == \"50\"\n\n # Object that is an item and not a container\n requester.response_map[(\"POST\", \"http://dlna_dms:1234/upnp/control/ContentDir\")] = (\n 200,\n {},\n read_file(\"dlna/dms/action_Browse_children_item.xml\"),\n )\n result = await profile.async_browse_direct_children(\"1$6$35$1$1\")\n assert result.number_returned == 0\n assert result.total_matches == 0\n assert result.update_id == 2333\n assert result.result == []\n\n # Bad object ID should result in a UpnpError (HTTP 701: No such object)\n requester.exceptions.append(UpnpResponseError(status=701))\n with pytest.raises(UpnpResponseError) as err:\n await profile.async_browse_direct_children(\"no object\")\n\n assert err.value.status == 701", "def test_help_on_objects(hlwm, path='', depth=8):\n help_txt = hlwm.call(['help', path]).stdout\n assert f\"Object '{path}'\" in help_txt\n\n if depth < 0:\n return\n\n for child in hlwm.list_children(path):\n newpath = (path + '.' 
+ child).lstrip('.')\n test_help_on_objects(hlwm, path=newpath, depth=depth - 1)", "def __json__(self, **kwargs):\n return self.nestify(instance=self, **kwargs).tree", "def show_tree(obj,d=0):\n print \"%s%s\" % (\"-\"*d,obj.__class__.__name__)\n if 'get_children' in dir(obj):\n for a in obj.get_children(): show_tree(a,d+1)", "def make_recursive(obj):\n if isinstance(obj, list):\n for i, l in enumerate(obj):\n obj[i] = AttrDict.make_recursive(l)\n elif isinstance(obj, dict):\n for k, v in obj.items():\n obj[k] = AttrDict.make_recursive(v)\n return AttrDict(obj)\n return obj", "def populateTree(self, obj, obj_name='', inspected_node_is_visible=None):\n logger.debug(\"populateTree with object id = 0x{:x}\".format(id(obj)))\n if inspected_node_is_visible is None:\n inspected_node_is_visible = (obj_name != '')\n self._inspected_node_is_visible = inspected_node_is_visible\n\n if self._inspected_node_is_visible:\n self._root_item = TreeItem(None, _('<invisible_root>'),\n _('<invisible_root>'), None)\n self._root_item.children_fetched = True\n self._inspected_item = TreeItem(obj, obj_name,\n obj_name, is_attribute=None)\n self._root_item.append_child(self._inspected_item)\n else:\n # The root itself will be invisible\n self._root_item = TreeItem(obj, obj_name,\n obj_name, is_attribute=None)\n self._inspected_item = self._root_item\n\n # Fetch all items of the root so we can\n # select the first row in the constructor.\n root_index = self.index(0, 0)\n self.fetchMore(root_index)", "def object_specs(self):\n if self._object_specs is None:\n self.object_specs = self.generate_object_specs()\n \n return self._object_specs", "def print_recursive(self, indents):\n\n\t\tind = \"\\t\"\n\t\toutput = indents * ind + self.name\n\t\tprint(output)\n\t\tfor i in self.children:\n\t\t\ti.print_recursive(indents+1)", "def _recurse_children(self, offset):\n while offset < self.obj_offset + self.Length:\n item = obj.Object(\"VerStruct\", offset = offset, vm = self.obj_vm, parent = self)\n if item.Length < 1 or item.get_key() == None:\n raise StopIteration(\"Could not recover a key for a child at offset {0}\".format(item.obj_offset))\n yield item.get_key(), item.get_children()\n offset = self.offset_pad(offset + item.Length)\n raise StopIteration(\"No children\")", "def traverse(object, path, default=None, request=None):", "def getAllLinks(jsonData, propDict, refDict, prefix='', context=''):\n linkList = OrderedDict()\n # check keys in propertyDictionary\n # if it is a Nav property, check that it exists\n # if it is not a Nav Collection, add it to list\n # otherwise, add everything IN Nav collection\n # if it is a Complex property, check that it exists\n # if it is, recurse on collection or individual item\n for key in propDict:\n item = getType(key).split(':')[-1]\n if propDict[key]['isNav']:\n insideItem = jsonData.get(item)\n if insideItem is not None:\n cType = propDict[key].get('isCollection') \n autoExpand = propDict[key].get('OData.AutoExpand',None) is not None or\\\n propDict[key].get('OData.AutoExpand'.lower(),None) is not None\n if cType is not None:\n cSchema = refDict.get(getNamespace(cType),(None,None))[1]\n if cSchema is None:\n cSchema = context \n for cnt, listItem in enumerate(insideItem):\n linkList[prefix+str(item)+'.'+getType(propDict[key]['isCollection']) +\n '#' + str(cnt)] = (listItem.get('@odata.id'), autoExpand, cType, cSchema, listItem)\n else:\n cType = propDict[key]['attrs'].get('type')\n cSchema = refDict.get(getNamespace(cType),(None,None))[1]\n if cSchema is None:\n cSchema = context \n 
linkList[prefix+str(item)+'.'+getType(propDict[key]['attrs']['name'])] = (\\\n insideItem.get('@odata.id'), autoExpand, cType, cSchema, insideItem)\n for key in propDict:\n item = getType(key).split(':')[-1]\n if propDict[key]['realtype'] == 'complex':\n if jsonData.get(item) is not None:\n if propDict[key].get('isCollection') is not None:\n for listItem in jsonData[item]:\n linkList.update(getAllLinks(\n listItem, propDict[key]['typeprops'], refDict, prefix+item+'.', context))\n else:\n linkList.update(getAllLinks(\n jsonData[item], propDict[key]['typeprops'], refDict, prefix+item+'.', context))\n rsvLogger.debug(str(linkList))\n return linkList", "def dump_iteration_tree(obj):\n def _dump_iteration_tree(obj, f, tablevel):\n if is_instance(obj, Driver):\n f.write(' ' * tablevel)\n f.write(obj.get_pathname())\n f.write('\\n')\n for comp in obj.workflow:\n if is_instance(comp, Driver) or is_instance(comp, Assembly):\n _dump_iteration_tree(comp, f, tablevel + 3)\n else:\n f.write(' ' * (tablevel + 3))\n f.write(comp.get_pathname())\n f.write('\\n')\n elif is_instance(obj, Assembly):\n f.write(' ' * tablevel)\n f.write(obj.get_pathname())\n f.write('\\n')\n _dump_iteration_tree(obj.driver, f, tablevel + 3)\n f = cStringIO.StringIO()\n _dump_iteration_tree(obj, f, 0)\n return f.getvalue()", "def build_traversal_spec(client_factory, name, spec_type, path, skip,\r\n select_set):\r\n traversal_spec = client_factory.create('ns0:TraversalSpec')\r\n traversal_spec.name = name\r\n traversal_spec.type = spec_type\r\n traversal_spec.path = path\r\n traversal_spec.skip = skip\r\n traversal_spec.selectSet = select_set\r\n return traversal_spec", "def __init__(self, obj, datamodel=None):\n with RecursiveConverter.in_progress:\n self.obj = obj\n self.class_name = obj.__class__.__name__\n self.datamodel = datamodel\n self.is_root = datamodel is None\n if self.is_root:\n RecursiveConverter.converted_modules = {}\n RecursiveConverter.typedefs = []\n self.datamodel = VHDLModule('-', obj)\n\n # recursively convert all child modules\n self.childs = []\n\n def conv(self, node):\n if isinstance(node, VHDLList):\n if node.elements_compatible_typed:\n if isinstance(node.elems[0], VHDLModule):\n if self.is_compatible_with_converted_module(node.elems[0]):\n return\n self.childs.append(RecursiveConverter(node.elems[0].current, node.elems[0]))\n\n else:\n # dynamic list..need to convert all modules\n for x in node.elems:\n if isinstance(x, VHDLModule):\n if self.is_compatible_with_converted_module(x):\n return\n self.childs.append(RecursiveConverter(x.current, x))\n elif isinstance(node, VHDLModule):\n if self.is_compatible_with_converted_module(node):\n return\n self.childs.append(RecursiveConverter(node.current, node))\n\n if self.is_root:\n logger.info(f'Creating top.vhd ...')\n self.top_vhdl = TopGenerator(obj)\n\n # maybe some input/output is a convertible module?\n for node in self.inputs:\n conv(self, node)\n\n for node in self.outputs:\n conv(self, node)\n\n # iterate all functions and discover local variables that may need to be converted\n for x in self.obj.__dict__.values():\n if isinstance(x, PyhaFunc):\n for key, val in x.get_local_types().items():\n if isinstance(val, Hardware):\n node = init_vhdl_type(key, val)\n conv(self, node)\n\n # convert instance elements before the instance itself, recursive\n for node in self.datamodel.elems:\n conv(self, node)\n\n self.red_node = get_objects_rednode(obj)\n convert_name = self.get_module_converted_name(self.datamodel)\n logger.info(f'{convert_name} to VHDL 
...')\n\n self.conv = convert(self.red_node, obj) # actual conversion happens here\n\n self.vhdl_conversion = str(self.conv)\n RecursiveConverter.converted_modules[convert_name] = (self.datamodel, self.vhdl_conversion)\n RecursiveConverter.typedefs.extend(self.conv.build_typedefs())", "def buildHierarchy(self, test_input):\n for entry in test_input:\n if entry['manager']not in self.relations:\n self.relations[entry['manager']] = Node(entry['manager'], entry['name'])\n else:\n self.relations[entry['manager']].employees.append(entry['name'])", "def findHierarchy(self):\n def __recursiveHelper(key_name, output, indent):\n if key_name in self.relations:\n for employee in self.relations[key_name].employees:\n output += \" \" * indent + str(employee) +\"\\n\"\n # return __recursiveHelper(employee, output, indent+1)\n __recursiveHelper(employee, output, indent+1)\n else:\n print(output)\n return output\n\n\n #experimenting with Iter() and next() iterators/generators\n #and a while loop in the recursive function:\n\n # def __recursiveHelper(key_name, output, indent):\n # if key_name in self.relations:\n # employees = iter(self.relations[key_name].employees)\n # employee = next(employees, \"stop\")\n # while employees and employee != 'stop':\n # output += \" \" * indent + str(employee) +\"\\n\"\n # __recursiveHelper(next(employees, \"stop\"), output, indent+1)\n # else:\n # employee = next(employees, \"stop\")\n #\n # else:\n # return output\n\n\n\n\n\n output = \"\"\n indent = -1\n # self.relations is a dictionary of manager-name string keys.\n # The employees of None are the top-ranking managers.\n # only issue:\n # having trouble returning the concatenated output\n # from the recursive function:\n return __recursiveHelper(None, output, indent+1)", "def nested_object_traversal(obj: any, leaf_function: Callable, leaf_type: type):\n if isinstance(obj, (list, tuple)):\n result = [Role.nested_object_traversal(elem, leaf_function, leaf_type) for elem in obj]\n return type(obj)(result)\n elif isinstance(obj, dict):\n return {\n k: Role.nested_object_traversal(v, leaf_function, leaf_type)\n for k, v in sorted(obj.items())\n }\n elif isinstance(obj, leaf_type):\n return leaf_function(obj)\n else:\n return obj", "def reconstruct_object(flat_obj):\n\theads = list(set([x[0][1:].split('/')[0] for x in flat_obj]))\n\n\t# this is a primitive value\n\tif len(heads) == 1 and heads[0] == \"\":\n\t\treturn flat_obj[0][1]\n\n\t# check if it is a list\n\tif all([v.isdigit() for v in heads]):\n\t\theads = sorted([int(v) for v in heads])\n\t\tretval = list(range(len(heads)))\n\telse:\n\t\tretval = {}\n\n\tfor h in heads:\n\t\t# recursively construct objects from paths\n\t\tprefix = \"/{}\".format(h)\n\t\tsub_paths = [(x[0][len(prefix):], x[1]) for x in flat_obj \n\t\t\t\t\t if x[0].startswith(prefix)]\n\t\tretval[h] = reconstruct_object(sub_paths)\n\n\treturn retval", "async def _materialize_walk_obj(d) -> Tree:\n if isinstance(d, ViewModel):\n # Resolve the first level of awaitables\n edge_set = set(d.__visited_edges__)\n edges = await resolve_parallel_dict(d, edge_set)\n # Resolve all edges recursively\n vals = await asyncio.gather(*(_materialize_walk_obj(v) for k, v in edges))\n for (k, _), val in zip(edges, vals):\n if k in edge_set:\n setattr(d, k, val)\n return d\n elif isinstance(d, dict):\n # Resolve the first level of awaitables\n items = await resolve_parallel_dict(d)\n vals = await asyncio.gather(*(_materialize_walk_obj(v) for k, v in items))\n for (k, _), val in zip(items, vals):\n d[k] = val\n return d\n 
elif isinstance(d, primitive) or d is None:\n return d\n elif isinstance(d, PaginatedEdge):\n d.edges = await resolve_parallel_iterable(d.edges)\n return d\n elif isinstance(d, Iterable):\n resolved = await resolve_parallel_iterable(d)\n return await asyncio.gather(\n *(val for val in (_materialize_walk_obj(v) for v in resolved) if val)\n )\n elif type(d) == types.AsyncGeneratorType:\n d_list = [i async for i in d] # TODO: Optimize\n resolved = await resolve_parallel_iterable(d_list)\n return await asyncio.gather(\n *(val for val in (_materialize_walk_obj(v) for v in resolved) if val)\n )\n elif isawaitable(d) or callable(d):\n # TODO: Profile and optimize recursive call\n resolved = await async_resolve_field(d)\n return await _materialize_walk_obj(resolved)\n raise Exception(\"Invalid type: \" + str(type(d)))", "def findChildren(obj, dirname=''):\n\n lst = []\n for name, child in obj.objectItems():\n if hasattr(aq_base(child), 'isPrincipiaFolderish') and \\\n child.isPrincipiaFolderish:\n lst.extend(findChildren(child, dirname + obj.getId() + '/'))\n else:\n lst.append((dirname + obj.getId() + \"/\" + name, child))\n\n return lst", "def _build_tree(self, root, obj, declared_ns):\n\n if obj is None:\n return\n\n get_logger().debug('Building tree for %s (%s)', str(obj), root.name)\n for attr_name in obj.__class__.__ordered__:\n if attr_name.startswith('_'):\n continue\n\n attr = getattr(obj.__class__, attr_name)\n\n if isinstance(attr, XmlElementProperty):\n if not _attr_supports_version(attr, self.version):\n get_logger().debug('Skipping class attribute %s for not supported version %s',\n attr.name, self.version)\n continue\n\n child = getattr(obj, attr_name)\n if not _attr_element_content_serializable(attr, child):\n get_logger().debug('NOT Serializing Child Element %s (%s) because of its value',\n attr.name, attr_name)\n continue\n\n get_logger().debug('Serializing Child Element %s (%s)', attr.name, attr_name)\n self._serialize_object_to_node(root, attr.name, child, declared_ns, attr.kind)\n elif isinstance(attr, XmlAttributeProperty):\n if not _attr_supports_version(attr, self.version):\n get_logger().debug('Skipping class attribute %s for not supported version %s',\n attr.name, self.version)\n continue\n\n value = getattr(obj, attr_name)\n if value is not None:\n root.add_attribute(attr.name, value)", "def _recursion_helper(self, iterator, recursion_level):\n for resource in iterator:\n # Check if we need to display contents of a container.\n if resource.is_container() and recursion_level > 0:\n yield _HeaderFormatWrapper(\n resource,\n display_detail=self._display_detail,\n use_gsutil_style=self._use_gsutil_style)\n\n # Get container contents by adding wildcard to URL.\n nested_iterator = self._get_container_iterator(\n resource.storage_url, recursion_level-1)\n for nested_resource in nested_iterator:\n yield nested_resource\n\n else:\n # Resource wasn't a container we can recurse into, so just yield it.\n yield _ResourceFormatWrapper(\n resource,\n all_versions=self._all_versions,\n display_detail=self._display_detail,\n include_etag=self._include_etag,\n readable_sizes=self._readable_sizes,\n full_formatter=self._full_formatter)", "def build_tree(elem, level = 1024, remove_root = 0):\n if level <= 0:\n return None\n level -= 1\n\n lista = elem.objectValues()\n node = {}\n children = []\n\n for i in lista:\n result = (build_tree(i, level))\n if result:\n children.append(result)\n\n if remove_root:\n return children\n else:\n node[\"title\"] = get_id(elem)\n node[\"children\"] = 
[]\n\n if len(lista):\n node[\"key\"] = get_id(elem)\n node[\"isFolder\"] = True\n\n if not len(node[\"children\"]):\n node[\"isLazy\"] = True\n\n node[\"children\"] = children\n\n return node", "def __json__(self, **kwargs):\n return self.nestify(**kwargs).tree", "def _populate_terms(self, optobj):\n has_relationship = optobj is not None and 'relationship' in optobj.optional_attrs\n # Make parents and relationships references to the actual GO terms.\n for rec in self.values():\n # Given parent GO IDs, set parent GO Term objects\n rec.parents = set([self[goid] for goid in rec._parents])\n\n # For each parent GO Term object, add it's child GO Term to the children data member\n for parent_rec in rec.parents:\n parent_rec.children.add(rec)\n\n if has_relationship:\n self._populate_relationships(rec)", "def build_tree(self):\n active = self.get_active()\n family = self.dbstate.db.get_family_from_handle(active)\n self.goto_handle(handle=family)", "def create_hierarchy(self):\n\t\tpass", "def test_Tree():", "def generate_object_specs(self):\n return [[] for _ in xrange(self.batch_size)]", "def render_spec_table(spec,\n rst_table=None,\n depth_char=\".\",\n depth=0,\n show_subattributes=True,\n show_subdatasets=True,\n show_sublinks=True,\n show_subgroups=False,\n recursive_subgroups=False,\n appreviate_main_object_doc=True):\n # Create a new table if necessary\n rst_table = rst_table if rst_table is not None else RSTTable(cols=['Id', 'Type', 'Description'])\n\n ###########################################\n # Render the row for the current object\n ###########################################\n # Determine the type of the object\n spec_type = SpecToRST.spec_basetype_name(spec)\n\n # Determine the name of the object\n depth_str = depth_char * depth\n spec_name = depth_str\n if spec.get('name', None) is not None:\n spec_name += spec.name\n elif spec.data_type_def is not None:\n spec_name += '<%s>' % spec.data_type_def\n elif spec.data_type_inc is not None:\n spec_name += '<%s>' % RSTDocument.get_reference(\n RSTSectionLabelHelper.get_section_label(spec.data_type_inc),\n spec.data_type_inc)\n else:\n spec_type_key = spec.type_key()\n spec_name += '<%s>' % RSTDocument.get_reference(\n RSTSectionLabelHelper.get_section_label(spec[spec_type_key]),\n spec[spec_type_key])\n\n # Create the doc description of the spec\n if appreviate_main_object_doc and depth == 0:\n # Create the appreviated description of the main object\n spec_doc = \"Top level %s for %s\" % (spec_type, spec_name.lstrip(depth_str))\n spec_doc += SpecToRST.render_specification_properties(spec=spec,\n newline=rst_table.newline,\n ignore_props=['primitive_type'])\n else:\n # Create the description for the object\n spec_doc = SpecToRST.clean_schema_doc_string(doc_str=spec.doc,\n add_prefix=rst_table.newline + rst_table.newline)\n # Create the list of additional object properties to be added as a list ot the doc\n spec_doc += SpecToRST.render_specification_properties(spec=spec,\n newline=rst_table.newline,\n ignore_props=['primitive_type'])\n\n # Render the object to the table\n rst_table.add_row(row_values=[spec_name, spec_type, spec_doc],\n replace_none='',\n convert_to_str=True)\n\n ################################################\n # Recursively add the subobjects if requested\n ################################################\n # Recursively add all attributes of the current spec\n if (isinstance(spec, DatasetSpec) or isinstance(spec, GroupSpec)) and show_subattributes:\n for a in spec.attributes:\n SpecToRST.render_spec_table(spec=a,\n 
rst_table=rst_table,\n depth_char=depth_char,\n depth=depth + 1)\n # Recursively add all Datasets of the current spec\n if isinstance(spec, GroupSpec) and show_subdatasets:\n for d in spec.datasets:\n SpecToRST.render_spec_table(spec=d,\n rst_table=rst_table,\n depth_char=depth_char,\n depth=depth + 1)\n # Recursively add all Links for the current spec\n if isinstance(spec, GroupSpec) and show_sublinks:\n for link in spec.links:\n SpecToRST.render_spec_table(spec=link,\n rst_table=rst_table,\n depth_char=depth_char,\n depth=depth + 1)\n # Recursively add all subgroups if requested\n if show_subgroups and isinstance(spec, GroupSpec):\n if recursive_subgroups:\n for g in spec.groups:\n SpecToRST.render_spec_table(spec=g,\n rst_table=rst_table,\n depth_char=depth_char,\n depth=depth+1,\n show_subgroups=show_subgroups,\n show_subdatasets=show_subdatasets,\n show_subattributes=show_subattributes)\n else:\n for g in spec.groups:\n SpecToRST.render_spec_table(spec=g,\n rst_table=rst_table,\n depth_char=depth_char,\n depth=depth+1,\n recursive_subgroups=recursive_subgroups,\n show_subgroups=False,\n show_subattributes=False,\n show_subdatasets=False)\n # Return the created table\n return rst_table", "def test_tree_binary_tree() -> None:\n t = generate_binary_tree_resources(4, 3)\n field(t, (\"root\", \"ds\", \"f1\")).identity = \"email\"\n field(t, (\"root.0.1.0\", \"ds.0.1.0\", \"f1\")).identity = \"ssn\"\n field(t, (\"root.1.1\", \"ds.1.1\", \"f1\")).identity = \"user_id\"\n assert generate_traversal({\"email\": \"X\"}, *t)\n assert generate_traversal({\"ssn\": \"X\"}, *t)\n assert generate_traversal({\"user_id\": \"X\"}, *t)", "def createHierarchyObj(self, root, name, factory, relpath=\"\", alog=None):\n return createHierarchyObj(root, name, factory, relpath, alog)", "def walkTree(self):\n if self.parentId:\n print self.parentId, self.id, self.value\n for child in self.children.itervalues():\n child.walkTree()", "def getParents(obj):", "async def materialize_walk_obj(d) -> Tree:\n return await _materialize_walk_obj(d)", "def get_featured_tree(self):\n\n for t in self.tree.get_terminals():\n t.sample_series = self.feature_table[t.name]\n self.feature_tree = self.recursion_tree(self.tree.root)\n for clade in self.feature_tree.find_clades(order='level'):\n clade.depth = 1+len(self.feature_tree.get_path(clade))\n \n #i = 0\n #for clade in self.feature_tree.find_clades(order='level'):\n # clade.ID_num = i \n #clade.abu = np.mean(clade.sample_series.values)\n #clade.domain_otu = clade.sample_series.idxmax()", "def _get_subobjects(self) -> Iterable[SymbolicObject]:\n\n return self._subobjects", "def iter_recursive_objects(self):\n from noc.inv.models.interface import Interface\n\n for i in Interface.objects.filter(managed_object=self.id):\n yield i", "def get_recursed_tree(self, root_nodes):\n nodes = []\n for n in root_nodes:\n nodes.append({\n \"id\": n.pk,\n \"label\": n.title,\n \"slug\": n.slug,\n \"view_url\": reverse('sveedocuments:page-details', args=[n.slug]),\n \"children\": self.get_recursed_tree(n.get_children())\n })\n return nodes", "def _generateExpandedEOCs(self, obj, **args):\n return []", "def rcontainer_tree_str(obj):\n tree_task = ContainerTreePrintTask()\n the_recurser = ObjectRecursion(tasks=[tree_task])\n return the_recurser.recurse(obj)[0][0]", "def __iter__(self):\n for tree in self._tree.subTrees():\n yield self.__class__(tree)", "def intialize_hierarchy_paths(self):\n\n leaf_nodes = [node[0] for node in self.tree.out_degree if node[1] == 0]\n paths = 
[self.tree_utils.determine_path_to_root([node]) for node in leaf_nodes]\n\n # Normalize paths per level in hierarchy - currently the nodes are of increasing number throughout the tree.\n normalized_paths = [self.tree_utils.normalize_path_from_root_per_level(path) for path in paths]\n\n normalized_encoder = {'Root': {'original_key': 0, 'derived_key': 0}}\n normalized_decoder = { 0: {'original_key': 0, 'value': 'Root'}}\n decoder = dict(self.tree.nodes(data=\"name\"))\n encoder = dict([(value, key) for key, value in decoder.items()])\n\n #initiaize encoders\n for path, normalized_path in zip(paths, normalized_paths):\n key = path[-1]\n derived_key = normalized_path[-1]\n if key in leaf_nodes:\n normalized_encoder[decoder[key]] = {'original_key': key, 'derived_key': derived_key}\n normalized_decoder[derived_key] = {'original_key': key, 'value': decoder[key]}\n\n oov_path = [[0, 0, 0]]\n normalized_paths = oov_path + normalized_paths\n\n #Align length of paths if necessary\n longest_path = max([len(path) for path in normalized_paths])\n\n # Sort paths ascending\n sorted_normalized_paths = []\n for i in range(len(normalized_paths)):\n found_path = normalized_paths[0]\n for path in normalized_paths:\n for found_node, node in zip(found_path,path):\n if found_node > node:\n found_path = path\n break\n\n if not (found_path is None):\n sorted_normalized_paths.append(found_path)\n normalized_paths.remove(found_path)\n\n return normalized_encoder, normalized_decoder, sorted_normalized_paths", "def generate_specs_build(self):\n from django_swagger_utils.drf_server.generators.swagger_generator import SwaggerGenerator\n\n swagger_gen = SwaggerGenerator(self.parser, self.paths, self.app_name)\n # generating request_response files\n swagger_gen.generate_request_response()\n # testing properties\n swagger_gen.generate_definitions()\n # generating global parameters\n swagger_gen.generate_parameters()\n # generating global response\n swagger_gen.generate_responses()\n # generating urls\n swagger_gen.generate_urls()", "def test_tree(self):\n root = role_middleware.get_root()\n tree_list = role_middleware.get_tree(root.id)\n role_middleware.force_refresh()\n print(tree_list)", "def test_grandchildren():\n\n # note c.upto(\"status\").desired.grandchildren\n # this is the same as *c.upto(\"status\").desired in python3.5+\n res = conf.status.conditions.choose(lambda c: (c.type, c.reason, c.upto(\"status\").desired.grandchildren))\n assert \"type\" in res\n assert \"reason\" in res\n assert \"version\" in res\n assert \"image\" in res\n assert \"force\" in res", "def get_children(obj):\n ret = obj.to_dict()\n if obj.children.all():\n ret.__setitem__('children',[get_children(j) for j in obj.children.all()])\n return ret", "def tree(obj):\n if isinstance(obj, (bool, int, float, str, bytes)):\n return obj\n elif isinstance(obj, (dt.date, dt.time)):\n return obj.isoformat()\n elif isinstance(obj, dict):\n return {k: tree(v) for k, v in obj.items()}\n elif isnamedtupleinstance(obj):\n return {f: tree(getattr(obj, f)) for f in obj._fields}\n elif isinstance(obj, (list, tuple, set)):\n return [tree(i) for i in obj]\n elif is_dataclass(obj):\n return {obj.__class__.__qualname__: tree(dataclassNonDefaults(obj))}\n else:\n return str(obj)", "def _set_level_depth(self, optobj):\n has_relationship = optobj is not None and 'relationship' in optobj.optional_attrs\n\n def _init_level(rec):\n if rec.level is None:\n if rec.parents:\n rec.level = min(_init_level(rec) for rec in rec.parents) + 1\n else:\n rec.level = 0\n return 
rec.level\n\n def _init_depth(rec):\n if rec.depth is None:\n if rec.parents:\n rec.depth = max(_init_depth(rec) for rec in rec.parents) + 1\n else:\n rec.depth = 0\n return rec.depth\n\n def _init_reldepth(rec):\n if not hasattr(rec, 'reldepth'):\n up_terms = rec.get_goterms_upper()\n if up_terms:\n rec.reldepth = max(_init_reldepth(rec) for rec in up_terms) + 1\n else:\n rec.reldepth = 0\n return rec.reldepth\n\n for rec in self.values():\n\n # Add invert relationships\n if has_relationship:\n if rec.depth is None:\n _init_reldepth(rec)\n\n # print(\"BBBBBBBBBBB1\", rec.id, rec.relationship)\n #for (typedef, terms) in rec.relationship.items():\n # invert_typedef = self.typedefs[typedef].inverse_of\n # # print(\"BBBBBBBBBBB2 {} ({}) ({}) ({})\".format(\n # # rec.id, rec.relationship, typedef, invert_typedef))\n # if invert_typedef:\n # # Add inverted relationship\n # for term in terms:\n # if not hasattr(term, 'relationship'):\n # term.relationship = defaultdict(set)\n # term.relationship[invert_typedef].add(rec)\n # print(\"BBBBBBBBBBB3\", rec.id, rec.relationship)\n\n if rec.level is None:\n _init_level(rec)\n\n if rec.depth is None:\n _init_depth(rec)", "def recursive_visit(self, node):\n node = self.generic_visit(node)\n\n # walk through the children: either iterate the node or look up the keys\n if hasattr(node, '_dict_keys'):\n for v in node._dict_keys:\n self.recursive_visit(getattr(node, v))\n\n if hasattr(node, '_list_keys'):\n for v in node._list_keys:\n self.recursive_visit(getattr(node, v))\n else:\n iter_target = None\n # need special handling of node.data or node_list in order to walk through all formatting node, e.g. endl\n if hasattr(node, 'node_list'): # use the unproxy list to get all formatting\n iter_target = node.node_list\n elif hasattr(node, 'data'):\n iter_target = node.data\n elif hasattr(node, '__iter__'):\n iter_target = node\n\n if iter_target:\n change_list = []\n for child in iter_target:\n new_node = self.recursive_visit(child)\n if new_node is not child:\n change_list.append((child, new_node))\n\n for original_child, new_child in change_list:\n i = original_child.index_on_parent\n iter_target.remove(original_child)\n iter_target.insert(i, new_child)\n\n return node", "def _tree_nodes(self):\n\n self.arbor._grow_tree(self)\n yield self\n if self.ancestors is None:\n return\n for ancestor in self.ancestors:\n for a_node in ancestor._tree_nodes:\n yield a_node", "def test_hierarchy_jumps(self):\n it = [\n \"[[Chapter]] Chapter I\",\n \"This is chapter I text\",\n \"[[Article]] Article I\",\n \"This is article I text\",\n ]\n\n descriptor = {\n 'components': ['Chapter', 'Section', 'Sub-section', 'Article'],\n 'patterns': ['Chapter', 'Section', 'Sub-section', 'Article']\n }\n\n doc = parse_iterable(it, descriptor)\n\n def identifier(x):\n reg = re.compile(r'\\[(\\d+\\_?(\\d+)?)[a-z]?\\]')\n return int(reg.search(x).groups(0)[0])\n\n reading_order = sorted(doc.graph.nodes(), key=identifier)\n\n expected = [\n \"ROOT [0]\",\n \"Chapter [1]\",\n \"Article [2]\",\n ]\n\n self.assertListEqual(reading_order, expected)", "def _analyze_tree(self, tree, parent_path: Text = ''):\n # TODO: Move the loading of nested embryos to an embryo method\n\n if not tree:\n return\n\n for obj in tree:\n if obj is None:\n # an empty node, do nothing\n continue\n if isinstance(obj, dict):\n k = list(obj.keys())[0]\n v = obj[k]\n if isinstance(v, str):\n # in this case, we have a file name or nested embryo with\n # associated template rendering metadata we must parse out.\n if k == 
'embryo':\n # embryo:falcon_app(foo)\n match = RE_RENDERING_EMBRYO.match(v)\n nested_embryo_name, ctx_key = match.groups()\n self.nested_embryos.append(\n {\n 'embryo_name': nested_embryo_name,\n 'context_path': ctx_key,\n 'dir_path': parent_path,\n }\n )\n else:\n match = RE_RENDERING_METADATA.match(v)\n fname = k\n if not match:\n shout(\n f'unable to find renderer match for \"{k}: {v}\".. skipping'\n )\n else:\n tpl_name, ctx_key = match.groups()\n fpath = join(parent_path, fname)\n self.template_meta[fpath] = {\n 'template_name': tpl_name,\n 'context_path': ctx_key,\n }\n self.fpaths.add(fpath)\n else:\n # call _analyze_tree on subdirectory\n child_path = join(parent_path, k)\n self._analyze_tree(obj[k], child_path)\n self.directory_paths.add(child_path)\n elif obj.endswith('/'):\n # it's an empty directory name\n dir_name = obj\n dir_path = join(parent_path, dir_name)\n self.directory_paths.add(dir_path)\n elif ':' in obj:\n parts = obj.split(':')\n if parts[0] == 'embryo':\n # embryo:falcon_app(foo)\n match = RE_RENDERING_EMBRYO.match(parts[1])\n nested_embryo_name, ctx_key = match.groups()\n self.nested_embryos.append(\n {\n 'embryo_name': nested_embryo_name,\n 'context_path': ctx_key,\n 'dir_path': parent_path,\n }\n )\n else:\n fname, metadata_str = parts\n match = RE_RENDERING_METADATA.match(metadata_str)\n tpl_name, ctx_key = match.groups()\n fpath = join(parent_path, fname)\n self.template_meta[fpath] = {\n 'template_name': tpl_name,\n 'context_path': ctx_key,\n }\n self.fpaths.add(fpath)\n else:\n # it's a plain ol' file name\n fname = obj\n fpath = join(parent_path, fname)\n self.fpaths.add(fpath)\n if fpath in self.jinja2_templates:\n # attempt to resolve the full path\n self.template_meta[fpath] = {\n 'template_name': fpath,\n 'context_path': None,\n }\n elif fname in self.jinja2_templates:\n # top-level resolution of file name only\n self.template_meta[fpath] = {\n 'template_name': fname,\n 'context_path': None,\n }", "def _each_nest(spec, parent_nest, post_order):\n pre_order = not post_order\n\n level = parent_nest.level + 1\n\n if isinstance(spec, dict):\n name = spec[\"name\"]\n coefficient = spec[\"coefficient\"]\n assert isinstance(\n coefficient, (int, float)\n ), \"Coefficient '%s' (%s) not a number\" % (\n name,\n coefficient,\n ) # forgot to eval coefficient?\n alternatives = [\n a[\"name\"] if isinstance(a, dict) else a for a in spec[\"alternatives\"]\n ]\n\n nest = Nest(name=name)\n nest.level = parent_nest.level + 1\n nest.coefficient = coefficient\n nest.product_of_coefficients = parent_nest.product_of_coefficients * coefficient\n nest.alternatives = alternatives\n nest.ancestors = parent_nest.ancestors + [name]\n\n if pre_order:\n yield spec, nest\n\n # recursively iterate the list of alternatives\n for alternative in spec[\"alternatives\"]:\n for sub_node, sub_nest in _each_nest(alternative, nest, post_order):\n yield sub_node, sub_nest\n\n if post_order:\n yield spec, nest\n\n elif isinstance(spec, str):\n name = spec\n\n nest = Nest(name=name)\n nest.level = parent_nest.level + 1\n nest.product_of_coefficients = parent_nest.product_of_coefficients\n nest.ancestors = parent_nest.ancestors + [name]\n\n yield spec, nest", "def subtrees(self):\n yield from subtrees(self)", "def test_01_BuildObjects(self):\n # print(PrettyFormatAny.form(self.m_pyhouse_obj, 'A1-01-A - Main', 190))\n # print(PrettyFormatAny.form(self.m_pyhouse_obj.House, 'A1-02-B - House', 190))\n # print(PrettyFormatAny.form(self.m_pyhouse_obj.House.Location, 'A1-01-C - Location', 190))\n 
self.assertIsInstance(self.m_pyhouse_obj, PyHouseInformation)\n self.assertIsInstance(self.m_pyhouse_obj.House, HouseInformation)\n self.assertIsInstance(self.m_pyhouse_obj.House.Location, LocationInformationPrivate)", "def device_tree():\n\n\tglobal cached_tree\n\n\tif cached_tree is not None:\n\t\treturn cached_tree\n\n\tfrom .. import devices\n\n\ttree = {}\n\n\tfor manufacturer in devices.manufacturers:\n\t\tsubtree = {}\n\n\t\tfor model, mock_model in zip(manufacturer.models, manufacturer.mock_models):\n\t\t\tif model is None and mock_model is None:\n\t\t\t\tcontinue\n\t\t\telif model is not None and mock_model is None:\n\t\t\t\tname = model.name\n\t\t\telif model is None and mock_model is not None:\n\t\t\t\tname = mock_model.name\n\t\t\telif model is not None and mock_model is not None:\n\t\t\t\tif model.name != mock_model.name:\n\t\t\t\t\traise ValueError('Different device names: \"{0}\" and '\n\t\t\t\t\t\t\t'\"{1}\".'.format(model.name, mock_model.name))\n\n\t\t\t\tname = model.name\n\n\t\t\tsubtree[name] = {}\n\n\t\t\tif model is not None:\n\t\t\t\tsubtree[name]['real'] = model.implementation\n\n\t\t\tif mock_model is not None:\n\t\t\t\tsubtree[name]['mock'] = mock_model.implementation\n\n\t\ttree[manufacturer.name] = subtree\n\n\tcached_tree = tree\n\treturn cached_tree", "def walk(self):\n yield self\n for child in self.children:\n for descendant in child.walk():\n yield descendant", "def execute(self, ns, devices=None, _deep=None):\n for device in devices:\n yield fcmd.NewTableCommand(title=device)\n for parent in get_parents(ns, device, _deep):\n yield get_obj_info(ns, parent, self.app.config.human_friendly)", "def optimized_work_tree(obj, **kwargs):\n exclusions = kwargs.get('exclusions', {\"groups\": [], \"classes\": [], \"params\": []})\n groups_done = {}\n classes = {\"depths\": {}, \"content\": {}}\n params = {\"depths\": {}, \"content\": {}}\n if hasattr(obj, 'hostname') and not hasattr(obj, 'name'):\n obj.name = obj.hostname\n to_index = [(obj, 1)]\n\n index_pop = to_index.pop\n index_extend = to_index.extend\n while to_index:\n (obj, depth) = index_pop()\n objname = obj.name\n if objname in groups_done and groups_done[objname] <= depth:\n continue\n\n objclasses = obj.classes.exclude(classname__in=exclusions['classes'])\n updated_classes = optimized_update_values(objclasses, \"classname\", \"classparams\", depth=depth, results=classes)\n\n objparams = obj.parameters.exclude(paramkey__in=exclusions['params'])\n updated_params = optimized_update_values(objparams, \"paramkey\", \"paramvalue\", depth=depth, results=params)\n\n if not updated_classes or not updated_params:\n return (\"Fail\", \"Fail\")\n\n groups_done[objname] = depth\n depth += 1\n children = ((group, depth) for group in obj.groups.exclude(name__in=exclusions['groups']))\n index_extend(children)\n\n params['content']['done_count'] = len(groups_done)\n return (classes[\"content\"], params[\"content\"])", "def job_tree(self):\n\n # 1. Enforce depth of 1 for steps\n def depth_one(steps):\n depth_one = []\n for step in steps:\n if type(step) is list:\n if type(step[0]) is list:\n depth_one.append(step[0])\n else:\n depth_one.append(step)\n else:\n depth_one.append([step])\n return depth_one\n\n # 2. 
Convert steps to list of node objects (0,1,2,3...)\n def assign_nodes(steps):\n nodes = [i for i in range(len(steps))]\n objects = list(\n set([elem for sublist in steps for elem in sublist]))\n\n # checks for multiple src and dst objects -- added when looking for\n # mutiples\n split_objects = []\n for obj in objects:\n if len(obj) > 1:\n new_objs = obj.split(\", \")\n split_objects.extend(new_objs)\n else:\n split_objects.append(obj)\n objects = split_objects\n del(split_objects)\n\n # populate with leafless trees (Node objects, no edges)\n for node in nodes:\n nodes[node] = Node(str(node))\n\n # search for leafy trees\n for obj in objects:\n\n # accounts for multiple drc/dst objects\n leaves = []\n for i, sublist in enumerate(steps):\n for string in sublist:\n if string.count(',') > 0:\n if obj in string:\n leaves.append(i)\n else:\n if obj in sublist:\n leaves.append(i)\n leaves = sorted(list(set(leaves)))\n\n if len(leaves) > 1:\n viable_edges = []\n\n # compute cross-product\n for leaf1 in leaves:\n for leaf2 in leaves:\n if str(leaf1) != str(leaf2) and sorted((leaf1, leaf2)) not in viable_edges:\n viable_edges.append(sorted((leaf1, leaf2)))\n\n # form edge networks\n for edge in viable_edges:\n n1, n2 = nodes[edge[0]], nodes[edge[1]]\n n1.add_edge(n2)\n n2.add_edge(n1)\n nodes[int(n1.name)], nodes[int(n2.name)] = n1, n2\n return nodes\n\n # 3. Determine number of trees and regroup by connected nodes\n def connected_nodes(nodes):\n proto_trees = []\n nodes = set(nodes)\n\n while nodes:\n n = nodes.pop()\n group = {n}\n queue = [n]\n while queue:\n n = queue.pop(0)\n neighbors = n.edges\n neighbors.difference_update(group)\n nodes.difference_update(neighbors)\n group.update(neighbors)\n queue.extend(neighbors)\n proto_trees.append(group)\n return proto_trees\n\n # 4. Convert nodes to nested dictionary of parent-children relations\n # i.e. adding depth -- also deals with tree-node sorting and path\n # optimization\n def build_tree_dict(trees, steps):\n # node sorting in trees\n sorted_trees = []\n for tree in trees:\n sorted_trees.append(\n sorted(tree, key=lambda x: int(x.name)))\n\n # retrieve values of the nodes (the protocol's containers)\n # for each tree ... may want to use dictionary eventually\n all_values = []\n for tree in sorted_trees:\n values = [steps[int(node.name)] for node in tree]\n all_values.append(values)\n\n # create relational tuples:\n all_digs = []\n singles = []\n dst_potentials = []\n for tree_idx in range(len(sorted_trees)):\n edge_flag = False\n tree_digs = []\n for node_idx in range(len(sorted_trees[tree_idx])):\n\n # digs: directed graph vectors\n digs = []\n dst_nodes = []\n node_values = all_values[tree_idx][node_idx]\n src_node = str(sorted_trees[tree_idx][node_idx].name)\n\n # ACTION ON MULTIPLE OBJECTS (E.G. 
TRANSFER FROM SRC -> DST\n # WELLS)\n # Outcome space: {1-1, 1-many, many-1, many-many}\n if len(node_values) == 2:\n # single destination (x-1)\n if node_values[1].count(\",\") == 0:\n dst_nodes = [i for i, sublist in enumerate(\n steps) if node_values[1] == sublist[0]]\n # multiple destinations (x-many)\n elif node_values[1].count(\",\") > 0:\n dst_nodes = []\n for dst in node_values[1].replace(\", \", \"\"):\n for i, sublist in enumerate(steps):\n if i not in dst_nodes and dst == sublist[0]:\n dst_nodes.append(i)\n\n # ACTION ON A SINGLE OBJECT\n elif len(node_values) == 1:\n dst_nodes = [i for i, sublist in enumerate(\n steps) if node_values[0] == sublist[0]]\n\n # Constructing tuples in (child, parent) format\n for dst_node in dst_nodes:\n dig = (int(dst_node), int(src_node))\n digs.append(dig)\n\n # else: an edge-case for dictionaries constructed with no edges\n # initiates tree separation via flag\n if digs != []:\n edge_flag = False\n tree_digs.append(digs)\n else:\n edge_flag = True\n digs = [(int(src_node), int(src_node))]\n tree_digs.append(digs)\n\n # digraph cycle detection: avoids cycles by overlooking set\n # repeats\n true_tree_digs = []\n for digs in tree_digs:\n for dig in digs:\n if tuple(sorted(dig, reverse=True)) not in true_tree_digs:\n true_tree_digs.append(\n tuple(sorted(dig, reverse=True)))\n\n # edge-case for dictionaries constructed with no edges\n if true_tree_digs != [] and edge_flag == False:\n all_digs.append(true_tree_digs)\n elif edge_flag == True:\n all_digs.extend(tree_digs)\n\n # Enforces forest ordering\n all_digs = sorted(all_digs, key=lambda x: x[0])\n\n # job tree traversal to find all paths:\n forest = []\n for digs_set in all_digs:\n\n # pass 1: initialize nodes dictionary\n nodes = OrderedDict()\n for tup in digs_set:\n id, parent_id = tup\n # ensure all nodes accounted for\n nodes[id] = OrderedDict({'id': id})\n nodes[parent_id] = OrderedDict({'id': parent_id})\n\n # pass 2: create trees and parent-child relations\n for tup in digs_set:\n id, parent_id = tup\n node = nodes[id]\n # links node to its parent\n if id != parent_id:\n # add new_node as child to parent\n parent = nodes[parent_id]\n if not 'children' in parent:\n # ensure parent has a 'children' field\n parent['children'] = []\n children = parent['children']\n children.append(node)\n\n desired_tree_idx = sorted(list(nodes.keys()))[0]\n forest.append(nodes[desired_tree_idx])\n return forest\n\n # 5. 
Convert dictionary-stored nodes to unflattened, nested list of\n # parent-children relations\n def dict_to_list(forest):\n forest_list = []\n for tree in forest:\n tString = str(json.dumps(tree))\n tString = tString.replace('\"id\": ', \"\").replace('\"children\": ', \"\").replace(\n '[{', \"[\").replace('}]', \"]\").replace('{', \"[\").replace('}', \"]\")\n\n # find largest repeated branch (if applicable)\n # maybe think about using prefix trees or SIMD extensions for better\n # efficiency\n x, y, length, match = 0, 0, 0, ''\n for y in range(len(tString)):\n for x in range(len(tString)):\n substring = tString[y:x]\n if len(list(re.finditer(re.escape(substring), tString))) > 1 and len(substring) > length:\n match = substring\n length = len(substring)\n\n # checking for legitimate branch repeat\n if \"[\" in match and \"]\" in match:\n hits = []\n index = 0\n if len(tString) > 3:\n while index < len(tString):\n index = tString.find(str(match), index)\n if index == -1:\n break\n hits.append(index)\n index += len(match)\n\n # find all locations of repeated branch and remove\n if len(hits) > 1:\n for start_loc in hits[1:]:\n tString = tString[:start_loc] + \\\n tString[start_loc:].replace(match, \"]\", 1)\n\n # increment all numbers in string to match the protocol\n newString = \"\"\n numString = \"\"\n for el in tString:\n if el.isdigit(): # build number\n numString += el\n else:\n if numString != \"\": # convert it to int and reinstantaite numString\n numString = str(int(numString) + 1)\n newString += numString\n newString += el\n numString = \"\"\n tString = newString\n del newString\n\n forest_list.append(ast.literal_eval(tString))\n return forest_list\n\n # 6. Print job tree(s)\n def print_tree(lst, level=0):\n print(' ' * (level - 1) + '+---' * (level > 0) + str(lst[0]))\n for l in lst[1:]:\n if type(l) is list:\n print_tree(l, level + 1)\n else:\n print(' ' * level + '+---' + l)\n\n # 1\n steps = depth_one(self.object_list)\n # 2\n nodes = assign_nodes(steps)\n # 3\n proto_forest = connected_nodes(nodes)\n # 4\n forest = build_tree_dict(proto_forest, steps)\n # 5\n self.forest_list = dict_to_list(forest)\n # 6\n print(\"\\n\" + \"A suggested Job Tree based on container dependency: \\n\")\n for tree_list in self.forest_list:\n print_tree(tree_list)", "def _generateNestingLevel(self, obj, **args):\n start = args.get('startOffset')\n end = args.get('endOffset')\n if start is not None and end is not None:\n return []\n\n result = []\n if not args.get('mode', None):\n args['mode'] = self._mode\n args['stringType'] = 'nestinglevel'\n nestingLevel = self._script.utilities.nestingLevel(obj)\n if nestingLevel:\n result.append(self._script.formatting.getString(**args)\\\n % nestingLevel)\n return result", "def isrecursive(object):\r\n return PrettyPrinter().isrecursive(object)", "def walk(self): # DirObj.walk\n for name, subdir in self.subdirs.iteritems():\n for e in subdir.walk():\n yield e\n for name, fileEntry in self.files.iteritems():\n yield fileEntry\n yield self", "def recreate_tree(self, fetch_from_api=False):\n logger.debug('Creating organization category tree and pages...')\n for parent, children in self._hierarchy(\n fetch_from_api=fetch_from_api).items():\n self._create_pages(parent)\n parent_category = f'[[Category:{parent}]]'\n for child in children:\n self._create_pages(\n child, parent_category=parent_category)\n logger.debug('Done.')", "def _helpWalk(syn, synId, includeTypes, newpath=None):\n starting = syn.get(synId, downloadFile=False)\n # If the first file is not a container, 
return immediately\n if newpath is None and not is_container(starting):\n return\n elif newpath is None:\n dirpath = (starting[\"name\"], synId)\n else:\n dirpath = (newpath, synId)\n dirs = []\n nondirs = []\n results = syn.getChildren(synId, includeTypes)\n for i in results:\n if is_container(i):\n dirs.append((i[\"name\"], i[\"id\"]))\n else:\n nondirs.append((i[\"name\"], i[\"id\"]))\n yield dirpath, dirs, nondirs\n for name in dirs:\n # The directory path for each os.walk() result needs to be built up\n # This is why newpath is passed in\n newpath = os.path.join(dirpath[0], name[0])\n for x in _helpWalk(syn, name[1], includeTypes, newpath=newpath):\n yield x", "def generate_hierarchy(self,descr):\n # assert the existence of all the keys we need to set up at least on level\n assert 'problem_class' in descr\n assert 'problem_params' in descr\n assert 'dtype_u' in descr\n assert 'dtype_f' in descr\n assert 'sweeper_class' in descr\n assert 'level_params' in descr\n\n # convert problem-dependent parameters consisting of dictionary of lists to a list of dictionaries with only a\n # single entry per key, one dict per level\n pparams_list = self.__dict_to_list(descr['problem_params'])\n # put this newly generated list into the description dictionary (copy to avoid changing the original one)\n descr_new = cp.deepcopy(descr)\n descr_new['problem_params'] = pparams_list\n # generate list of dictionaries out of the description\n descr_list = self.__dict_to_list(descr_new)\n\n # sanity check: is there a transfer class? is there one even if only a single level is specified?\n if len(descr_list) > 1:\n assert 'transfer_class' in descr_new\n assert 'transfer_params' in descr_new\n elif 'transfer_class' in descr_new:\n print('WARNING: you have specified transfer classes, but only a single level...')\n\n # generate levels, register and connect if needed\n for l in range(len(descr_list)):\n\n # check if we have a hook on this list. 
if not, use default class.\n if 'hook_class' in descr_list[l]:\n hook = descr_list[l]['hook_class']\n else:\n hook = hookclass.hooks\n\n if 'sweeper_params' in descr_list[l]:\n swparams = descr_list[l]['sweeper_params']\n else:\n swparams = {}\n\n if not 'collocation_class' in swparams:\n assert 'collocation_class' in descr_list[l]\n swparams['collocation_class'] = descr_list[l]['collocation_class']\n\n if not 'num_nodes' in swparams:\n assert 'num_nodes' in descr_list[l]\n swparams['num_nodes'] = descr_list[l]['num_nodes']\n\n L = levclass.level(problem_class = descr_list[l]['problem_class'],\n problem_params = descr_list[l]['problem_params'],\n dtype_u = descr_list[l]['dtype_u'],\n dtype_f = descr_list[l]['dtype_f'],\n sweeper_class = descr_list[l]['sweeper_class'],\n sweeper_params = swparams,\n level_params = descr_list[l]['level_params'],\n hook_class = hook,\n id = 'L'+str(l))\n\n self.register_level(L)\n\n if l > 0:\n self.connect_levels(transfer_class = descr_list[l]['transfer_class'],\n transfer_params = descr_list[l]['transfer_params'],\n fine_level = self.levels[l-1],\n coarse_level = self.levels[l])", "def do_mptt_traversal(organized_hierarchy_dict):\n\n mptt_dict = {}\n root = construct_hierarchy_tree(organized_hierarchy_dict)\n stack = []\n if not root:\n return mptt_dict\n\n stack.append(root)\n\n counter = 0\n\n while stack:\n employee = stack[-1]\n supervisor = employee.supervisor\n if not supervisor:\n # this is a root\n supervisor_id = None\n else:\n supervisor_id = mptt_dict[supervisor.name]['id']\n\n employee_id = uuid.uuid4().hex\n if employee.name not in mptt_dict:\n # we are seeing this employee for the first time, so we set their \"lft\" value. The \"rgt\" will just be the\n # initial '0'.\n counter += 1\n employee.lft = counter\n mptt_dict[employee.name] = {\n 'name': employee.name,\n \"id\": employee_id,\n \"supervisor_id\": supervisor_id,\n \"lft\": employee.lft,\n \"rgt\": employee.rgt\n }\n\n # reverse the children so that when added to the stack, the starting child is picked first\n subordinates = reversed(employee.subordinates)\n for subordinate in subordinates:\n stack.append(subordinate)\n else:\n # we are now seeing this employee for the second time, so we set the \"rgt\" value.\n counter += 1\n mptt_dict[employee.name]['rgt'] = counter\n # we are done with it, remove it from the stack\n stack.pop()\n\n return mptt_dict", "def test_get_all_ancestor_properties(self):\n pass", "def generate_type_hierarchy(ctx):\n ctx.run(\"./env/bin/python -m puresnmp.types > doc/typetree.rst\")", "def make_tree(\n self,\n recursive: bool = True\n ) -> list:\n children = []\n for file in self.path.iterdir():\n path = file\n\n if path.is_dir() and recursive:\n # try create Study\n try:\n children.append(Study(path, parent=self, recursive=recursive, dataset_index=self._dataset_index,\n dataset_state=self._dataset_state))\n continue\n except NotStudyFolder:\n pass\n # try create Experiment\n try:\n children.append(Experiment(path, parent=self, recursive=recursive, dataset_index=self._dataset_index,\n dataset_state=self._dataset_state))\n continue\n except NotExperimentFolder:\n pass\n #try create Processing\n try:\n children.append(Processing(path, parent=self, recursive=recursive, dataset_index=self._dataset_index,\n dataset_state=self._dataset_state))\n continue\n except NotProcessingFolder:\n pass\n children.append(Folder(path, parent=self, recursive=recursive, dataset_index=self._dataset_index,\n dataset_state=self._dataset_state))\n continue\n try:\n if path.name in 
self._dataset_index:\n children.append(Dataset(path, **self._dataset_state))\n continue\n except (UnsuportedDatasetType, IncompleteDataset, NotADatasetDir):\n pass\n try:\n children.append(JCAMPDX(path, load=False))\n continue\n except (InvalidJcampdxFile, JcampdxVersionError):\n pass\n return children", "def test_render_tree(self) -> None:\n\n def get_children(node):\n return node.children\n\n node, expect, withtags = self.tree_case_1()\n actual = render_tree(node, get_children)\n assert expect == actual, (expect, actual)\n\n node, expect, withtags = self.tree_case_2()\n actual = render_tree(node, get_children, 1)\n assert expect == actual, (expect, actual)\n\n # Ensure that we can call render_tree on the same Node\n # again. This wasn't possible in version 2.4.1 and earlier\n # due to a bug in render_tree (visited was set to {} as default\n # parameter)\n actual = render_tree(node, get_children, 1)\n assert expect == actual, (expect, actual)", "def getHierarchyObj(self, root, name, relpath):\n return getHierarchyObj(root, name, relpath)", "def gen_model(children, tree_structure):\n referenced = False\n extended = False\n for child in children:\n #print child.arg\n node = dict()\n extended = False\n if hasattr(child, 'substmts'):\n for attribute in child.substmts:\n # process the 'type' attribute:\n # Currently integer, enumeration and string are supported.\n if attribute.keyword == 'type':\n if len(attribute.arg.split(':'))>1:\n attribute.arg = attribute.arg.split(':')[-1]\n # Firstly, it is checked if the attribute type has been previously define in typedefs.\n if attribute.arg in TYPEDEFS:\n if TYPEDEFS[attribute.arg]['type'][:3] == 'int':\n node['type'] = 'integer'\n node['format'] = TYPEDEFS[attribute.arg]['format']\n elif TYPEDEFS[attribute.arg]['type'] == 'enumeration':\n node['type'] = 'string'\n node['enum'] = [e\n for e in TYPEDEFS[attribute.arg]['enum']]\n # map all other types to string\n else:\n node['type'] = 'string'\n elif attribute.arg[:3] == 'int':\n node['type'] = 'integer'\n node['format'] = attribute.arg\n elif attribute.arg == 'decimal64':\n node['type'] = 'number'\n node['format'] = 'double'\n elif attribute.arg == 'boolean':\n node['type'] = attribute.arg\n elif attribute.arg == 'enumeration':\n node['type'] = 'string'\n node['enum'] = [e[0]\n for e in attribute.i_type_spec.enums]\n # map all other types to string\n else:\n node['type'] = 'string'\n elif attribute.keyword == 'mandatory':\n parent_model = to_upper_camelcase(child.parent.arg)\n if parent_model not in PARENT_MODELS.keys():\n PARENT_MODELS[parent_model] = {'models':[],'discriminator':to_lower_camelcase(child.arg)}\n # Process the reference to another model.\n # We differentiate between single and array references.\n elif attribute.keyword == 'uses':\n if len(attribute.arg.split(':'))>1:\n attribute.arg = attribute.arg.split(':')[-1]\n\n ref = to_upper_camelcase(attribute.arg)\n ref = '#/definitions/' + ref\n if str(child.keyword) == 'list':\n node['items'] = {'$ref': ref}\n node['type'] = 'array'\n for attribute in child.substmts:\n if attribute.keyword == 'key':\n listkey = to_lower_camelcase(attribute.arg)\n if listkey:\n node['x-key'] = listkey\n referenced = True\n elif str(child.keyword) == 'grouping':\n ref = to_upper_camelcase(attribute.arg)\n if ref in tree_structure:\n PARENT_MODELS[ref]['models'].append(child.arg)\n list_properties = [item for item in tree_structure[ref]['properties']]\n ref = '#/definitions/' + ref\n node['allOf'] = []\n node['allOf'].append({'$ref': ref})\n index = 0\n for 
i in range(0, len(child.i_children)):\n #print len(child.i_children)\n if to_lower_camelcase(child.i_children[index].arg) in list_properties:\n del child.i_children[index]\n else:\n index+=1\n extended = True\n else:\n pending_models.append(child)\n else:\n node['$ref'] = ref\n referenced = True\n\n # When a node contains a referenced model as an attribute the algorithm\n # does not go deeper into the sub-tree of the referenced model.\n if not referenced :\n if not extended:\n node = gen_model_node(child, node)\n else:\n node_ext = dict()\n node_ext = gen_model_node(child, node_ext)\n node['allOf'].append( node_ext)\n extended = False\n\n # Leaf-lists need to create arrays.\n # Copy the 'node' content to 'items' and change the reference\n if child.keyword == 'leaf-list':\n ll_node = {'type': 'array', 'items': node}\n node = ll_node\n # Groupings are class names and upper camelcase.\n # All the others are variables and lower camelcase.\n if child.keyword == 'grouping':\n tree_structure[to_upper_camelcase(child.arg)] = node\n else:\n tree_structure[to_lower_camelcase(child.arg)] = node\n # TODO: do we really need this return value? We are working on the\n # reference anyhow.\n return tree_structure", "def iter_tree(self):\n yield self\n for c in self.children:\n for ci in c.iter_tree:\n yield ci", "def Walk(self, in_order=False):\n if in_order:\n # Walking in order - yield self first.\n yield self\n for subtest in self.subtests:\n for f in subtest.Walk(in_order):\n yield f\n if not in_order:\n # Walking depth first - yield self last.\n yield self", "def traverse(self, path, response=None, validated_hook=None):\n request = self\n request_get = request.get\n if response is None:\n response = self.response\n\n # remember path for later use\n browser_path = path\n\n # Cleanup the path list\n if path[:1] == '/':\n path = path[1:]\n if path[-1:] == '/':\n path = path[:-1]\n clean = []\n for item in path.split('/'):\n # Make sure that certain things that dont make sense\n # cannot be traversed.\n if item in ('REQUEST', 'aq_self', 'aq_base'):\n return response.notFoundError(path)\n if not item or item == '.':\n continue\n elif item == '..':\n del clean[-1]\n else:\n clean.append(item)\n path = clean\n\n # How did this request come in? 
(HTTP GET, PUT, POST, etc.)\n method = request_get('REQUEST_METHOD', 'GET').upper()\n\n # Probably a browser\n no_acquire_flag = 0\n if method in ('GET', 'POST', 'PURGE') and \\\n not is_xmlrpc_response(response):\n # index_html is still the default method, only any object can\n # override it by implementing its own __browser_default__ method\n method = 'index_html'\n elif method != 'HEAD' and self.maybe_webdav_client:\n # Probably a WebDAV client.\n no_acquire_flag = 1\n\n URL = request['URL']\n parents = request['PARENTS']\n object = parents[-1]\n del parents[:]\n\n self.roles = getRoles(None, None, object, UNSPECIFIED_ROLES)\n\n # if the top object has a __bobo_traverse__ method, then use it\n # to possibly traverse to an alternate top-level object.\n if hasattr(object, '__bobo_traverse__'):\n try:\n new_object = object.__bobo_traverse__(request)\n if new_object is not None:\n object = new_object\n self.roles = getRoles(None, None, object,\n UNSPECIFIED_ROLES)\n except Exception:\n pass\n\n if not path and not method:\n return response.forbiddenError(self['URL'])\n\n # Traverse the URL to find the object:\n if hasattr(object, '__of__'):\n # Try to bind the top-level object to the request\n # This is how you get 'self.REQUEST'\n object = object.__of__(RequestContainer(REQUEST=request))\n parents.append(object)\n\n steps = self.steps\n self._steps = _steps = list(map(quote, steps))\n path.reverse()\n\n request['TraversalRequestNameStack'] = request.path = path\n request['ACTUAL_URL'] = request['URL'] + quote(browser_path)\n\n # Set the posttraverse for duration of the traversal here\n self._post_traverse = post_traverse = []\n\n entry_name = ''\n try:\n # We build parents in the wrong order, so we\n # need to make sure we reverse it when we're done.\n while 1:\n bpth = getattr(object, '__before_publishing_traverse__', None)\n if bpth is not None:\n bpth(object, self)\n\n path = request.path = request['TraversalRequestNameStack']\n # Check for method:\n if path:\n entry_name = path.pop()\n else:\n # If we have reached the end of the path, we look to see\n # if we can find IBrowserPublisher.browserDefault. If so,\n # we call it to let the object tell us how to publish it.\n # BrowserDefault returns the object to be published\n # (usually self) and a sequence of names to traverse to\n # find the method to be published.\n\n # This is webdav support. The last object in the path\n # should not be acquired. 
Instead, a NullResource should\n # be given if it doesn't exist:\n if no_acquire_flag and \\\n hasattr(object, 'aq_base') and \\\n not hasattr(object, '__bobo_traverse__'):\n\n if (object.__parent__ is not\n aq_inner(object).__parent__):\n from webdav.NullResource import NullResource\n object = NullResource(parents[-2], object.getId(),\n self).__of__(parents[-2])\n\n if IBrowserPublisher.providedBy(object):\n adapter = object\n else:\n adapter = queryMultiAdapter((object, self),\n IBrowserPublisher)\n if adapter is None:\n # Zope2 doesn't set up its own adapters in a lot\n # of cases so we will just use a default adapter.\n adapter = DefaultPublishTraverse(object, self)\n\n object, default_path = adapter.browserDefault(self)\n if default_path:\n request._hacked_path = 1\n if len(default_path) > 1:\n path = list(default_path)\n method = path.pop()\n request['TraversalRequestNameStack'] = path\n continue\n else:\n entry_name = default_path[0]\n elif (method\n and hasattr(object, method)\n and entry_name != method\n and getattr(object, method) is not None):\n request._hacked_path = 1\n entry_name = method\n method = 'index_html'\n else:\n if hasattr(object, '__call__'):\n self.roles = getRoles(\n object, '__call__',\n object.__call__, self.roles)\n if request._hacked_path:\n i = URL.rfind('/')\n if i > 0:\n response.setBase(URL[:i])\n break\n step = quote(entry_name)\n _steps.append(step)\n request['URL'] = URL = f'{request[\"URL\"]}/{step}'\n\n try:\n subobject = self.traverseName(object, entry_name)\n if hasattr(object, '__bobo_traverse__') or \\\n hasattr(object, entry_name):\n check_name = entry_name\n else:\n check_name = None\n\n self.roles = getRoles(\n object, check_name, subobject,\n self.roles)\n object = subobject\n # traverseName() might raise ZTK's NotFound\n except (KeyError, AttributeError, ztkNotFound):\n if response.debug_mode:\n return response.debugError(\n \"Cannot locate object at: %s\" % URL)\n else:\n return response.notFoundError(URL)\n except Forbidden as e:\n if self.response.debug_mode:\n return response.debugError(e.args)\n else:\n return response.forbiddenError(entry_name)\n\n parents.append(object)\n\n steps.append(entry_name)\n finally:\n parents.reverse()\n\n # Note - no_acquire_flag is necessary to support\n # things like DAV. We have to make sure\n # that the target object is not acquired\n # if the request_method is other than GET\n # or POST. 
Otherwise, you could never use\n # PUT to add a new object named 'test' if\n # an object 'test' existed above it in the\n # hierarchy -- you'd always get the\n # existing object :(\n if no_acquire_flag and \\\n hasattr(parents[1], 'aq_base') and \\\n not hasattr(parents[1], '__bobo_traverse__'):\n base = aq_base(parents[1])\n if not hasattr(base, entry_name):\n try:\n if entry_name not in base:\n raise AttributeError(entry_name)\n except TypeError:\n raise AttributeError(entry_name)\n\n # After traversal post traversal hooks aren't available anymore\n del self._post_traverse\n\n request['PUBLISHED'] = parents.pop(0)\n\n # Do authorization checks\n user = groups = None\n i = 0\n\n if 1: # Always perform authentication.\n\n last_parent_index = len(parents)\n if hasattr(object, '__allow_groups__'):\n groups = object.__allow_groups__\n inext = 0\n else:\n inext = None\n for i in range(last_parent_index):\n if hasattr(parents[i], '__allow_groups__'):\n groups = parents[i].__allow_groups__\n inext = i + 1\n break\n\n if inext is not None:\n i = inext\n v = getattr(groups, 'validate', old_validation)\n\n auth = request._auth\n\n if v is old_validation and self.roles is UNSPECIFIED_ROLES:\n # No roles, so if we have a named group, get roles from\n # group keys\n if hasattr(groups, 'keys'):\n self.roles = list(groups.keys())\n else:\n try:\n groups = groups()\n except Exception:\n pass\n try:\n self.roles = list(groups.keys())\n except Exception:\n pass\n\n if groups is None:\n # Public group, hack structures to get it to validate\n self.roles = None\n auth = ''\n\n if v is old_validation:\n user = old_validation(groups, request, auth, self.roles)\n elif self.roles is UNSPECIFIED_ROLES:\n user = v(request, auth)\n else:\n user = v(request, auth, self.roles)\n\n while user is None and i < last_parent_index:\n parent = parents[i]\n i = i + 1\n if hasattr(parent, '__allow_groups__'):\n groups = parent.__allow_groups__\n else:\n continue\n if hasattr(groups, 'validate'):\n v = groups.validate\n else:\n v = old_validation\n if v is old_validation:\n user = old_validation(\n groups, request, auth, self.roles)\n elif self.roles is UNSPECIFIED_ROLES:\n user = v(request, auth)\n else:\n user = v(request, auth, self.roles)\n\n if user is None and self.roles != UNSPECIFIED_ROLES:\n response.unauthorized()\n\n if user is not None:\n if validated_hook is not None:\n validated_hook(self, user)\n request['AUTHENTICATED_USER'] = user\n request['AUTHENTICATION_PATH'] = '/'.join(steps[:-i])\n\n # Remove http request method from the URL.\n request['URL'] = URL\n\n # Run post traversal hooks\n if post_traverse:\n result = exec_callables(post_traverse)\n if result is not None:\n object = result\n\n return object", "def __iter__(self):\n element = self\n\n while element.HasField(\"pathtype\"):\n yield element\n\n if element.HasField(\"nested_path\"):\n element = element.nested_path\n else:\n break", "def build_tree_helper(x, n, d, max_d, name=defaultname):\n ret = {}\n ret['name'] = name(x)\n if d == max_d:\n return ret\n children = collatz.children(x, n)\n if x == 1:\n children = children[1:]\n if children:\n ret['children'] = [build_tree_helper(x, n, d + 1, max_d, name) for x in children]\n return ret", "def __init__(self, \r\n initial_concept_uri=None, \r\n lang=None, \r\n broader=True, \r\n narrower=False, \r\n verbose=False,\r\n refresh=False):\r\n def get_cached_skos_option_dict():\r\n '''\r\n Helper function to retrieve cached skos_option_dict\r\n '''\r\n cached_skos_option_dict_path = 
os.path.join(self.cache_dir, 'skos_options.yaml')\r\n try:\r\n cached_skos_option_dict_file = open(cached_skos_option_dict_path, 'r')\r\n cached_skos_option_dict = yaml.load(cached_skos_option_dict_file)\r\n cached_skos_option_dict_file.close()\r\n except:\r\n cached_skos_option_dict = {}\r\n \r\n return cached_skos_option_dict\r\n \r\n # Start of constructor\r\n assert narrower or broader, 'Need at least one of \"broader\" or \"narrower\" set to True in order to build concept trees'\r\n \r\n self.fcache_dir = os.path.join(tempfile.gettempdir(), 'concept_hierarchy')\r\n \r\n self.lang = lang or 'en'\r\n self.narrower = narrower\r\n self.broader = broader\r\n self.verbose = verbose\r\n \r\n self.skos_option_dict = {'altLabels': True, \r\n 'narrower': narrower, \r\n 'broader': broader,\r\n 'lang': lang\r\n } \r\n \r\n # Force refresh if SKOS options have changed\r\n self.refresh = refresh or (self.skos_option_dict != get_cached_skos_option_dict()) \r\n\r\n self.concept_fetcher = ConceptFetcher(self.skos_option_dict)\r\n \r\n self.concept_registry = {}\r\n \r\n if self.refresh:\r\n if self.verbose:\r\n print 'Refreshing disk cache'\r\n else:\r\n self.load() \r\n \r\n if initial_concept_uri:\r\n self.get_concept_from_uri(initial_concept_uri) # Build tree around initial URI if specified\r", "def _gen_test_tree_6():\n tree = BinaryNode(20)\n tree.left = BinaryNode(10)\n tree.right = BinaryNode(30)\n tree.left.right = BinaryNode(25)\n return tree", "def __getitem__(self, object):\n # check for previously unknown object\n if object not in self.parents:\n self.parents[object] = object\n self.weights[object] = 1\n return object\n\n # find path of objects leading to the root\n path = [object]\n root = self.parents[object]\n while root != path[-1]:\n path.append(root)\n root = self.parents[root]\n\n # compress the path and return\n for ancestor in path:\n self.parents[ancestor] = root\n return root", "def get_all_paths_containing_string_in_nested_objects(object_ut, target_str, _result, max_depth=2, _path_string=\"\", _current_depth=0):\n tuple_inspected = inspect.getmembers(object_ut)\n destructive_callables = [\"__clear__\", \"__setattr__\", \"__init__\", \"__init_subclass__\", \"__delattr__\", \"__call__\"]\n path_value = namedtuple(\"path_value\", [\"locator\", \"value\"])\n\n _current_depth += 1\n if _current_depth > max_depth:\n return _result\n\n if hasattr(tuple_inspected, \"__iter__\"):\n for v in tuple_inspected:\n try:\n attr = getattr(eval(\"object_ut\" + _path_string), v[0])\n\n if callable(attr) and (v[0] not in destructive_callables):\n postfix = \"()\"\n candidate_str = str(attr())\n else:\n postfix = \"\"\n candidate_str = str(attr)\n\n if target_str in candidate_str:\n _result.append(path_value(_path_string + \".\" + str(v[0]) + postfix, str(v[1]) + postfix))\n if (_current_depth + 1) <= max_depth:\n _result = get_all_paths_containing_string_in_nested_objects(attr, target_str, _result=_result,\n _path_string=_path_string + \".\" + v[0] + postfix,\n _current_depth=_current_depth)\n\n except:\n # Many exceptions can be expected here\n # as this evaluates almost all attributes of a given object without knowing much about them.\n pass\n return _result", "def walk(self):\n current = self\n yield current\n while current.parent:\n current = current.parent\n yield current", "def test_team_template_folders_id_children_get(self):\n pass", "def __init__(self):\n self.tree = {}", "def traverse(\n self, node, key=ROOT_NODE, parents=None, cb=print, context=None, depth=0\n ):\n\n # Trim parents 
breadcrumb as 4 will suffice.\n parents = parents[-4:] if parents else []\n\n # Unwind items as a dict or an enumerated list\n # to simplify traversal.\n if isinstance(node, (dict, list)):\n valuelist = node.items() if isinstance(node, dict) else enumerate(node)\n if key is not ROOT_NODE:\n parents.append(key)\n parents.append(node)\n for k, i in valuelist:\n self.traverse(i, k, parents, cb, context, depth=depth + 1)\n return\n\n # Resolve HTTP references adding fragments\n # to 'schema', 'headers' or 'parameters'\n do_traverse, new_context = self.check_traverse_and_set_context(key, node)\n # If the context changes, update the global pointer too.\n # TODO: we would eventually get rid of self.context completely.\n if new_context:\n self.context = new_context\n context = new_context\n\n log.debug(\"test node context %r, %r, %r\", key, node, do_traverse)\n\n if do_traverse:\n ancestor, needle = parents[-3:-1]\n # log.info(f\"replacing: {needle} in {ancestor} with ref {node}. Parents are {parents}\")\n ancestor[needle] = cb(key, node, context)\n\n # Get the component where to store the given item.\n component_name = self.get_component_name(needle, parents)\n\n # Use a pre and post traversal functions.\n # - before: append the reference to yaml_components.\n # - traverse\n # - after: deepcopy the resulting item in the yaml_components\n # then replace it with the reference in the specs\n if component_name:\n # log.info(f\"needle {needle} in components_map.\")\n host, fragment = urldefrag(node)\n fragment = basename(fragment.strip(\"/\"))\n self.yaml_components[component_name][fragment] = ancestor[needle]\n\n if isinstance(ancestor[needle], (dict, list)):\n self.traverse(\n ancestor[needle], key, parents, cb, context, depth=depth + 1\n )\n\n if component_name:\n new_anchor = \"#\" + join(\"/components\", component_name, fragment)\n log.debug(\"setting new anchor: %r\", new_anchor)\n\n # Now the node is fully resolved. I can replace it with the\n # Deepcopy...\n self.yaml_components[component_name][fragment] = deepcopy(\n ancestor[needle]\n )\n\n # ... and update the reference in the original part.\n if needle == \"$ref\":\n parents[-1][needle] = new_anchor\n else:\n ancestor[needle] = {\"$ref\": new_anchor}" ]
[ "0.63169825", "0.577359", "0.5654486", "0.5466749", "0.5373837", "0.52702874", "0.5185981", "0.517904", "0.50982857", "0.50975597", "0.5090196", "0.5065294", "0.50485235", "0.50385857", "0.503404", "0.49969995", "0.4953721", "0.4944381", "0.49171883", "0.4908716", "0.49081615", "0.49032056", "0.49007183", "0.48865217", "0.48819378", "0.48814753", "0.48796782", "0.48725304", "0.48721215", "0.48710334", "0.48596057", "0.48466608", "0.48426533", "0.483267", "0.48283136", "0.48234183", "0.48190716", "0.4817447", "0.4804603", "0.48026103", "0.47854468", "0.4774788", "0.4751666", "0.47498792", "0.4737045", "0.47365338", "0.47362453", "0.4733651", "0.47325674", "0.47277603", "0.4724335", "0.47243068", "0.4712056", "0.4711474", "0.47080684", "0.46928048", "0.46905252", "0.46874917", "0.4672956", "0.46694985", "0.4639887", "0.46374893", "0.46258655", "0.46182436", "0.46178558", "0.46016207", "0.45890632", "0.45860258", "0.45844164", "0.45836812", "0.45807514", "0.4579608", "0.4577156", "0.4568767", "0.45562539", "0.4555326", "0.4535235", "0.45344803", "0.45333704", "0.45328733", "0.45176533", "0.4517449", "0.4517086", "0.45123965", "0.45098644", "0.45092526", "0.4507609", "0.44983444", "0.44973588", "0.4493638", "0.4491855", "0.44896445", "0.44895384", "0.4485766", "0.44818097", "0.44726947", "0.44718555", "0.44698447", "0.44657505", "0.4464082" ]
0.6471344
0
Builds the Property Spec.
def build_property_spec(client_factory, type="VirtualMachine", properties_to_collect=["name"], all_properties=False): property_spec = client_factory.create('ns0:PropertySpec') property_spec.all = all_properties property_spec.pathSet = properties_to_collect property_spec.type = type return property_spec
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_property_filter_spec(client_factory, property_specs, object_specs):\r\n property_filter_spec = client_factory.create('ns0:PropertyFilterSpec')\r\n property_filter_spec.propSet = property_specs\r\n property_filter_spec.objectSet = object_specs\r\n return property_filter_spec", "def build(self, spec, prefix):\n make()", "def get_prop_spec(client_factory, spec_type, properties):\r\n prop_spec = client_factory.create('ns0:PropertySpec')\r\n prop_spec.type = spec_type\r\n prop_spec.pathSet = properties\r\n return prop_spec", "def get_prop_spec(client_factory, spec_type, properties):\n prop_spec = client_factory.create('ns0:PropertySpec')\n prop_spec.type = spec_type\n prop_spec.pathSet = properties\n return prop_spec", "def _build_pod_spec(self):\n logger.debug(\"Building Pod Spec\")\n crds = []\n try:\n crds = [\n yaml.load(Path(f).read_text())\n for f in [\n \"files/configs.config.gatekeeper.sh.yaml\",\n \"files/constrainttemplates.templates.gatekeeper.sh.yaml\",\n \"files/constraintpodstatuses.status.gatekeeper.sh.yaml\",\n \"files/constrainttemplatepodstatuses.status.gatekeeper.sh.yaml\",\n ]\n ]\n except yaml.YAMLError as exc:\n logger.error(\"Error in configuration file:\", exc)\n\n crd_objects = [\n CustomResourceDefintion(crd[\"metadata\"][\"name\"], crd[\"spec\"])\n for crd in crds\n ]\n\n config = self.model.config\n spec_template = {}\n with open(\"files/pod-spec.yaml.jinja2\") as fh:\n spec_template = Template(fh.read())\n\n try:\n image_details = self.image.fetch()\n except OCIImageResourceError as e:\n self.model.unit.status = e.status\n return\n\n template_args = {\n \"crds\": crd_objects,\n \"image_details\": image_details,\n \"imagePullPolicy\": config[\"imagePullPolicy\"],\n \"app_name\": self.app.name,\n \"audit_cli_args\": self._audit_cli_args(),\n \"namespace\": os.environ[\"JUJU_MODEL_NAME\"],\n }\n\n spec = yaml.load(spec_template.render(**template_args))\n\n print(f\"Pod spec: {spec}\")\n return spec", "def generate_property_template(self):\n template = {\n \"@id\": \"url or curie of the property\",\n \"@type\": \"rdf:Property\",\n \"rdfs:comment\": \"description of the property\",\n \"rdfs:label\": \"carmel case, should match @id\",\n \"schema:domainIncludes\": {\n \"@id\": \"class which use it as a property, could be list\"\n },\n \"schema:isPartOf\": {\n \"@id\": \"http://schema.biothings.io\"\n },\n \"schema:rangeIncludes\": {\n \"@id\": \"relates a property to a class that constitutes (one of) the expected type(s) for values of the property\"\n }\n }\n return template", "def property_setup(self, properties):\n return properties", "def _create_property_field(property_, alias_dictionary):\n name_for_methods = property_['name_for_methods']\n\n assert property_['default_value'] is not None, \\\n ('MakeComputedStyleBase requires an default value for all fields, none specified '\n 'for property ' + property_['name'])\n\n if property_['field_template'] in alias_dictionary:\n alias_template = property_['field_template']\n for field in alias_dictionary[alias_template]:\n if field != 'name':\n property_[field] = alias_dictionary[alias_template][field]\n\n if property_['field_template'] == 'keyword':\n type_name = property_['type_name']\n default_value = type_name + '::' + enum_value_name(property_['default_value'])\n assert property_['field_size'] is None, \\\n (\"'\" + property_['name'] + \"' is a keyword field, \"\n \"so it should not specify a field_size\")\n size = int(math.ceil(math.log(len(property_['keywords']), 2)))\n elif property_['field_template'] == 
'multi_keyword':\n type_name = property_['type_name']\n default_value = type_name + '::' + enum_value_name(property_['default_value'])\n size = len(property_['keywords']) - 1 # Subtract 1 for 'none' keyword\n elif property_['field_template'] == 'external':\n type_name = property_['type_name']\n default_value = property_['default_value']\n size = None\n elif property_['field_template'] == 'primitive':\n type_name = property_['type_name']\n default_value = property_['default_value']\n size = 1 if type_name == 'bool' else property_[\"field_size\"] # pack bools with 1 bit.\n elif property_['field_template'] == 'pointer':\n type_name = property_['type_name']\n default_value = property_['default_value']\n size = None\n else:\n assert property_['field_template'] == 'monotonic_flag', \"Please put a valid value for field_template\"\n type_name = 'bool'\n default_value = 'false'\n size = 1\n\n if property_['wrapper_pointer_name']:\n assert property_['field_template'] in ['pointer', 'external']\n if property_['field_template'] == 'external':\n type_name = '{}<{}>'.format(property_['wrapper_pointer_name'], type_name)\n\n return Field(\n 'property',\n name_for_methods,\n property_name=property_['name'],\n inherited=property_['inherited'],\n independent=property_['independent'],\n type_name=type_name,\n wrapper_pointer_name=property_['wrapper_pointer_name'],\n field_template=property_['field_template'],\n size=size,\n default_value=default_value,\n custom_copy=property_['custom_copy'],\n custom_compare=property_['custom_compare'],\n mutable=property_['mutable'],\n getter_method_name=property_['getter'],\n setter_method_name=property_['setter'],\n initial_method_name=property_['initial'],\n computed_style_custom_functions=property_['computed_style_custom_functions'],\n )", "def render_specification_properties(spec, newline='\\n', ignore_props=None, prepend_items=None, append_items=None):\n\n spec_prop_list = []\n if prepend_items is not None:\n spec_prop_list += prepend_items\n ignore_keys = [] if ignore_props is None else ignore_props\n # Add link properties\n if isinstance(spec, LinkSpec):\n spec_prop_list.append('**Target Type** %s' %\n RSTDocument.get_reference(RSTSectionLabelHelper.get_section_label(\n spec['target_type']),\n spec['target_type']))\n # Add dataset properties\n if isinstance(spec, DatasetSpec):\n if spec.data_type_def is not None and spec.def_key() not in ignore_keys:\n spec_prop_list.append('**Neurodata Type:** %s' % str(spec.data_type_def))\n if spec.data_type_inc is not None and spec.inc_key() not in ignore_keys:\n extend_type = str(spec.data_type_inc)\n spec_prop_list.append('**Extends:** %s' %\n RSTDocument.get_reference(\n RSTSectionLabelHelper.get_section_label(extend_type),\n extend_type))\n if 'primitive_type' not in ignore_keys:\n spec_prop_list.append('**Primitive Type:** %s' % SpecToRST.spec_basetype_name(spec))\n if spec.get('quantity', None) is not None and 'quantity' not in ignore_keys:\n spec_prop_list.append('**Quantity:** %s' % SpecToRST.quantity_to_string(spec['quantity']))\n if spec.get('dtype', None) is not None and 'dtype' not in ignore_keys:\n spec_prop_list.append('**Data Type:** %s' % SpecToRST.render_data_type(spec['dtype']))\n if spec.get('dims', None) is not None and 'dims' not in ignore_keys:\n spec_prop_list.append('**Dimensions:** %s' % str(spec['dims']))\n if spec.get('shape', None) is not None and 'shape' not in ignore_keys:\n spec_prop_list.append('**Shape:** %s' % str(spec['shape']))\n if spec.get('linkable', None) is not None and 'linnkable' not in 
ignore_keys:\n spec_prop_list.append('**Linkable:** %s' % str(spec['linkable']))\n # Add group properties\n if isinstance(spec, GroupSpec):\n if spec.data_type_def is not None and spec.def_key() not in ignore_keys:\n ntype = str(spec.data_type_def)\n spec_prop_list.append('**Neurodata Type:** %s' %\n RSTDocument.get_reference(\n RSTSectionLabelHelper.get_section_label(ntype),\n ntype))\n if spec.data_type_inc is not None and spec.inc_key() not in ignore_keys:\n extend_type = str(spec.data_type_inc)\n spec_prop_list.append('**Extends:** %s' %\n RSTDocument.get_reference(\n RSTSectionLabelHelper.get_section_label(extend_type),\n extend_type))\n if 'primitive_type' not in ignore_keys:\n spec_prop_list.append('**Primitive Type:** %s' % SpecToRST.spec_basetype_name(spec))\n if spec.get('quantity', None) is not None and 'quantity' not in ignore_keys:\n spec_prop_list.append('**Quantity:** %s' % SpecToRST.quantity_to_string(spec['quantity']))\n if spec.get('linkable', None) is not None and 'linkable' not in ignore_keys:\n spec_prop_list.append('**Linkable:** %s' % str(spec['linkable']))\n # Add attribute spec properites\n if isinstance(spec, AttributeSpec):\n if 'primitive_type' not in ignore_keys:\n spec_prop_list.append('**Primitive Type:** %s' % SpecToRST.spec_basetype_name(spec))\n if spec.get('dtype', None) is not None and 'dtype' not in ignore_keys:\n spec_prop_list.append('**Data Type:** %s' % SpecToRST.render_data_type(spec['dtype']))\n if spec.get('dims', None) is not None and 'dims' not in ignore_keys:\n spec_prop_list.append('**Dimensions:** %s' % str(spec['dims']))\n if spec.get('shape', None) is not None and 'shape' not in ignore_keys:\n spec_prop_list.append('**Shape:** %s' % str(spec['shape']))\n if spec.get('required', None) is not None and 'required' not in ignore_keys:\n spec_prop_list.append('**Required:** %s' % str(spec['required']))\n if spec.get('value', None) is not None and 'value' not in ignore_keys:\n spec_prop_list.append('**Value:** %s' % str(spec['value']))\n if spec.get('default_value', None) is not None and 'default_value' not in ignore_keys:\n spec_prop_list.append('**Default Value:** %s' % str(spec['default_value']))\n\n # Add common properties\n if spec.get('default_name', None) is not None:\n spec_prop_list.append('**Default Name:** %s' % str(spec['default_name']))\n if spec.get('name', None) is not None:\n spec_prop_list.append('**Name:** %s' % str(spec['name']))\n\n # Add custom items if necessary\n if append_items is not None:\n spec_prop_list += append_items\n\n # Render the specification properties list\n spec_doc = ''\n if len(spec_prop_list) > 0:\n spec_doc += newline\n for dp in spec_prop_list:\n spec_doc += newline + '- ' + dp\n spec_doc += newline\n # Return the rendered list\n return spec_doc", "def generate_specs_build(self):\n from django_swagger_utils.drf_server.generators.swagger_generator import SwaggerGenerator\n\n swagger_gen = SwaggerGenerator(self.parser, self.paths, self.app_name)\n # generating request_response files\n swagger_gen.generate_request_response()\n # testing properties\n swagger_gen.generate_definitions()\n # generating global parameters\n swagger_gen.generate_parameters()\n # generating global response\n swagger_gen.generate_responses()\n # generating urls\n swagger_gen.generate_urls()", "def _build_properties(self, k, v, definition):\n\n if isinstance(v, schema.Map):\n newdef = self._create_section(definition, k, term=k)\n\n if v.schema is None:\n # if it's a map for arbritary values, only include description\n field = 
nodes.line('', v.description)\n newdef.append(field)\n return\n\n newdeflist = self._create_def_list(newdef)\n\n sorted_schema = sorted(v.schema.items(),\n key=cmp_to_key(self._sort_by_type))\n for key, value in sorted_schema:\n self._build_properties(key, value, newdeflist)\n elif isinstance(v, schema.List):\n newdef = self._create_section(definition, k, term=k)\n\n # identify next section as list properties\n field = nodes.line()\n emph = nodes.emphasis('', 'List properties:')\n field.append(emph)\n newdef.append(field)\n\n newdeflist = self._create_def_list(newdef)\n\n self._build_properties('**', v.schema['*'], newdeflist)\n else:\n newdef = self._create_section(definition, k, term=k)\n if 'description' in v:\n field = nodes.line('', v['description'])\n newdef.append(field)\n else:\n field = nodes.line('', '++')\n newdef.append(field)", "def _component_specs(self):\n specs = dict(pretransformed_input=self._input_spec)\n if self._transform_is_composite:\n specs['transform_fn'] = self.transform_or_spec\n if self._also_track_spec is not None:\n specs['also_track'] = self._also_track_spec\n return specs", "def create_property(self, key, prop):\n\n setting = self.new_property(key, prop)\n setting.create()\n return setting", "def build_property(value_token: ValueToken) -> property:\n def caller(_: Any) -> Any:\n return value_token.get_value()\n return property(caller)", "def __init__(self, spec):\n self.spec = spec", "def test_build_property(self):\n v1 = versions.Version(version='1.2.3.4', name='foo')\n expected = 4\n\n self.assertEqual(v1.build, expected)", "def _makeProperty( key, value ):\r\n property = PropertyValue()\r\n property.Name = key\r\n property.Value = value\r\n return property", "def _create_properties(self): # pylint: disable=no-self-use\n properties = {}\n properties[\"product\"] = \"eventhub.python\"\n properties[\"version\"] = __version__\n properties[\"framework\"] = \"Python {}.{}.{}\".format(*sys.version_info[0:3])\n properties[\"platform\"] = sys.platform\n return properties", "def _component_specs(self):\n specs = dict(pretransformed_input=self._input_spec)\n if self._transform_is_composite:\n specs['bijector'] = self.transform_or_spec\n return specs", "def _determine_properties(self, paramdict):\n for var in paramdict:\n if is_dimensionless(paramdict[var]):\n self._all_params_unit[var] = \"none\"\n yield lems.Property(var, \"none\")\n else:\n dim = _determine_dimension(paramdict[var])\n self._all_params_unit[var] = dim\n yield lems.Property(var, dim)", "def build_specfile_header(spec):\n str = \"\"\n\n # first the mandatory sections\n mandatory_header_fields = {\n 'NAME' : '%%define name %s\\nName: %%{name}\\n',\n 'VERSION' : '%%define version %s\\nVersion: %%{version}\\n',\n 'PACKAGEVERSION' : '%%define release %s\\nRelease: %%{release}\\n',\n 'X_RPM_GROUP' : 'Group: %s\\n',\n 'SUMMARY' : 'Summary: %s\\n',\n 'LICENSE' : 'License: %s\\n',\n }\n\n str = str + SimpleTagCompiler(mandatory_header_fields).compile( spec )\n\n # now the optional tags\n optional_header_fields = {\n 'VENDOR' : 'Vendor: %s\\n',\n 'X_RPM_URL' : 'Url: %s\\n',\n 'SOURCE_URL' : 'Source: %s\\n',\n 'SUMMARY_' : 'Summary(%s): %s\\n',\n 'ARCHITECTURE' : 'BuildArch: %s\\n',\n 'X_RPM_DISTRIBUTION' : 'Distribution: %s\\n',\n 'X_RPM_ICON' : 'Icon: %s\\n',\n 'X_RPM_PACKAGER' : 'Packager: %s\\n',\n 'X_RPM_GROUP_' : 'Group(%s): %s\\n',\n\n 'X_RPM_REQUIRES' : 'Requires: %s\\n',\n 'X_RPM_PROVIDES' : 'Provides: %s\\n',\n 'X_RPM_CONFLICTS' : 'Conflicts: %s\\n',\n 'X_RPM_BUILDREQUIRES' : 'BuildRequires: 
%s\\n',\n\n 'X_RPM_SERIAL' : 'Serial: %s\\n',\n 'X_RPM_EPOCH' : 'Epoch: %s\\n',\n 'X_RPM_AUTOREQPROV' : 'AutoReqProv: %s\\n',\n 'X_RPM_EXCLUDEARCH' : 'ExcludeArch: %s\\n',\n 'X_RPM_EXCLUSIVEARCH' : 'ExclusiveArch: %s\\n',\n 'X_RPM_PREFIX' : 'Prefix: %s\\n',\n\n # internal use\n 'X_RPM_BUILDROOT' : 'BuildRoot: %s\\n',\n }\n\n # fill in default values:\n # Adding a BuildRequires renders the .rpm unbuildable under systems which\n # are not managed by rpm, since the database to resolve this dependency is\n # missing (take Gentoo as an example)\n #if 'X_RPM_BUILDREQUIRES' not in spec:\n # spec['X_RPM_BUILDREQUIRES'] = 'scons'\n\n if 'X_RPM_BUILDROOT' not in spec:\n spec['X_RPM_BUILDROOT'] = '%{_tmppath}/%{name}-%{version}-%{release}'\n\n str = str + SimpleTagCompiler(optional_header_fields, mandatory=0).compile( spec )\n\n # Add any extra specfile definitions the user may have supplied.\n # These flags get no processing, they are just added.\n # github #3164: if we don't turn off debug package generation\n # the tests which build packages all fail. If there are no\n # extra flags, default to adding this one. If the user wants\n # to turn this back on, supply the flag set to None.\n\n if 'X_RPM_EXTRADEFS' not in spec:\n spec['X_RPM_EXTRADEFS'] = ['%global debug_package %{nil}']\n for extra in spec['X_RPM_EXTRADEFS']:\n str += extra + '\\n'\n\n return str", "def write_properties(self, prop_filename):\n # Collect list of all keys in self.plats that have True values,\n # but change \"windows\" to \"win64\" because build-sanity is annoying.\n sanity_plats = [\n (x if x != \"windows\" else \"win64\")\n for x in self.plats.keys() if self.plats[x]\n ]\n with open(prop_filename, \"w\") as prop:\n prop.write(\"CURRENT_BUILD_NUMBER={}\\n\".format(self.bld_num))\n prop.write(\"VERSION={}\\n\".format(self.version))\n prop.write(\"DISTROS={}\\n\".format(\" \".join(sanity_plats)))\n prop.write(\"TESTRUNNER_BRANCH={}\\n\".format(self.testrunner_branch))\n if self.use_magma:\n prop.write(\"EXTRA_TEST_PARAMS={}\\n\".format(\"bucket_storage=magma\"))", "def build_specfile_sections(spec):\n str = \"\"\n\n mandatory_sections = {\n 'DESCRIPTION' : '\\n%%description\\n%s\\n\\n', }\n\n str = str + SimpleTagCompiler(mandatory_sections).compile( spec )\n\n optional_sections = {\n 'DESCRIPTION_' : '%%description -l %s\\n%s\\n\\n',\n 'CHANGELOG' : '%%changelog\\n%s\\n\\n',\n 'X_RPM_PREINSTALL' : '%%pre\\n%s\\n\\n',\n 'X_RPM_POSTINSTALL' : '%%post\\n%s\\n\\n',\n 'X_RPM_PREUNINSTALL' : '%%preun\\n%s\\n\\n',\n 'X_RPM_POSTUNINSTALL' : '%%postun\\n%s\\n\\n',\n 'X_RPM_VERIFY' : '%%verify\\n%s\\n\\n',\n\n # These are for internal use but could possibly be overridden\n 'X_RPM_PREP' : '%%prep\\n%s\\n\\n',\n 'X_RPM_BUILD' : '%%build\\n%s\\n\\n',\n 'X_RPM_INSTALL' : '%%install\\n%s\\n\\n',\n 'X_RPM_CLEAN' : '%%clean\\n%s\\n\\n',\n }\n\n # Default prep, build, install and clean rules\n # TODO: optimize those build steps, to not compile the project a second time\n if 'X_RPM_PREP' not in spec:\n spec['X_RPM_PREP'] = '[ -n \"$RPM_BUILD_ROOT\" -a \"$RPM_BUILD_ROOT\" != / ] && rm -rf \"$RPM_BUILD_ROOT\"' + '\\n%setup -q'\n\n if 'X_RPM_BUILD' not in spec:\n spec['X_RPM_BUILD'] = '[ ! 
-e \"$RPM_BUILD_ROOT\" -a \"$RPM_BUILD_ROOT\" != / ] && mkdir \"$RPM_BUILD_ROOT\"'\n\n if 'X_RPM_INSTALL' not in spec:\n spec['X_RPM_INSTALL'] = 'scons --install-sandbox=\"$RPM_BUILD_ROOT\" \"$RPM_BUILD_ROOT\"'\n\n if 'X_RPM_CLEAN' not in spec:\n spec['X_RPM_CLEAN'] = '[ -n \"$RPM_BUILD_ROOT\" -a \"$RPM_BUILD_ROOT\" != / ] && rm -rf \"$RPM_BUILD_ROOT\"'\n\n str = str + SimpleTagCompiler(optional_sections, mandatory=0).compile( spec )\n\n return str", "def test_should_return_correct_gremlin_for_property(self):\r\n expected = 'updated_at = g.makeType().name(\"updated_at\").dataType(Integer.class).functional(true).makePropertyKey()'\r\n prop = self.spec_parser.parse_property(self.property_spec)\r\n assert prop.gremlin == expected\r\n\r\n expected = 'updated_at = g.makeType().name(\"updated_at\").dataType(Integer.class).functional(false).makePropertyKey()'\r\n self.property_spec['locking'] = False\r\n self.spec_parser._properties = {} # Reset saved properties\r\n self.spec_parser._names = []\r\n prop = self.spec_parser.parse_property(self.property_spec)\r\n assert prop.gremlin == expected\r\n\r\n expected = 'updated_at = g.makeType().name(\"updated_at\").dataType(Integer.class).functional(false).indexed().makePropertyKey()'\r\n self.property_spec['locking'] = False\r\n self.property_spec['indexed'] = True\r\n self.spec_parser._properties = {} # Reset saved properties\r\n self.spec_parser._names = []\r\n prop = self.spec_parser.parse_property(self.property_spec)\r\n assert prop.gremlin == expected\r\n\r\n expected = 'updated_at = g.makeType().name(\"updated_at\").dataType(Integer.class).makePropertyKey()'\r\n self.property_spec['functional'] = False\r\n self.property_spec['indexed'] = False\r\n self.spec_parser._properties = {} # Reset saved properties\r\n self.spec_parser._names = []\r\n prop = self.spec_parser.parse_property(self.property_spec)\r\n assert prop.gremlin == expected\r\n\r\n expected = 'updated_at = g.makeType().name(\"updated_at\").dataType(Integer.class).unique().makePropertyKey()'\r\n self.property_spec['functional'] = False\r\n self.property_spec['indexed'] = False\r\n self.property_spec['unique'] = True\r\n self.spec_parser._properties = {} # Reset saved properties\r\n self.spec_parser._names = []\r\n prop = self.spec_parser.parse_property(self.property_spec)\r\n assert prop.gremlin == expected, prop.gremlin", "def __init__(self, property_name='', *protocol_ids):\n\n self._full_path = ''\n\n if len(property_name) > 0 or len(protocol_ids) > 0:\n self._from_components(property_name, *protocol_ids)\n\n else:\n self._full_path = '{}'.format(ProtocolPath.property_separator)", "def __init__(self, value):\n if isinstance(value, bool):\n ptr = self.ffi.chfl_property_bool(c_bool(value))\n elif isinstance(value, (float, int)):\n ptr = self.ffi.chfl_property_double(c_double(value))\n elif isinstance(value, str):\n ptr = self.ffi.chfl_property_string(value.encode(\"utf8\"))\n elif _is_vector3d(value):\n value = chfl_vector3d(value[0], value[1], value[2])\n ptr = self.ffi.chfl_property_vector3d(value)\n else:\n raise ChemfilesError(\n f\"can not create a Property with a value of type '{type(value)}'\"\n )\n\n super(Property, self).__init__(ptr, is_const=False)", "def __init__(self, jsondict=None, strict=True):\n \n self.type = None\n \"\"\" Code that specifies the property DeviceDefinitionPropetyCode\n (Extensible).\n Type `CodeableConcept` (represented as `dict` in JSON). 
\"\"\"\n \n self.valueCode = None\n \"\"\" Property value as a code, e.g., NTP4 (synced to NTP).\n List of `CodeableConcept` items (represented as `dict` in JSON). \"\"\"\n \n self.valueQuantity = None\n \"\"\" Property value as a quantity.\n List of `Quantity` items (represented as `dict` in JSON). \"\"\"\n \n super(DeviceDefinitionProperty, self).__init__(jsondict=jsondict, strict=strict)", "def build_object_spec(client_factory, root_folder, traversal_specs):\r\n object_spec = client_factory.create('ns0:ObjectSpec')\r\n object_spec.obj = root_folder\r\n object_spec.skip = False\r\n object_spec.selectSet = traversal_specs\r\n return object_spec", "def get_specification(self) -> Dict:\n specification = {\n 'version': VERSION,\n 'metadata': {\n 'twoColumn': True,\n 'layout': self.autolayout\n },\n 'nodes': []\n }\n\n def strip_io(io_list: list, direction) -> list:\n \"\"\"\n Strips every input/output from metadata and leaves only\n `name` and `type` keys.\n \"\"\"\n return [\n {\n 'name': io['name'],\n 'type': io['type'],\n 'direction': direction\n }\n for io in io_list\n ]\n\n toremove = set()\n for key, node in self.nodes.items():\n try:\n node_cls = load_class(node.cls_name)\n except (ModuleNotFoundError, ImportError, Exception) as err:\n msg = f'Could not add {node_cls}. Reason:'\n _LOGGER.warn('-' * len(msg))\n _LOGGER.warn(msg)\n _LOGGER.warn(err)\n _LOGGER.warn('-' * len(msg))\n toremove.add(key)\n continue\n parameterschema = node_cls.form_parameterschema()\n\n properties = []\n for name, props in parameterschema['properties'].items():\n new_property = {'name': name}\n\n if 'default' in props:\n new_property['default'] = props['default']\n\n if 'description' in props:\n new_property['description'] = props['description']\n\n def add_default(default_val):\n if new_property.get('default') is None:\n new_property['default'] = default_val\n\n # Case for an input with range defined\n if 'enum' in props:\n new_property['type'] = 'select'\n new_property['values'] = list(map(str, props['enum']))\n add_default(new_property['values'][0])\n # Case for a single value input\n elif 'type' in props:\n if 'array' in props['type']:\n new_property['type'] = 'list'\n if 'items' in props and 'type' in props['items']:\n dtype = props['items']['type']\n new_property['dtype'] = dtype\n add_default([])\n elif 'boolean' in props['type']:\n new_property['type'] = 'checkbox'\n add_default(False)\n elif 'string' in props['type']:\n new_property['type'] = 'text'\n add_default('')\n elif 'integer' in props['type']:\n new_property['type'] = 'integer'\n add_default(0)\n elif 'number' in props['type']:\n new_property['type'] = 'number'\n add_default(0)\n elif 'object' in props['type']:\n # Object arguments should be defined in specification\n # as node inputs, rather than properties\n new_property = None\n else:\n new_property['type'] = 'text'\n add_default('')\n # If no type is specified then text is used\n else:\n new_property['type'] = 'text'\n add_default('')\n\n if new_property is not None:\n properties.append(new_property)\n\n specification['nodes'].append({\n 'name': node.name,\n 'type': node.type,\n 'category': node.category,\n 'properties': properties,\n 'interfaces': strip_io(\n self.io_mapping[node.type]['inputs'],\n 'input'\n ) + strip_io(\n self.io_mapping[node.type]['outputs'],\n 'output'\n )\n })\n\n for key in toremove:\n del self.nodes[key]\n return specification", "def make_pod_spec(self):\n spec = {\n 'containers': [{\n 'name': self.framework.model.app.name,\n 'imageDetails': {\n },\n 'ports': 
[{\n 'containerPort':\n self.framework.model.config['advertised-port'],\n 'protocol': 'TCP',\n }],\n }],\n }\n return spec", "def __init__(self, pathspec, properties={}):\n import numpy\n self.pathspec = pathspec\n super(ArraySpec,self).__init__(numpy.ndarray)\n self.properties = OrderedDict(properties)", "def __init__(self, property_id=None, name=None, address=None, ratings=None, location=None, phone=None, fax=None, category=None, business_model=None, rank=None, checkin=None, checkout=None, fees=None, policies=None, attributes=None, amenities=None, images=None, onsite_payments=None, rooms=None, rates=None, dates=None, descriptions=None, statistics=None, airports=None, registry_number=None, themes=None, all_inclusive=None, tax_id=None, chain=None, brand=None, spoken_languages=None, multi_unit=None): # noqa: E501 # noqa: E501\n\n self._property_id = None\n self._name = None\n self._address = None\n self._ratings = None\n self._location = None\n self._phone = None\n self._fax = None\n self._category = None\n self._business_model = None\n self._rank = None\n self._checkin = None\n self._checkout = None\n self._fees = None\n self._policies = None\n self._attributes = None\n self._amenities = None\n self._images = None\n self._onsite_payments = None\n self._rooms = None\n self._rates = None\n self._dates = None\n self._descriptions = None\n self._statistics = None\n self._airports = None\n self._registry_number = None\n self._themes = None\n self._all_inclusive = None\n self._tax_id = None\n self._chain = None\n self._brand = None\n self._spoken_languages = None\n self._multi_unit = None\n self.discriminator = None\n\n if property_id is not None:\n self.property_id = property_id\n if name is not None:\n self.name = name\n if address is not None:\n self.address = address\n if ratings is not None:\n self.ratings = ratings\n if location is not None:\n self.location = location\n if phone is not None:\n self.phone = phone\n if fax is not None:\n self.fax = fax\n if category is not None:\n self.category = category\n if business_model is not None:\n self.business_model = business_model\n if rank is not None:\n self.rank = rank\n if checkin is not None:\n self.checkin = checkin\n if checkout is not None:\n self.checkout = checkout\n if fees is not None:\n self.fees = fees\n if policies is not None:\n self.policies = policies\n if attributes is not None:\n self.attributes = attributes\n if amenities is not None:\n self.amenities = amenities\n if images is not None:\n self.images = images\n if onsite_payments is not None:\n self.onsite_payments = onsite_payments\n if rooms is not None:\n self.rooms = rooms\n if rates is not None:\n self.rates = rates\n if dates is not None:\n self.dates = dates\n if descriptions is not None:\n self.descriptions = descriptions\n if statistics is not None:\n self.statistics = statistics\n if airports is not None:\n self.airports = airports\n if registry_number is not None:\n self.registry_number = registry_number\n if themes is not None:\n self.themes = themes\n if all_inclusive is not None:\n self.all_inclusive = all_inclusive\n if tax_id is not None:\n self.tax_id = tax_id\n if chain is not None:\n self.chain = chain\n if brand is not None:\n self.brand = brand\n if spoken_languages is not None:\n self.spoken_languages = spoken_languages\n if multi_unit is not None:\n self.multi_unit = multi_unit", "def _gen_polarion_property_file(test_attrs, test_attrs_values,\n test_run, test_case_id,\n property_file=None):\n test_keys = [\"polarion-testcase-id\"] * len(test_case_id)\n 
properties_mapping = OrderedDict()\n properties_mapping[\"properties\"] = {key: value for key, value in\n zip(test_attrs, test_attrs_values)\n if value is not None\n }\n properties_mapping[\"casemap\"] = {\n test_run: [\n {key: value} for key, value in zip(test_keys, test_case_id)\n ]\n }\n\n if property_file is None:\n property_file = \"/tmp/{}.json\".format(test_run)\n\n with open(property_file, 'w') as prop_file:\n dump(properties_mapping, prop_file, sort_keys=False, indent=1)\n\n return property_file", "def test_property_cols():\n image_file = 'input/D00572501_z_c01_r3624p01_immasked.fits.fz'\n cat_file = 'input/D00572501_z_c01_r5473p01_piff.fits'\n psf_file = os.path.join('output','test_property_cols.piff')\n hsm_file = os.path.join('output','test_property_cols_hsm.fits')\n\n nstars = 25\n scale = 0.26\n size = 15\n order = 1\n stamp_size = 25\n\n config = {\n 'input' : {\n 'nstars': nstars,\n 'image_file_name' : image_file,\n 'image_hdu' : 1,\n 'weight_hdu' : 3,\n 'badpix_hdu' : 2,\n 'cat_file_name' : cat_file,\n 'x_col' : 'XWIN_IMAGE',\n 'y_col' : 'YWIN_IMAGE',\n 'sky_col' : 'BACKGROUND',\n 'stamp_size' : stamp_size,\n 'ra' : 'TELRA',\n 'dec' : 'TELDEC',\n 'gain' : 'GAINA',\n 'satur' : 'SATURATA',\n 'chipnum': 1,\n # Select ones with a variety of dtypes.\n 'property_cols' : ['SOURCE_ID', 'GI_COLOR', 'FLAGS', 'FLAG_COLOR', 'SPREAD_MODEL'],\n },\n 'select' : {\n 'type': 'Properties',\n 'where': 'np.abs(SPREAD_MODEL) < 3.e-4',\n\n 'reserve_frac' : 0.2,\n 'seed' : 1234,\n },\n 'psf' : {\n 'model' : {\n 'type' : 'PixelGrid',\n 'scale' : scale,\n 'size' : size,\n 'interp' : 'Lanczos(5)',\n },\n 'interp' : {\n 'type' : 'BasisPolynomial',\n 'order' : [1, 1, 1],\n 'keys': ['u', 'v', 'GI_COLOR'],\n },\n },\n 'output' : {\n 'file_name' : psf_file,\n 'stats': [\n {\n 'type': 'HSMCatalog',\n 'file_name': hsm_file,\n },\n ],\n },\n }\n\n piff.piffify(config)\n hsm = fitsio.read(hsm_file)\n cat = fitsio.read(cat_file)\n\n print('hsm dtype = ',hsm.dtype)\n print('cat dtype = ',cat.dtype)\n\n for key in hsm.dtype.names:\n print(key)\n if key in cat.dtype.names:\n assert hsm[key].dtype.type == cat[key].dtype.type\n elif key == 'reserve':\n assert hsm[key].dtype.type == np.dtype(bool).type\n elif key.startswith('flag'):\n assert hsm[key].dtype.type == np.dtype(int).type\n elif key == 'sky':\n # This one is read from the input catalog, but renamed\n assert hsm[key].dtype.type == np.float32\n else:\n assert hsm[key].dtype.type == np.dtype(float).type\n\n # Check that drawing the image works without specifying chipnum.\n # This is ok so long as the input is really only a single chip.\n # cf. 
Issue #140\n psf = piff.read(psf_file)\n im1 = psf.draw(35, 40, center=True, GI_COLOR=1)\n\n # If the input field didn't include chipnum, then it makes no difference for a single chip.\n del config['input']['chipnum']\n piff.piffify(config)\n psf = piff.read(psf_file)\n im2 = psf.draw(35, 40, center=True, GI_COLOR=1)\n assert im1 == im2", "def properties(self) -> Optional[pulumi.Input['CosmosDBSpecPropertiesArgs']]:\n return pulumi.get(self, \"properties\")", "def _create_properties_table(font, format, base):\n propstrings = bytearray()\n xlfd_props = create_xlfd_properties(font)\n xlfd_props['FONT'] = create_xlfd_name(xlfd_props)\n props = []\n props_struct = base.Struct(**_PROPS)\n for key, value in xlfd_props.items():\n prop = props_struct(\n name_offset=len(propstrings),\n isStringProp=isinstance(value, str),\n )\n propstrings += key.encode('ascii', 'replace') + b'\\0'\n if prop.isStringProp:\n prop.value = len(propstrings)\n value = from_quoted_string(value)\n propstrings += value.encode('ascii', 'replace') + b'\\0'\n else:\n prop.value = int(value)\n props.append(prop)\n table_bytes = (\n bytes(le.uint32(format))\n + bytes(base.uint32(len(props)))\n + bytes((props_struct * len(props))(*props))\n # pad to next int32 boundary\n + bytes(0 if len(props)&3 == 0 else 4-(len(props)&3))\n + bytes(base.uint32(len(propstrings)))\n + bytes(propstrings)\n )\n return table_bytes, format", "def __properties__(self) -> dict:\r\n parameters = [\r\n d for d in dir(self) if (d[0] != \"_\") and (d.count(\"set\") == 0)\r\n and (d.count(\"_c\") == 0) and (d.count(\"_f\") == 0)\r\n ]\r\n\r\n return self.__as_json__(parameters)", "def get_generic_walker_properties(self, walker):\n etree.SubElement(walker, \"ParameterDeclarations\")\n bounding_box = etree.SubElement(walker, \"BoundingBox\")\n boundbox_center = etree.SubElement(bounding_box, \"Center\")\n boundbox_center.set(\"x\", \"1.5\")\n boundbox_center.set(\"y\", \"0.0\")\n boundbox_center.set(\"z\", \"0.9\")\n boundbox_dimemsion = etree.SubElement(bounding_box, \"Dimensions\")\n boundbox_dimemsion.set(\"width\", \"1.0\")\n boundbox_dimemsion.set(\"length\", \"1.0\")\n boundbox_dimemsion.set(\"height\", \"1.8\")\n properties_group = etree.SubElement(walker, \"Properties\")\n properties = etree.SubElement(properties_group, \"Property\")\n properties.set(\"name\", \"type\")\n properties.set(\"value\", \"simulation\")", "def assign_build_props(self, name, dependencies, worker=False):\n props = self.set_properties.copy()\n props[\"virtual_builder_name\"] = name\n props[\"package\"] = name\n props[\"dependencies\"] = dependencies\n if worker:\n props[\"workername\"] = worker\n return [\"build\", props]", "def build(self):\n return self.hyperparams.items()", "def new_property(self, key, prop):\n\n s = self._new_property()\n s.key = key\n s.property_f = prop\n return s", "def test_fields_from_property():\n prop_template = PropertyTemplate(name=\"cookie eating template\", bounds=IntegerBounds(0, 1000))\n cond_template = ConditionTemplate(name=\"Hunger template\",\n bounds=CategoricalBounds([\"hungry\", \"full\", \"peckish\"]))\n prop = Property(name=\"number of cookies eaten\",\n template=prop_template,\n origin='measured',\n value=NominalInteger(27))\n cond = Condition(name=\"hunger level\",\n template=cond_template,\n origin='specified',\n value=NominalCategorical(\"hungry\"))\n\n prop_and_conds = PropertyAndConditions(property=prop, conditions=[cond])\n assert prop_and_conds.name == prop.name\n assert prop_and_conds.template == prop.template\n assert 
prop_and_conds.origin == prop.origin\n assert prop_and_conds.value == prop.value", "def test_type_builder_handles_nested_properties():\n schema = [\n SchemaObject(\n name=\"ClassWithNestedClass\",\n properties=[\n SchemaObject(\n name=\"nestedValue\",\n properties=[\n SchemaValue(name=\"string_value\", value_type=\"string\"),\n SchemaEnum(\n name=\"enum_value\",\n value_type=\"string\",\n values=[\"hey\", \"new\", \"value\"],\n ),\n ],\n ),\n ],\n )\n ]\n\n build_result = build_types(schema)\n\n assert len(build_result) == 3\n assert build_result[0] == ClassDefinition(\n name=\"ClassWithNestedClass\",\n properties=[\n PropertyDefinition(\n name=\"nested_value\",\n key=\"nestedValue\",\n value_type=\"ClassWithNestedClassNestedValue\",\n known_type=False,\n ),\n ],\n depends_on={\"ClassWithNestedClassNestedValue\"},\n )\n assert build_result[1] == ClassDefinition(\n name=\"ClassWithNestedClassNestedValue\",\n properties=[\n PropertyDefinition(\n name=\"string_value\",\n key=\"string_value\",\n value_type=\"str\",\n known_type=True,\n ),\n PropertyDefinition(\n name=\"enum_value\",\n key=\"enum_value\",\n value_type=\"ClassWithNestedClassNestedValueEnumValue\",\n known_type=False,\n ),\n ],\n depends_on={\"ClassWithNestedClassNestedValueEnumValue\"},\n )\n assert build_result[2] == EnumDefinition(\n name=\"ClassWithNestedClassNestedValueEnumValue\",\n values=[(\"HEY\", \"hey\"), (\"NEW\", \"new\"), (\"VALUE\", \"value\")],\n depends_on=set(),\n )", "def make_cake_spec():\n ###############################################################################################\n # Templates\n tmpl = make_cake_templates()\n\n ###############################################################################################\n # Objects\n cake = MaterialSpec(\n name=\"Abstract Cake\",\n template=tmpl[\"Dessert\"],\n process=ProcessSpec(\n name='Icing, in General',\n template=tmpl[\"Icing\"],\n tags=[\n 'spreading'\n ],\n notes='The act of covering a baked output with frosting'\n ),\n file_links=FileLink(\n filename=\"Becky's Butter Cake\",\n url='https://www.landolakes.com/recipe/16730/becky-s-butter-cake/'\n ),\n tags=[\n 'cake::butter cake',\n 'dessert::baked::cake',\n 'iced::chocolate'\n ],\n notes='Butter cake recipe reminiscent of the 1-2-3-4 cake that Grandma may have baked.'\n )\n\n ########################\n frosting = MaterialSpec(\n name=\"Abstract Frosting\",\n template=tmpl[\"Dessert\"],\n process=ProcessSpec(\n name='Mixing Frosting, in General',\n template=tmpl[\"Mixing\"],\n tags=[\n 'mixing'\n ],\n notes='Combining ingredients to make a sweet frosting'\n ),\n tags=[\n 'frosting::chocolate',\n 'topping::chocolate'\n ],\n notes='Chocolate frosting'\n )\n IngredientSpec(\n name=\"{} input\".format(frosting.name),\n tags=list(frosting.tags),\n notes='Seems like a lot of frosting',\n labels=['coating'],\n process=cake.process,\n material=frosting,\n absolute_quantity=NominalReal(nominal=0.751, units='kg')\n )\n\n baked_cake = MaterialSpec(\n name=\"Abstract Baked Cake\",\n template=tmpl[\"Generic Material\"],\n process=ProcessSpec(\n name='Baking, in General',\n template=tmpl[\"Baking in an oven\"],\n tags=[\n 'oven::baking'\n ],\n notes='Using heat to convert batter into a solid matrix'\n ),\n tags=[\n ],\n notes='The cakey part of the cake'\n )\n IngredientSpec(\n name=\"{} input\".format(baked_cake.name),\n tags=list(baked_cake.tags),\n labels=['substrate'],\n process=cake.process,\n material=baked_cake\n )\n\n ########################\n batter = MaterialSpec(\n name=\"Abstract Batter\",\n 
template=tmpl[\"Generic Material\"],\n process=ProcessSpec(\n name='Mixing Batter, in General',\n template=tmpl[\"Mixing\"],\n tags=[\n 'mixing'\n ],\n notes='Combining ingredients to make a baking feedstock'\n ),\n tags=[\n ],\n notes='The fluid that converts to cake with heat'\n )\n IngredientSpec(\n name=\"{} input\".format(batter.name),\n tags=list(batter.tags),\n labels=['precursor'],\n process=baked_cake.process,\n material=batter\n )\n\n ########################\n wetmix = MaterialSpec(\n name=\"Abstract Wet Mix\",\n template=tmpl[\"Generic Material\"],\n process=ProcessSpec(\n name='Mixing Wet, in General',\n template=tmpl[\"Mixing\"],\n tags=[\n 'mixing'\n ],\n notes='Combining wet ingredients to make a baking feedstock'\n ),\n tags=[\n ],\n notes='The wet fraction of a batter'\n )\n IngredientSpec(\n name=\"{} input\".format(wetmix.name),\n tags=list(wetmix.tags),\n labels=['wet'],\n process=batter.process,\n material=wetmix\n )\n\n drymix = MaterialSpec(\n name=\"Abstract Dry Mix\",\n template=tmpl[\"Generic Material\"],\n process=ProcessSpec(\n name='Mixing Dry, in General',\n template=tmpl[\"Mixing\"],\n tags=[\n 'mixing'\n ],\n notes='Combining dry ingredients to make a baking feedstock'\n ),\n tags=[\n ],\n notes='The dry fraction of a batter'\n )\n IngredientSpec(\n name=\"{} input\".format(drymix.name),\n tags=list(drymix.tags),\n labels=['dry'],\n process=batter.process,\n material=drymix,\n absolute_quantity=NominalReal(nominal=3.052, units='cups')\n )\n\n ########################\n flour = MaterialSpec(\n name=\"Abstract Flour\",\n template=tmpl[\"Generic Material\"],\n process=ProcessSpec(\n name='Buying Flour, in General',\n template=tmpl[\"Procurement\"],\n tags=[\n 'purchase::dry-goods'\n ],\n notes='Purchasing all purpose flour'\n ),\n tags=[\n ],\n notes='All-purpose flour'\n )\n IngredientSpec(\n name=\"{} input\".format(flour.name),\n tags=list(flour.tags),\n labels=['dry'],\n process=drymix.process,\n material=flour,\n volume_fraction=NominalReal(nominal=0.9829, units='') # 3 cups\n )\n\n baking_powder = MaterialSpec(\n name=\"Abstract Baking Powder\",\n template=tmpl[\"Generic Material\"],\n process=ProcessSpec(\n name='Buying Baking Powder, in General',\n template=tmpl[\"Procurement\"],\n tags=[\n 'purchase::dry-goods'\n ],\n notes='Purchasing baking powder'\n ),\n tags=[\n ],\n notes='Leavening agent for cake'\n )\n IngredientSpec(\n name=\"{} input\".format(baking_powder.name),\n tags=list(baking_powder.tags),\n labels=['leavening', 'dry'],\n process=drymix.process,\n material=baking_powder,\n volume_fraction=NominalReal(nominal=0.0137, units='') # 2 teaspoons\n )\n\n salt = MaterialSpec(\n name=\"Abstract Salt\",\n template=tmpl[\"Generic Material\"],\n process=ProcessSpec(\n name='Buying Salt, in General',\n template=tmpl[\"Procurement\"],\n tags=[\n 'purchase::dry-goods'\n ],\n notes='Purchasing salt'\n ),\n tags=[\n ],\n notes='Plain old NaCl'\n )\n IngredientSpec(\n name=\"{} input\".format(salt.name),\n tags=list(salt.tags),\n labels=['dry', 'seasoning'],\n process=drymix.process,\n material=salt,\n volume_fraction=NominalReal(nominal=0.0034, units='') # 1/2 teaspoon\n )\n\n sugar = MaterialSpec(\n name=\"Abstract Sugar\",\n template=tmpl[\"Generic Material\"],\n process=ProcessSpec(\n name='Buying Sugar, in General',\n template=tmpl[\"Procurement\"],\n tags=[\n 'purchase::dry-goods'\n ],\n notes='Purchasing all purpose flour'\n ),\n tags=[\n ],\n notes='Sugar'\n )\n IngredientSpec(\n name=\"{} input\".format(sugar.name),\n tags=list(sugar.tags),\n 
labels=['wet', 'sweetener'],\n process=wetmix.process,\n material=sugar,\n absolute_quantity=NominalReal(nominal=2, units='cups')\n )\n\n butter = MaterialSpec(\n name=\"Abstract Butter\",\n template=tmpl[\"Generic Material\"],\n process=ProcessSpec(\n name='Buying Butter, in General',\n template=tmpl[\"Procurement\"],\n tags=[\n 'purchase::produce'\n ],\n notes='Purchasing butter'\n ),\n tags=[\n ],\n notes='Shortening for making rich, buttery baked goods'\n )\n IngredientSpec(\n name=\"{} input\".format(butter.name),\n tags=list(butter.tags),\n labels=['wet', 'shortening'],\n process=wetmix.process,\n material=butter,\n absolute_quantity=NominalReal(nominal=1, units='cups')\n )\n IngredientSpec(\n name=\"{} input\".format(butter.name),\n tags=list(butter.tags),\n labels=['shortening'],\n process=frosting.process,\n material=butter,\n mass_fraction=NominalReal(nominal=0.1434, units='') # 1/2 c @ 0.911 g/cc\n )\n\n eggs = MaterialSpec(\n name=\"Abstract Eggs\",\n template=tmpl[\"Generic Material\"],\n process=ProcessSpec(\n name='Buying Eggs, in General',\n template=tmpl[\"Procurement\"],\n tags=[\n 'purchase::produce'\n ],\n notes='Purchasing eggs'\n ),\n tags=[\n ],\n notes=''\n )\n IngredientSpec(\n name=\"{} input\".format(eggs.name),\n tags=list(eggs.tags),\n labels=['wet'],\n process=wetmix.process,\n material=eggs,\n absolute_quantity=NominalReal(nominal=4, units='')\n )\n\n vanilla = MaterialSpec(\n name=\"Abstract Vanilla\",\n template=tmpl[\"Generic Material\"],\n process=ProcessSpec(\n name='Buying Vanilla, in General',\n template=tmpl[\"Procurement\"],\n tags=[\n 'purchase::dry-goods'\n ],\n notes='Purchasing vanilla'\n ),\n tags=[\n ],\n notes=''\n )\n IngredientSpec(\n name=\"{} input\".format(vanilla.name),\n tags=list(vanilla.tags),\n labels=['wet', 'flavoring'],\n process=wetmix.process,\n material=vanilla,\n absolute_quantity=NominalReal(nominal=2, units='teaspoons')\n )\n IngredientSpec(\n name=\"{} input\".format(vanilla.name),\n tags=list(vanilla.tags),\n labels=['flavoring'],\n process=frosting.process,\n material=vanilla,\n mass_fraction=NominalReal(nominal=0.0231, units='') # 2 tsp @ 0.879 g/cc\n )\n\n milk = MaterialSpec(\n name=\"Abstract Milk\",\n template=tmpl[\"Generic Material\"],\n process=ProcessSpec(\n name='Buying Milk, in General',\n template=tmpl[\"Procurement\"],\n tags=[\n 'purchase::produce'\n ],\n notes='Purchasing milk'\n ),\n tags=[\n ],\n notes=''\n )\n IngredientSpec(\n name=\"{} input\".format(milk.name),\n tags=list(milk.tags),\n labels=['wet'],\n process=batter.process,\n material=milk,\n absolute_quantity=NominalReal(nominal=1, units='cup')\n )\n IngredientSpec(\n name=\"{} input\".format(milk.name),\n tags=list(milk.tags),\n labels=[],\n process=frosting.process,\n material=milk,\n mass_fraction=NominalReal(nominal=0.0816, units='') # 1/4 c @ 1.037 g/cc\n )\n\n chocolate = MaterialSpec(\n name=\"Abstract Chocolate\",\n template=tmpl[\"Generic Material\"],\n process=ProcessSpec(\n name='Buying Chocolate, in General',\n template=tmpl[\"Procurement\"],\n tags=[\n 'purchase::dry-goods'\n ],\n notes='Purchasing chocolate'\n ),\n tags=[\n ],\n notes=''\n )\n IngredientSpec(\n name=\"{} input\".format(chocolate.name),\n tags=list(chocolate.tags),\n labels=['flavoring'],\n process=frosting.process,\n material=chocolate,\n mass_fraction=NominalReal(nominal=0.1132, units='') # 3 oz.\n )\n\n powder_sugar = MaterialSpec(\n name=\"Abstract Powdered Sugar\",\n template=tmpl[\"Generic Material\"],\n process=ProcessSpec(\n name='Buying Powdered Sugar, in 
General',\n template=tmpl[\"Procurement\"],\n tags=[\n 'purchase::dry-goods'\n ],\n notes='Purchasing powdered sugar'\n ),\n tags=[\n ],\n notes='Granulated sugar mixed with corn starch'\n )\n IngredientSpec(\n name=\"{} input\".format(powder_sugar.name),\n tags=list(powder_sugar.tags),\n labels=['flavoring'],\n process=frosting.process,\n material=powder_sugar,\n mass_fraction=NominalReal(nominal=0.6387, units='') # 4 c @ 30 g/ 0.25 cups\n )\n return cake", "def __init__(self, properties):\n self.attributes = {}\n self.output_info = {}\n for key, node in properties.walk():\n self.attributes[key[1]] = node.get_value().strip(\" '\")", "def __init__(self, sheet_type, properties):\n super(SheetSpec,self).__init__(sheet_type)\n\n if 'level' not in properties:\n raise Exception(\"SheetSpec always requires 'level' property.\")\n\n\n properties = [(k, properties[k]) for k in self.name_ordering\n if k in properties]\n\n self.sheet_type = sheet_type\n self.properties = OrderedDict(properties)", "def build():", "def _buildProcVars (self):\n\t\talias = {val:key for key, val in proc.ALIAS.iteritems()}\n\t\tfor prop in sorted(self.props.keys()):\n\t\t\tval = self.props[prop]\n\t\t\tif not prop in ['id', 'tag', 'tmpdir', 'forks', 'cache', 'workdir', 'echo', 'runner',\n\t\t\t\t\t\t\t'errorhow', 'errorntry', 'defaultSh', 'exportdir', 'exporthow', 'exportow',\n\t\t\t\t\t\t\t'indir', 'outdir', 'length', 'args']:\n\t\t\t\tcontinue\n\t\t\t\n\t\t\tif prop == 'args':\n\t\t\t\tself.props['procvars']['proc.args'] = val\n\t\t\t\tfor k, v in val.iteritems():\n\t\t\t\t\tself.props['procvars']['proc.args.' + k] = v\n\t\t\t\t\tself.log('%s => %s' % (k, v), 'info', 'p.args')\n\t\t\telse:\n\t\t\t\tself.props['procvars']['proc.' + prop] = val\n\t\t\t\tif alias.has_key (prop): \n\t\t\t\t\tself.props['procvars']['proc.' + alias[prop]] = val\n\t\t\t\t\tself.log ('%s (%s) => %s' % (prop, alias[prop], val), 'info', 'p.props')\n\t\t\t\telse:\n\t\t\t\t\tself.log ('%s => %s' % (prop, val), 'info', 'p.props')", "def get_prop_filter_spec(client_factory, obj_spec, prop_spec):\r\n prop_filter_spec = \\\r\n client_factory.create('ns0:PropertyFilterSpec')\r\n prop_filter_spec.propSet = prop_spec\r\n prop_filter_spec.objectSet = obj_spec\r\n return prop_filter_spec", "def propertyListGenerator(name, cls):\n\n memo = dict()\n\n def propertyValueFromNodeGetter(instance):\n \"\"\"Get the actual property value from an instance.\n\n instance - a ComputedGraph location that the property is tied to.\n \"\"\"\n\n subspace = nodeGetter(instance)\n\n if SynchronousPropertyAccess.SynchronousPropertyAccess.getCurrent() is not None:\n if not subspace.loaded:\n subspace.keyspace.waitLoaded()\n else:\n subspace.keyspace.ensureSubscribed()\n\n val = subspace.value\n\n if val is None:\n return default()\n\n return val[0]\n\n def propertyValueFromNodeSetter(instance, val):\n \"\"\"Set the property value 'name' in instance 'instance' to 'val'\n\n We must be in 'synchronous' mode for this to work. 
We'll load the\n keyspace if its not loaded.\n \"\"\"\n\n subspace = nodeGetter(instance)\n\n if SynchronousPropertyAccess.SynchronousPropertyAccess.getCurrent() is not None:\n if not subspace.loaded:\n subspace.keyspace.waitLoaded()\n\n if subspace.value != (val,):\n subspace.value = (val,)\n\n def nodeGetter(instance):\n \"\"\"Produces a CGSS.Node.Node object corresponding to this property's value.\n\n We use the hash of the result of the documentGetter function to decide which keyspace\n we want to use, and then we hash the pair (instance, name) to decide which key\n to use.\n \"\"\"\n if (instance, name) not in memo:\n subspace = subspaceFunction(instance)\n\n if subspace is None:\n assert False, \"Instance %s produced an empty subspace\" % instance\n\n memo[(instance,name)] = subspace.subspace(name)\n return memo[(instance,name)]\n\n return [\n (name, ComputedGraph.Property(propertyValueFromNodeGetter,propertyValueFromNodeSetter))\n ]", "def make_pod_spec():\n md = metadata()\n cfg = config()\n\n if cfg.get(\"enable-sidecar\"):\n with open(\"reactive/spec_template_ha.yaml\") as spec_file:\n pod_spec_template = spec_file.read()\n else:\n with open(\"reactive/spec_template.yaml\") as spec_file:\n pod_spec_template = spec_file.read()\n\n data = {\n \"name\": md.get(\"name\"),\n \"docker_image\": cfg.get(\"mongodb-image\"),\n \"sc_docker_image\": cfg.get(\"sidecar-image\"),\n \"pod_labels\": \"juju-app={}\".format(cfg.get(\"advertised-hostname\")),\n }\n\n data.update(cfg)\n return pod_spec_template % data", "def to_spec(self) -> dict[str, typing.Any]:\n spec = {\n \"name\": self.name,\n \"title\": self.title,\n \"comment\": self.comment,\n \"references\": self.references,\n \"institution\": self.institution,\n \"hierarchical\": self.hierarchical,\n \"last_update\": self.last_update.isoformat(),\n }\n if self.version is not None:\n spec[\"version\"] = self.version\n categories = {}\n for cat in self.values():\n code, cat_spec = cat.to_spec()\n categories[code] = cat_spec\n spec[\"categories\"] = categories\n\n return spec", "def build_obj_spec(\n obj_key,\n parameter_dict,\n experiment_name=None,\n obj_type=\"policy\",\n output=\"./policy_yamls/\",\n):\n # Set name via timestamp if not specified\n if obj_type not in [\"policy\", \"estimator\", \"dataset\"]:\n print(\"Invalid type: {}\".format(obj_type))\n return None\n\n if experiment_name == None:\n now = datetime.now()\n current_time = now.strftime(\"%H%M%S\")\n experiment_name = \"experiment_{}\".format(current_time)\n\n # Build dict structure\n obj_dict = {\n \"name\": experiment_name,\n \"type\": obj_type,\n \"key\": obj_key,\n \"parameters\": parameter_dict,\n }\n\n # Set output folder\n output_folder = os.path.join(output, experiment_name)\n if not os.path.exists(output_folder):\n os.makedirs(output_folder)\n\n with open(\n os.path.join(output_folder, \"{}_spec.yaml\".format(obj_type)), \"w\"\n ) as file:\n yaml.dump(obj_dict, file)\n\n return obj_dict", "def test_dev_props(name, properties):\n assert properties['x']\n assert properties['y']", "def get_prop_filter_spec(client_factory, obj_spec, prop_spec):\n prop_filter_spec = client_factory.create('ns0:PropertyFilterSpec')\n prop_filter_spec.propSet = prop_spec\n prop_filter_spec.objectSet = obj_spec\n return prop_filter_spec", "def test_properties_get(self):\n pass", "def get_spec(self):\n from schematics.types import ModelType\n spec = {\n 'id': self.name,\n 'description': self.description,\n 'addressable': self.array,\n 'required': self.required,\n }\n if 
self.type.has_schema:\n spec['schema'] = self.type.get_spec()\n else:\n spec.update(self.type.get_spec())\n\n return spec", "def get_spec(self):\n return {\n 'data': [\n self.data_transform_spec,\n self.edge_transform_spec,\n self.node_group_spec,\n self.edge_group_spec\n ]\n }", "def spec(self):\n return self._spec", "def spec(self):\n return self._spec", "def test_type_builder_handles_specific_non_camel_case_property_names():\n schema = [\n SchemaObject(\n name=\"NonStandardObject\",\n properties=[\n SchemaValue(name=\"baseURL\", value_type=\"string\"),\n SchemaValue(name=\"DEFAULTS\", value_type=\"any\"),\n SchemaValue(name=\"thisPR\", value_type=\"null\"),\n SchemaValue(name=\"instanceID\", value_type=\"number\"),\n ],\n )\n ]\n\n build_result = build_types(schema)\n\n assert len(build_result) == 1\n assert build_result[0] == ClassDefinition(\n name=\"NonStandardObject\",\n properties=[\n PropertyDefinition(\n name=\"base_url\", key=\"baseURL\", value_type=\"str\", known_type=True,\n ),\n PropertyDefinition(\n name=\"defaults\", key=\"DEFAULTS\", value_type=\"Any\", known_type=True,\n ),\n PropertyDefinition(\n name=\"this_pr\", key=\"thisPR\", value_type=\"Any\", known_type=True\n ),\n PropertyDefinition(\n name=\"instance_id\", key=\"instanceID\", value_type=\"int\", known_type=True\n ),\n ],\n depends_on=set(),\n )", "def __init__(self, property_config_info_list=None, object_def_name=None, description=None, search_boost=None, owner_type=None, value_type=None, enum_values=None, namespace=None, editable_in_version=None, blob_mime_type=None, safe_name=None, owner_id=None, array=None, editable_in_microversion=None, id=None, name=None): # noqa: E501 # noqa: E501\n\n self._property_config_info_list = None\n self._object_def_name = None\n self._description = None\n self._search_boost = None\n self._owner_type = None\n self._value_type = None\n self._enum_values = None\n self._namespace = None\n self._editable_in_version = None\n self._blob_mime_type = None\n self._safe_name = None\n self._owner_id = None\n self._array = None\n self._editable_in_microversion = None\n self._id = None\n self._name = None\n self.discriminator = None\n\n if property_config_info_list is not None:\n self.property_config_info_list = property_config_info_list\n if object_def_name is not None:\n self.object_def_name = object_def_name\n if description is not None:\n self.description = description\n if search_boost is not None:\n self.search_boost = search_boost\n if owner_type is not None:\n self.owner_type = owner_type\n if value_type is not None:\n self.value_type = value_type\n if enum_values is not None:\n self.enum_values = enum_values\n if namespace is not None:\n self.namespace = namespace\n if editable_in_version is not None:\n self.editable_in_version = editable_in_version\n if blob_mime_type is not None:\n self.blob_mime_type = blob_mime_type\n if safe_name is not None:\n self.safe_name = safe_name\n if owner_id is not None:\n self.owner_id = owner_id\n if array is not None:\n self.array = array\n if editable_in_microversion is not None:\n self.editable_in_microversion = editable_in_microversion\n if id is not None:\n self.id = id\n if name is not None:\n self.name = name", "def test_list_properties(self):\n pass", "def __init__(self, p_description: str, property_name: str):\n self._p_description = p_description\n self._property_name = property_name\n self._property_data = None\n self._vectorized_data = None\n self._categories = None", "def get_model_with_properties():\n \n m = ConcreteModel()\n\n # 
------------------------------------------------------------------\n # Data\n # ------------------------------------------------------------------\n\n m.np = 25 # Number of possible tays\n m.c = 4 # Number of components\n m.lc = 1 # Light component\n m.hc = 4 # Heavy component\n\n #### Constant parameters\n m.Rgas = 8.314 # Ideal gas constant in J/mol K\n m.Tref = 298.15 # Reference temperature in K\n\n #### Product specifications\n m.xspec_lc = 0.99 # Final liquid composition for methanol (1)\n m.xspec_hc = 0.99 # Fnal liquid composition for butanol (4)\n m.xspec_inter2 = 0.99 # Final liquid composition for ethanol (2)\n m.xspec_inter3 = 0.99 # Final liquid composition for propanol (3)\n m.Ddes = 50 # Final flowrate in distillate in mol/s\n m.Bdes = 50 # Final flowrate in bottoms in mol/s\n m.Sdes = 50 # Final flowrate in side product streams in mol/s\n\n # #### Known initial values\n m.Fi = m.Ddes + m.Bdes + 2 * m.Sdes # Side feed flowrate in mol/s\n m.Vi = 400 # Initial value for vapor flowrate in mol/s\n m.Li = 400 # Initial value for liquid flowrate in mol/s\n\n m.Tf = 358 # Side feed temperature in K\n\n m.Preb = 1.2 # Reboiler pressure in bar\n m.Pbot = 1.12 # Bottom-most tray pressure in bar\n m.Ptop = 1.08 # Top-most tray pressure in bar\n m.Pcon = 1.05 # Condenser pressure in bar\n m.Pf = 1.02\n\n m.rr0 = 0.893 # Internal reflux ratio initial value\n m.bu0 = 0.871 # Internal reflux ratio initial value\n\n\n #### Scaling factors\n m.Hscale = 1e3 \n m.Qscale = 1e-3 \n\n \n #### Constants for the calculation of liquid heat capacity\n m.cpc = {} # Constant 1 for liquid heat capacity \n m.cpc2 = {} # Constant 2 for liquid heat capacity \n m.cpc[1] = m.Rgas \n m.cpc[2] = 1\n m.cpc2['A', 1] = 1 / 100\n m.cpc2['B', 1] = 1 / 1e4\n m.cpc2['A', 2] = 1\n m.cpc2['B', 2] = 1\n\n\n # ------------------------------------------------------------------\n # Physical Properties\n #\n # Notation:\n # MW ........................ molecular weight in g/gmol\n # TB ........................ boiling point temperature in K\n # TC ........................ critical temperature in K\n # PC ........................ critical pressure in bar\n # w ........................ acentric factor\n # lden ...................... liquid density g/m3,\n # dHvap ..................... heat of vaporization in J/mol.\n # vpA, vpB, vpC, and vpD .... vapor pressure constants\n # cpA, cpB, cpC, and cpD .... heat capacity constants J/mol:\n # 1 for liq and 2 for vapor phase\n #\n # Reference A: R.C. Reid, J.M. Prausnitz and B.E. 
Poling,\n # \"The Properties of gases and liquids\", 1987 and 2004 Eds.\n #\n # ------------------------------------------------------------------\n\n m.prop = {} # Properties of components:\n cpL = {} # Ruczika-D method for liquid heat capacity calculation\n # (Reference A, page 6.20)\n sumA = {}\n sumB = {}\n sumC = {}\n cpL['a', 'C(H3)(C)'] = 4.19845\n cpL['b', 'C(H3)(C)'] = -0.312709\n cpL['c', 'C(H3)(C)'] = 0.178609\n cpL['a', 'C(H2)(C2)'] = 2.7345\n cpL['b', 'C(H2)(C2)'] = 0.122732\n cpL['c', 'C(H2)(C2)'] = -0.123482\n cpL['a', 'C(H2)(C)(O)'] = 0.517007\n cpL['b', 'C(H2)(C)(O)'] = 1.26631\n cpL['c', 'C(H2)(C)(O)'] = -0.0939713\n cpL['a', 'O(H)(C)'] = 16.1555\n cpL['b', 'O(H)(C)'] = -11.938\n cpL['c', 'O(H)(C)'] = 2.85117\n cpL['a', 'C(H3)(O)'] = 3.70344\n cpL['b', 'C(H3)(O)'] = -1.12884\n cpL['c', 'C(H3)(O)'] = 0.51239\n sumA[1] = (cpL['a', 'C(H3)(O)']\n + cpL['a', 'O(H)(C)']) \n sumB[1] = (cpL['b', 'C(H3)(O)']\n + cpL['b', 'O(H)(C)'])\n sumC[1] = (cpL['c', 'C(H3)(O)']\n + cpL['c', 'O(H)(C)'])\n sumA[2] = (cpL['a', 'C(H3)(C)']\n + cpL['a', 'C(H2)(C)(O)']\n + cpL['a', 'O(H)(C)'])\n sumB[2] = (cpL['b', 'C(H3)(C)']\n + cpL['b', 'C(H2)(C)(O)']\n + cpL['b', 'O(H)(C)'])\n sumC[2] = (cpL['c', 'C(H3)(C)']\n + cpL['c', 'C(H2)(C)(O)']\n + cpL['c', 'O(H)(C)'])\n sumA[3] = (cpL['a', 'C(H3)(C)']\n + cpL['a', 'C(H2)(C2)']\n + cpL['a', 'C(H2)(C)(O)']\n + cpL['a', 'O(H)(C)'])\n sumB[3] = (cpL['b', 'C(H3)(C)']\n + cpL['b', 'C(H2)(C2)']\n + cpL['b', 'C(H2)(C)(O)']\n + cpL['b', 'O(H)(C)'])\n sumC[3] = (cpL['c', 'C(H3)(C)']\n + cpL['c', 'C(H2)(C2)']\n + cpL['c', 'C(H2)(C)(O)']\n + cpL['c', 'O(H)(C)'])\n sumA[4] = (cpL['a', 'C(H3)(C)']\n + 2 * cpL['a', 'C(H2)(C2)']\n + cpL['a', 'C(H2)(C)(O)']\n + cpL['a', 'O(H)(C)'])\n sumB[4] = (cpL['b', 'C(H3)(C)']\n + 2 * cpL['b', 'C(H2)(C2)']\n + cpL['b', 'C(H2)(C)(O)']\n + cpL['b', 'O(H)(C)'])\n sumC[4] = (cpL['c', 'C(H3)(C)']\n + 2 * cpL['c', 'C(H2)(C2)']\n + cpL['c', 'C(H2)(C)(O)']\n + cpL['c', 'O(H)(C)'])\n\n ## Methanol: component 1\n m.prop[1, 'MW'] = 32.042\n m.prop[1, 'TB'] = 337.7\n m.prop[1, 'TC'] = 512.6\n m.prop[1, 'PC'] = 80.9\n m.prop[1, 'w'] = 0.556\n m.prop[1, 'lden'] = 792e3\n m.prop[1, 'dHvap'] = 38.376e3\n m.prop[1, 'vpA'] = -8.54796\n m.prop[1, 'vpB'] = 0.76982\n m.prop[1, 'vpC'] = -3.10850\n m.prop[1, 'vpD'] = 1.54481\n m.prop[1, 'cpA', 1] = sumA[1]\n m.prop[1, 'cpB', 1] = sumB[1]\n m.prop[1, 'cpC', 1] = sumC[1]\n m.prop[1, 'cpD', 1] = 0\n m.prop[1, 'cpA', 2] = 2.115e1\n m.prop[1, 'cpB', 2] = 7.092e-2\n m.prop[1, 'cpC', 2] = 2.587e-5\n m.prop[1, 'cpD', 2] = -2.852e-8\n\n\n ## Ethanol: component 2\n m.prop[2, 'MW'] = 46.069\n m.prop[2, 'TB'] = 351.4\n m.prop[2, 'TC'] = 513.9\n m.prop[2, 'PC'] = 61.4\n m.prop[2, 'w'] = 0.644\n m.prop[2, 'lden'] = 789.3e3\n m.prop[2, 'dHvap'] = 42.698e3\n m.prop[2, 'vpA'] = -8.51838\n m.prop[2, 'vpB'] = 0.34163\n m.prop[2, 'vpC'] = -5.73683\n m.prop[2, 'vpD'] = 8.32581\n m.prop[2, 'cpA', 1] = sumA[2]\n m.prop[2, 'cpB', 1] = sumB[2]\n m.prop[2, 'cpC', 1] = sumC[2]\n m.prop[2, 'cpD', 1] = 0\n m.prop[2, 'cpA', 2] = 9.014\n m.prop[2, 'cpB', 2] = 2.141e-1\n m.prop[2, 'cpC', 2] = -8.390e-5\n m.prop[2, 'cpD', 2] = 1.373e-9\n\n\n ## Propanol: component 3\n m.prop[3, 'MW'] = 60.096\n m.prop[3, 'TB'] = 370.3\n m.prop[3, 'TC'] = 536.8\n m.prop[3, 'PC'] = 51.7\n m.prop[3, 'w'] = 0.623\n m.prop[3, 'lden'] = 804e3\n m.prop[3, 'dHvap'] = 47.763e3\n m.prop[3, 'vpA'] = -8.05594\n m.prop[3, 'vpB'] = 4.25183e-2\n m.prop[3, 'vpC'] = -7.51296\n m.prop[3, 'vpD'] = 6.89004\n m.prop[3, 'cpA', 1] = sumA[3]\n m.prop[3, 'cpB', 1] = sumB[3]\n 
m.prop[3, 'cpC', 1] = sumC[3]\n m.prop[3, 'cpD', 1] = 0\n m.prop[3, 'cpA', 2] = 2.47\n m.prop[3, 'cpB', 2] = 3.325e-1\n m.prop[3, 'cpC', 2] = -1.855e-4\n m.prop[3, 'cpD', 2] = 4.296e-8\n\n\n ## Butanol: component 4\n m.prop[4, 'MW'] = 74.123\n m.prop[4, 'TB'] = 390.9\n m.prop[4, 'TC'] = 563.1\n m.prop[4, 'PC'] = 44.2\n m.prop[4, 'w'] = 0.593\n m.prop[4, 'lden'] = 810e3\n m.prop[4, 'dHvap'] = 52.607e3\n m.prop[4, 'vpA'] = -8.00756\n m.prop[4, 'vpB'] = 0.53783\n m.prop[4, 'vpC'] = -9.34240\n m.prop[4, 'vpD'] = 6.68692\n m.prop[4, 'cpA', 1] = sumA[4]\n m.prop[4, 'cpB', 1] = sumB[4]\n m.prop[4, 'cpC', 1] = sumC[4]\n m.prop[4, 'cpD', 1] = 0\n m.prop[4, 'cpA', 2] = 3.266\n m.prop[4, 'cpB', 2] = 4.18e-1\n m.prop[4, 'cpC', 2] = -2.242e-4\n m.prop[4, 'cpD', 2] = 4.685e-8\n\n\n return m", "def properties(self) -> google.protobuf.internal.containers.MessageMap[builtins.str, global___Expression]:", "def to_glyphspec(self):\n d = self.vm_props(withvalues=True)\n d[\"type\"] = self.__view_model__\n\n # TODO: Remove this when we rename the BokehJS fill color attribute\n # from \"fill\" to \"fill_color\"\n if \"fill_color\" in d:\n d[\"fill\"] = d.pop(\"fill_color\")\n\n # Iterate over all the DataSpec properties and convert them, using the\n # fact that DataSpecs store the dict-ified version on the object.\n for attrname, dspec in self.dataspecs().iteritems():\n d[attrname] = dspec.to_dict(self)\n return d", "def properties(self) -> Optional[pulumi.Input['EventhubSpecPropertiesArgs']]:\n return pulumi.get(self, \"properties\")", "def build(self) -> None:", "def get_properties():", "def test_properties_distribution_get(self):\n pass", "def test_get_property_matches(self):\n\n results = GenomePropertiesResultsWithMatches(*self.test_genome_property_results, properties_tree=self.test_tree)\n\n self.assertEqual(results.get_property_matches('GenProp0236'), None)\n self.assertEqual(len(results.get_property_matches('GenProp0232')), 9)\n self.assertEqual(len(results.get_property_matches('GenProp0232', top=True)), 2)\n self.assertEqual(len(results.get_property_matches('GenProp0232', sample='C_luteolum_DSM_273')), 4)", "def properties(self):\n raise NotImplementedError", "def _build_study_spec(study_spec: study_pb2.StudySpec, state: int,\n creation_time: datetime.datetime) -> study_pb2.StudySpec:\n study_spec.state = state\n study_spec.creation_time.FromDatetime(creation_time)\n return study_spec", "def __init__(self, specs, resources, properties=None):\n if not properties:\n properties = {}\n self.init_collections()\n self.properties = properties\n self.set_paths(specs, resources)\n self.parse_paths()\n self.find_resources()", "def generate_object_specs(self):\n return [[] for _ in xrange(self.batch_size)]", "def getInputSpecification(cls):\n inputSpecification = super(MonteCarlo, cls).getInputSpecification()\n\n samplerInitInput = InputData.parameterInputFactory(\"samplerInit\")\n limitInput = InputData.parameterInputFactory(\"limit\", contentType=InputTypes.IntegerType)\n samplerInitInput.addSub(limitInput)\n initialSeedInput = InputData.parameterInputFactory(\"initialSeed\", contentType=InputTypes.IntegerType)\n samplerInitInput.addSub(initialSeedInput)\n distInitInput = InputData.parameterInputFactory(\"distInit\", contentType=InputTypes.StringType)\n distSubInput = InputData.parameterInputFactory(\"distribution\")\n distSubInput.addParam(\"name\", InputTypes.StringType)\n distSubInput.addSub(InputData.parameterInputFactory(\"initialGridDisc\", contentType=InputTypes.IntegerType))\n 
distSubInput.addSub(InputData.parameterInputFactory(\"tolerance\", contentType=InputTypes.FloatType))\n\n distInitInput.addSub(distSubInput)\n samplerInitInput.addSub(distInitInput)\n samplingTypeInput = InputData.parameterInputFactory(\"samplingType\", contentType=InputTypes.StringType)\n samplerInitInput.addSub(samplingTypeInput)\n reseedEachIterationInput = InputData.parameterInputFactory(\"reseedEachIteration\", contentType=InputTypes.StringType)\n samplerInitInput.addSub(reseedEachIterationInput)\n\n inputSpecification.addSub(samplerInitInput)\n\n return inputSpecification", "def build_specfile_filesection(spec, files):\n str = '%files\\n'\n\n if 'X_RPM_DEFATTR' not in spec:\n spec['X_RPM_DEFATTR'] = '(-,root,root)'\n\n str = str + '%%defattr %s\\n' % spec['X_RPM_DEFATTR']\n\n supported_tags = {\n 'PACKAGING_CONFIG' : '%%config %s',\n 'PACKAGING_CONFIG_NOREPLACE' : '%%config(noreplace) %s',\n 'PACKAGING_DOC' : '%%doc %s',\n 'PACKAGING_UNIX_ATTR' : '%%attr %s',\n 'PACKAGING_LANG_' : '%%lang(%s) %s',\n 'PACKAGING_X_RPM_VERIFY' : '%%verify %s',\n 'PACKAGING_X_RPM_DIR' : '%%dir %s',\n 'PACKAGING_X_RPM_DOCDIR' : '%%docdir %s',\n 'PACKAGING_X_RPM_GHOST' : '%%ghost %s', }\n\n for file in files:\n # build the tagset\n tags = {}\n for k in supported_tags.keys():\n try:\n v = file.GetTag(k)\n if v:\n tags[k] = v\n except AttributeError:\n pass\n\n # compile the tagset\n str = str + SimpleTagCompiler(supported_tags, mandatory=0).compile( tags )\n\n str = str + ' '\n str = str + file.GetTag('PACKAGING_INSTALL_LOCATION')\n str = str + '\\n\\n'\n\n return str", "def __init__(self, genus, species, properties=None):\n if properties is None:\n properties = []\n self.genus = genus\n self.species = species\n self.bin_nom = '{0} {1}'.format(genus.title(), species)\n self.load_locations = {}\n self.properties = properties", "def getInputSpecification(cls):\n inputSpecification = super(Custom1D, cls).getInputSpecification()\n inputSpecification.addSub(InputData.parameterInputFactory(\"workingDir\", contentType=InputTypes.StringType))\n inputSpecification.addSub(InputData.parameterInputFactory(\"functionType\", contentType=InputTypes.StringType))\n inputSpecification.addSub(InputData.parameterInputFactory(\"dataFilename\", contentType=InputTypes.StringType))\n inputSpecification.addSub(InputData.parameterInputFactory(\"functionID\", contentType=InputTypes.StringType))\n inputSpecification.addSub(InputData.parameterInputFactory(\"variableID\", contentType=InputTypes.StringType))\n\n return inputSpecification", "def _expand_spec(spec, **kwargs):\n fixed_params = {}\n variable_params = {}\n for k, v in spec.items():\n if isinstance(v, list):\n variable_params[k] = v\n elif isinstance(v, dict):\n # Try handling as distribution\n res = sample_values(v)\n if res is not None:\n variable_params[k] = res\n else:\n fixed_params[k] = v\n else:\n fixed_params[k] = v\n\n params = list(ParameterGrid(variable_params))\n [p.update(fixed_params) for p in params]\n return params", "def to_spec(self) -> dict[str, typing.Any]:\n # we can't call Categorization.to_spec here because we need to control ordering\n # in the returned dict so that we get nicely ordered yaml files.\n spec = {\n \"name\": self.name,\n \"title\": self.title,\n \"comment\": self.comment,\n \"references\": self.references,\n \"institution\": self.institution,\n \"hierarchical\": self.hierarchical,\n \"last_update\": self.last_update.isoformat(),\n }\n if self.version is not None:\n spec[\"version\"] = self.version\n spec[\"total_sum\"] = self.total_sum\n 
if self.canonical_top_level_category is not None:\n spec[\n \"canonical_top_level_category\"\n ] = self.canonical_top_level_category.codes[0]\n\n spec[\"categories\"] = {}\n for cat in self.values():\n code, cat_spec = cat.to_spec()\n spec[\"categories\"][code] = cat_spec\n\n return spec", "def _createSpecificProperty(self, filter_name):\n import uno\n from com.sun.star.beans import PropertyValue\n if filter_name == \"impress_html_Export\":\n property = PropertyValue('FilterData', 0,\n uno.Any('[]com.sun.star.beans.PropertyValue',\n (PropertyValue('IsExportNotes', 0, True, 0),\n PropertyValue('PublishMode', 0, 0, 0),\n PropertyValue('Width', 0, 640, 0),\n PropertyValue('Format', 0, 2, 0),),), 0)\n elif filter_name == \"impress_pdf_Export\":\n property = PropertyValue('FilterData', 0,\n uno.Any('[]com.sun.star.beans.PropertyValue',\n (PropertyValue('ExportNotesPages', 0, True, 0),\n PropertyValue('SelectPdfVersion', 0, 1, 0),),), 0)\n elif \"pdf_Export\" in filter_name :\n property = PropertyValue('FilterData', 0,\n uno.Any('[]com.sun.star.beans.PropertyValue',\n (PropertyValue('SelectPdfVersion', 0, 1, 0),),), 0)\n elif filter_name in (\"draw_html_Export\", \"HTML (StarCalc)\"):\n property = PropertyValue('FilterData', 0,\n uno.Any('[]com.sun.star.beans.PropertyValue',\n (PropertyValue('Format', 0, 2, 0),),), 0)\n elif filter_name == \"Text (encoded)\":\n property = PropertyValue('FilterFlags', 0, 'UTF8,LF', 0)\n else:\n return []\n\n return [property, ]", "def __init__(self, *specs: Specification) -> None:\n self.specs = specs", "def write_properties(props):\n root = Element('{%s}coreProperties' % COREPROPS_NS)\n for attr in (\"creator\", \"title\", \"description\", \"subject\", \"identifier\",\n \"language\"):\n SubElement(root, '{%s}%s' % (DCORE_NS, attr)).text = getattr(props, attr)\n\n for attr in (\"created\", \"modified\"):\n value = datetime_to_W3CDTF(getattr(props, attr))\n SubElement(root, '{%s}%s' % (DCTERMS_NS, attr),\n {'{%s}type' % XSI_NS:'%s:W3CDTF' % DCTERMS_PREFIX}).text = value\n\n for attr in (\"lastModifiedBy\", \"category\", \"contentStatus\", \"version\",\n \"revision\", \"keywords\"):\n SubElement(root, '{%s}%s' % (COREPROPS_NS, attr)).text = getattr(props, attr)\n\n if props.lastPrinted is not None:\n SubElement(root, \"{%s}lastPrinted\" % COREPROPS_NS).text = datetime_to_W3CDTF(props.lastPrinted\n )\n return tostring(root)", "def __init__(self):\n self.properties = {}", "def _spec_for(self, cap_tol, when, params, relabel=None):\n cap = self.capability_for(cap_tol)\n spec = mplane.model.Specification(capability=cap)\n\n # set temporal scope\n spec.set_when(when)\n\n # fill in parameters\n # spec.set_single_values() # this is automatic now\n for pname in spec.parameter_names():\n if spec.get_parameter_value(pname) is None:\n if pname in params:\n spec.set_parameter_value(pname, params[pname])\n else:\n raise KeyError(\"missing parameter \"+pname)\n\n # regenerate token based on parameters and temporal scope\n spec.retoken()\n\n # generate label\n if relabel:\n spec.set_label(relabel)\n else:\n spec.set_label(cap.get_label() + \"-\" + str(self._ssn))\n self._ssn += 1\n\n return (cap, spec)", "def properties(self):", "def properties(self):", "def properties(self):", "def create(self, validated_data):\n new_spec = Specification(key = validated_data.get('key'),\n value = validated_data.get('value'),\n category = validated_data.get('category'),\n car = validated_data.get('car'),)\n new_spec.save()\n\n return new_spec", "def build(self):\n raise NotImplementedError(\"This 
should have been implemented.\")", "def test_01_BuildObjects(self):\n # print(PrettyFormatAny.form(self.m_pyhouse_obj._Config, 'A3-01-A - Config', 190))\n # print(__file__)\n # print(PrettyFormatAny.form(self.m_pyhouse_obj._Config.YamlTree, 'Location', 190))\n # self.assertEqual(self.m_pyhouse_obj._Config.YamlConfigDir, '/etc/pyhouse/')", "def _build(self):", "def _build(self):", "def test_properties_evolution_get(self):\n pass", "def new_property(self, node, offset, properties):\n # Does this need refactoring to pass in the node object?\n property_len, property_nameoff = struct.unpack_from('!II', self.string, offset=offset)\n offset += struct.calcsize('!II')\n property_name = properties[property_nameoff]\n property_val = self.string[offset:offset + property_len]\n offset += calc_length_word_align(property_len)\n property_val = self.process_property('0', property_name, property_val, node)\n node.add_property(property_name, property_val)\n return offset", "def _write_elements_properties(self, f, size):\n #return self._write_elements_properties2(f, size)\n msg = []\n missing_properties = []\n\n eids_written = []\n #pids = sorted(self.properties.keys())\n\n ptypes = [self.properties_shell.pshell,\n self.properties_shell.pcomp,\n self.pshear,\n self.prod,\n self.properties_solid.psolid,\n\n #self.properties_bar.pbar,\n #self.properties_bar.pbarl,\n #self.properties_beam.pbeam,\n #self.properties_beam.pbeaml,\n ]\n\n n = 0\n pids_all = None # the actual properties\n for t in ptypes:\n if t.n and n == 0:\n pids_all = t.property_id\n n = 1\n elif t.n:\n print pids_all\n print t.property_id\n try:\n pids_all = concatenate(pids_all, t.property_id)\n except ValueError:\n pids_all = array(list(pids_all) + list(t.property_id))\n\n etypes = (self.elements_shell._get_types() +\n self.elements_solid._get_types() +\n [self.crod, self.cshear])\n\n #pids_set = None\n if pids_all is None:\n f.write('$MISSING_ELEMENTS because there are no properties\\n')\n for t in etypes:\n #print \"t.type =\", t.type\n t.write_bdf(f, size=size)\n return\n\n # there are properties\n pids_set = set(list(pids_all))\n\n n = 0\n pids = None\n for t in etypes:\n #print \"t.type =\", t.type\n if t.n and n == 0:\n eids = t.element_id\n pids = t.property_id\n n = 1\n elif t.n:\n try:\n eids = concatenate(eids, t.element_id)\n #except AttributeError:\n #eids = array(list(eids) + list(t.element_id))\n except TypeError:\n #print eids\n #print t.element_id\n eids = array(list(eids) + list(t.element_id))\n except ValueError:\n #print eids\n #print t.element_id\n eids = array(list(eids) + list(t.element_id))\n #asdf\n\n try:\n pids = concatenate(pids, t.property_id)\n except AttributeError:\n pids = array(list(pids) + list(t.property_id))\n except TypeError:\n pids = array(list(pids) + list(t.property_id))\n except ValueError:\n pids = array(list(pids) + list(t.property_id))\n #else:\n #print t.type\n\n elements_by_pid = {}\n if pids is not None:\n pids_unique = unique(pids)\n print \"pids_unique =\", pids_unique\n pids_unique.sort()\n if len(pids_unique) > 0:\n f.write('$ELEMENTS_WITH_PROPERTIES\\n')\n\n for pid in pids_all:\n i = where(pid==pids)[0]\n eids2 = eids[i]\n\n for t in ptypes:\n if t.n and pid in t.property_id:\n print \"prop.type =\", t.type\n t.write_bdf(f, size=size, property_ids=[pid])\n pids_set.remove(pid)\n n = 0\n for t in etypes:\n if not t.n:\n continue\n eids3 = intersect1d(t.element_id, eids2, assume_unique=False)\n #print \"eids3[pid=%s]\" %(pid), eids3\n if n == 0 and len(eids3):\n elements_by_pid[pid] = eids3\n 
n = 1\n elif len(eids3):\n try:\n c = concatenate(elements_by_pid[pid], eids3)\n except TypeError:\n c = array(list(elements_by_pid[pid]) + list(eids3))\n except ValueError:\n c = array(list(elements_by_pid[pid]) + list(eids3))\n elements_by_pid[pid] = c\n else:\n continue\n try:\n t.write_bdf(f, size=size, element_ids=eids3)\n except TypeError:\n print \"t.type =\", t.type\n raise\n del eids3\n #for pid, elements in elements_by_pid.items():\n #print \"pid=%s n=%s\" % (pid, len(elements))\n #print elements_by_pid\n\n # missing properties\n if pids_set:\n pids_list = list(pids_set)\n f.write('$UNASSOCIATED_PROPERTIES\\n')\n for pid in pids_list:\n for t in ptypes:\n if t.n and pid in t.property_id:\n t.write_bdf(f, size=size, property_ids=[pid])\n\n #..todo:: finish...\n f.write('$UNASSOCIATED_ELEMENTS\\n')\n # missing elements...", "def __init__(self, *properties):\n self._properties = properties", "def custom_props():\r\n section = document.add_section()\r\n new_width, new_height = section.page_height, section.page_width\r\n section.orientation = WD_ORIENT.LANDSCAPE\r\n section.page_width = 7772400\r\n section.page_height = 10058400\r\n document.add_heading('Custom Properties', level=1)\r\n\r\n customproperties = get_qlik_sense.get_customprop()\r\n num_of_customproperties = len(customproperties)\r\n table = document.add_table(rows=num_of_customproperties+1, cols=3)\r\n table.style = 'Grid Table 1 Light Accent 1'\r\n row = table.rows[0]\r\n row.cells[0].text = 'name'\r\n row.cells[1].text = 'choice values'\r\n row.cells[2].text = 'object types'\r\n\r\n for customproperty in range(num_of_customproperties):\r\n row = table.rows[customproperty+1]\r\n row.cells[0].text = str(customproperties[customproperty][0])\r\n row.cells[1].text = ', '.join(customproperties[customproperty][1])\r\n row.cells[2].text = ', '.join(customproperties[customproperty][2])\r\n document.add_page_break()" ]
[ "0.6053929", "0.595817", "0.5930383", "0.5877144", "0.5749173", "0.56912214", "0.5589146", "0.5584564", "0.5541774", "0.5472907", "0.54651445", "0.53387296", "0.5295169", "0.5217021", "0.5211193", "0.51826376", "0.51688266", "0.5148735", "0.5138071", "0.5137829", "0.51148397", "0.5113543", "0.51090896", "0.5107701", "0.50745106", "0.50550956", "0.50529927", "0.5051491", "0.50379395", "0.5028919", "0.50274223", "0.499268", "0.49821126", "0.49586505", "0.49409962", "0.49347147", "0.49196234", "0.49123868", "0.490611", "0.49025446", "0.48902586", "0.48880786", "0.48783356", "0.48615688", "0.4858944", "0.48215002", "0.4821092", "0.48063374", "0.48046517", "0.480251", "0.47989088", "0.47938088", "0.47817516", "0.47783238", "0.4775363", "0.4773885", "0.47683716", "0.47586176", "0.47525167", "0.47525167", "0.47499305", "0.47487044", "0.47428992", "0.4738145", "0.47366464", "0.4734699", "0.47332817", "0.47278756", "0.47146475", "0.47109678", "0.47108874", "0.4710478", "0.47094476", "0.4705093", "0.4699388", "0.4699348", "0.4690347", "0.46899012", "0.46711865", "0.4665197", "0.46627226", "0.4662317", "0.4656812", "0.4651816", "0.4649162", "0.46402568", "0.46347308", "0.46267322", "0.46267322", "0.46267322", "0.46263337", "0.46222907", "0.462185", "0.46198037", "0.46198037", "0.46183816", "0.46160638", "0.46155757", "0.4609677", "0.4607773" ]
0.69546396
0
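A minimal sketch of the pod-spec builder pattern that several snippets in the record above follow (the containerPort fragment at the top of this block and the make_pod_spec helpers further down): the charm reads values from its config and returns a plain Kubernetes pod-spec dict. The config keys and image path below are hypothetical placeholders, not values taken from any row.

def make_pod_spec(config):
    # Assemble a minimal Kubernetes pod spec as a plain dict.
    # 'app-image' and 'advertised-port' are placeholder config keys.
    return {
        'containers': [{
            'name': 'app',
            'imageDetails': {'imagePath': config['app-image']},
            'ports': [{
                'containerPort': config['advertised-port'],
                'protocol': 'TCP',
            }],
        }],
    }

# Example usage with placeholder values:
spec = make_pod_spec({'app-image': 'busybox:latest', 'advertised-port': 8080})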
Builds the object Spec.
def build_object_spec(client_factory, root_folder, traversal_specs): object_spec = client_factory.create('ns0:ObjectSpec') object_spec.obj = root_folder object_spec.skip = False object_spec.selectSet = traversal_specs return object_spec
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build(self, spec, prefix):\n make()", "def generate_specs_build(self):\n from django_swagger_utils.drf_server.generators.swagger_generator import SwaggerGenerator\n\n swagger_gen = SwaggerGenerator(self.parser, self.paths, self.app_name)\n # generating request_response files\n swagger_gen.generate_request_response()\n # testing properties\n swagger_gen.generate_definitions()\n # generating global parameters\n swagger_gen.generate_parameters()\n # generating global response\n swagger_gen.generate_responses()\n # generating urls\n swagger_gen.generate_urls()", "def __init__(self, spec):\n self.spec = spec", "def build():", "def build(self) -> None:", "def _build_pod_spec(self):\n logger.debug(\"Building Pod Spec\")\n crds = []\n try:\n crds = [\n yaml.load(Path(f).read_text())\n for f in [\n \"files/configs.config.gatekeeper.sh.yaml\",\n \"files/constrainttemplates.templates.gatekeeper.sh.yaml\",\n \"files/constraintpodstatuses.status.gatekeeper.sh.yaml\",\n \"files/constrainttemplatepodstatuses.status.gatekeeper.sh.yaml\",\n ]\n ]\n except yaml.YAMLError as exc:\n logger.error(\"Error in configuration file:\", exc)\n\n crd_objects = [\n CustomResourceDefintion(crd[\"metadata\"][\"name\"], crd[\"spec\"])\n for crd in crds\n ]\n\n config = self.model.config\n spec_template = {}\n with open(\"files/pod-spec.yaml.jinja2\") as fh:\n spec_template = Template(fh.read())\n\n try:\n image_details = self.image.fetch()\n except OCIImageResourceError as e:\n self.model.unit.status = e.status\n return\n\n template_args = {\n \"crds\": crd_objects,\n \"image_details\": image_details,\n \"imagePullPolicy\": config[\"imagePullPolicy\"],\n \"app_name\": self.app.name,\n \"audit_cli_args\": self._audit_cli_args(),\n \"namespace\": os.environ[\"JUJU_MODEL_NAME\"],\n }\n\n spec = yaml.load(spec_template.render(**template_args))\n\n print(f\"Pod spec: {spec}\")\n return spec", "def _build(self):", "def _build(self):", "def build(self):\n pass", "def build(self):\n pass", "def build(self):\n raise NotImplementedError(\"This should have been implemented.\")", "def build(self):\n raise NotImplementedError", "def build (self):\n raise NotImplementedError", "def _build_impl(self):", "def build(_):", "def build(self):", "def build(self):", "def build(self):", "def test_01_BuildObjects(self):\n # print(PrettyFormatAny.form(self.m_pyhouse_obj, 'A1-01-A - Main', 190))\n # print(PrettyFormatAny.form(self.m_pyhouse_obj.House, 'A1-02-B - House', 190))\n # print(PrettyFormatAny.form(self.m_pyhouse_obj.House.Location, 'A1-01-C - Location', 190))\n self.assertIsInstance(self.m_pyhouse_obj, PyHouseInformation)\n self.assertIsInstance(self.m_pyhouse_obj.House, HouseInformation)\n self.assertIsInstance(self.m_pyhouse_obj.House.Location, LocationInformationPrivate)", "def make_cake_spec():\n ###############################################################################################\n # Templates\n tmpl = make_cake_templates()\n\n ###############################################################################################\n # Objects\n cake = MaterialSpec(\n name=\"Abstract Cake\",\n template=tmpl[\"Dessert\"],\n process=ProcessSpec(\n name='Icing, in General',\n template=tmpl[\"Icing\"],\n tags=[\n 'spreading'\n ],\n notes='The act of covering a baked output with frosting'\n ),\n file_links=FileLink(\n filename=\"Becky's Butter Cake\",\n url='https://www.landolakes.com/recipe/16730/becky-s-butter-cake/'\n ),\n tags=[\n 'cake::butter cake',\n 'dessert::baked::cake',\n 'iced::chocolate'\n ],\n notes='Butter cake 
recipe reminiscent of the 1-2-3-4 cake that Grandma may have baked.'\n )\n\n ########################\n frosting = MaterialSpec(\n name=\"Abstract Frosting\",\n template=tmpl[\"Dessert\"],\n process=ProcessSpec(\n name='Mixing Frosting, in General',\n template=tmpl[\"Mixing\"],\n tags=[\n 'mixing'\n ],\n notes='Combining ingredients to make a sweet frosting'\n ),\n tags=[\n 'frosting::chocolate',\n 'topping::chocolate'\n ],\n notes='Chocolate frosting'\n )\n IngredientSpec(\n name=\"{} input\".format(frosting.name),\n tags=list(frosting.tags),\n notes='Seems like a lot of frosting',\n labels=['coating'],\n process=cake.process,\n material=frosting,\n absolute_quantity=NominalReal(nominal=0.751, units='kg')\n )\n\n baked_cake = MaterialSpec(\n name=\"Abstract Baked Cake\",\n template=tmpl[\"Generic Material\"],\n process=ProcessSpec(\n name='Baking, in General',\n template=tmpl[\"Baking in an oven\"],\n tags=[\n 'oven::baking'\n ],\n notes='Using heat to convert batter into a solid matrix'\n ),\n tags=[\n ],\n notes='The cakey part of the cake'\n )\n IngredientSpec(\n name=\"{} input\".format(baked_cake.name),\n tags=list(baked_cake.tags),\n labels=['substrate'],\n process=cake.process,\n material=baked_cake\n )\n\n ########################\n batter = MaterialSpec(\n name=\"Abstract Batter\",\n template=tmpl[\"Generic Material\"],\n process=ProcessSpec(\n name='Mixing Batter, in General',\n template=tmpl[\"Mixing\"],\n tags=[\n 'mixing'\n ],\n notes='Combining ingredients to make a baking feedstock'\n ),\n tags=[\n ],\n notes='The fluid that converts to cake with heat'\n )\n IngredientSpec(\n name=\"{} input\".format(batter.name),\n tags=list(batter.tags),\n labels=['precursor'],\n process=baked_cake.process,\n material=batter\n )\n\n ########################\n wetmix = MaterialSpec(\n name=\"Abstract Wet Mix\",\n template=tmpl[\"Generic Material\"],\n process=ProcessSpec(\n name='Mixing Wet, in General',\n template=tmpl[\"Mixing\"],\n tags=[\n 'mixing'\n ],\n notes='Combining wet ingredients to make a baking feedstock'\n ),\n tags=[\n ],\n notes='The wet fraction of a batter'\n )\n IngredientSpec(\n name=\"{} input\".format(wetmix.name),\n tags=list(wetmix.tags),\n labels=['wet'],\n process=batter.process,\n material=wetmix\n )\n\n drymix = MaterialSpec(\n name=\"Abstract Dry Mix\",\n template=tmpl[\"Generic Material\"],\n process=ProcessSpec(\n name='Mixing Dry, in General',\n template=tmpl[\"Mixing\"],\n tags=[\n 'mixing'\n ],\n notes='Combining dry ingredients to make a baking feedstock'\n ),\n tags=[\n ],\n notes='The dry fraction of a batter'\n )\n IngredientSpec(\n name=\"{} input\".format(drymix.name),\n tags=list(drymix.tags),\n labels=['dry'],\n process=batter.process,\n material=drymix,\n absolute_quantity=NominalReal(nominal=3.052, units='cups')\n )\n\n ########################\n flour = MaterialSpec(\n name=\"Abstract Flour\",\n template=tmpl[\"Generic Material\"],\n process=ProcessSpec(\n name='Buying Flour, in General',\n template=tmpl[\"Procurement\"],\n tags=[\n 'purchase::dry-goods'\n ],\n notes='Purchasing all purpose flour'\n ),\n tags=[\n ],\n notes='All-purpose flour'\n )\n IngredientSpec(\n name=\"{} input\".format(flour.name),\n tags=list(flour.tags),\n labels=['dry'],\n process=drymix.process,\n material=flour,\n volume_fraction=NominalReal(nominal=0.9829, units='') # 3 cups\n )\n\n baking_powder = MaterialSpec(\n name=\"Abstract Baking Powder\",\n template=tmpl[\"Generic Material\"],\n process=ProcessSpec(\n name='Buying Baking Powder, in General',\n 
template=tmpl[\"Procurement\"],\n tags=[\n 'purchase::dry-goods'\n ],\n notes='Purchasing baking powder'\n ),\n tags=[\n ],\n notes='Leavening agent for cake'\n )\n IngredientSpec(\n name=\"{} input\".format(baking_powder.name),\n tags=list(baking_powder.tags),\n labels=['leavening', 'dry'],\n process=drymix.process,\n material=baking_powder,\n volume_fraction=NominalReal(nominal=0.0137, units='') # 2 teaspoons\n )\n\n salt = MaterialSpec(\n name=\"Abstract Salt\",\n template=tmpl[\"Generic Material\"],\n process=ProcessSpec(\n name='Buying Salt, in General',\n template=tmpl[\"Procurement\"],\n tags=[\n 'purchase::dry-goods'\n ],\n notes='Purchasing salt'\n ),\n tags=[\n ],\n notes='Plain old NaCl'\n )\n IngredientSpec(\n name=\"{} input\".format(salt.name),\n tags=list(salt.tags),\n labels=['dry', 'seasoning'],\n process=drymix.process,\n material=salt,\n volume_fraction=NominalReal(nominal=0.0034, units='') # 1/2 teaspoon\n )\n\n sugar = MaterialSpec(\n name=\"Abstract Sugar\",\n template=tmpl[\"Generic Material\"],\n process=ProcessSpec(\n name='Buying Sugar, in General',\n template=tmpl[\"Procurement\"],\n tags=[\n 'purchase::dry-goods'\n ],\n notes='Purchasing all purpose flour'\n ),\n tags=[\n ],\n notes='Sugar'\n )\n IngredientSpec(\n name=\"{} input\".format(sugar.name),\n tags=list(sugar.tags),\n labels=['wet', 'sweetener'],\n process=wetmix.process,\n material=sugar,\n absolute_quantity=NominalReal(nominal=2, units='cups')\n )\n\n butter = MaterialSpec(\n name=\"Abstract Butter\",\n template=tmpl[\"Generic Material\"],\n process=ProcessSpec(\n name='Buying Butter, in General',\n template=tmpl[\"Procurement\"],\n tags=[\n 'purchase::produce'\n ],\n notes='Purchasing butter'\n ),\n tags=[\n ],\n notes='Shortening for making rich, buttery baked goods'\n )\n IngredientSpec(\n name=\"{} input\".format(butter.name),\n tags=list(butter.tags),\n labels=['wet', 'shortening'],\n process=wetmix.process,\n material=butter,\n absolute_quantity=NominalReal(nominal=1, units='cups')\n )\n IngredientSpec(\n name=\"{} input\".format(butter.name),\n tags=list(butter.tags),\n labels=['shortening'],\n process=frosting.process,\n material=butter,\n mass_fraction=NominalReal(nominal=0.1434, units='') # 1/2 c @ 0.911 g/cc\n )\n\n eggs = MaterialSpec(\n name=\"Abstract Eggs\",\n template=tmpl[\"Generic Material\"],\n process=ProcessSpec(\n name='Buying Eggs, in General',\n template=tmpl[\"Procurement\"],\n tags=[\n 'purchase::produce'\n ],\n notes='Purchasing eggs'\n ),\n tags=[\n ],\n notes=''\n )\n IngredientSpec(\n name=\"{} input\".format(eggs.name),\n tags=list(eggs.tags),\n labels=['wet'],\n process=wetmix.process,\n material=eggs,\n absolute_quantity=NominalReal(nominal=4, units='')\n )\n\n vanilla = MaterialSpec(\n name=\"Abstract Vanilla\",\n template=tmpl[\"Generic Material\"],\n process=ProcessSpec(\n name='Buying Vanilla, in General',\n template=tmpl[\"Procurement\"],\n tags=[\n 'purchase::dry-goods'\n ],\n notes='Purchasing vanilla'\n ),\n tags=[\n ],\n notes=''\n )\n IngredientSpec(\n name=\"{} input\".format(vanilla.name),\n tags=list(vanilla.tags),\n labels=['wet', 'flavoring'],\n process=wetmix.process,\n material=vanilla,\n absolute_quantity=NominalReal(nominal=2, units='teaspoons')\n )\n IngredientSpec(\n name=\"{} input\".format(vanilla.name),\n tags=list(vanilla.tags),\n labels=['flavoring'],\n process=frosting.process,\n material=vanilla,\n mass_fraction=NominalReal(nominal=0.0231, units='') # 2 tsp @ 0.879 g/cc\n )\n\n milk = MaterialSpec(\n name=\"Abstract Milk\",\n 
template=tmpl[\"Generic Material\"],\n process=ProcessSpec(\n name='Buying Milk, in General',\n template=tmpl[\"Procurement\"],\n tags=[\n 'purchase::produce'\n ],\n notes='Purchasing milk'\n ),\n tags=[\n ],\n notes=''\n )\n IngredientSpec(\n name=\"{} input\".format(milk.name),\n tags=list(milk.tags),\n labels=['wet'],\n process=batter.process,\n material=milk,\n absolute_quantity=NominalReal(nominal=1, units='cup')\n )\n IngredientSpec(\n name=\"{} input\".format(milk.name),\n tags=list(milk.tags),\n labels=[],\n process=frosting.process,\n material=milk,\n mass_fraction=NominalReal(nominal=0.0816, units='') # 1/4 c @ 1.037 g/cc\n )\n\n chocolate = MaterialSpec(\n name=\"Abstract Chocolate\",\n template=tmpl[\"Generic Material\"],\n process=ProcessSpec(\n name='Buying Chocolate, in General',\n template=tmpl[\"Procurement\"],\n tags=[\n 'purchase::dry-goods'\n ],\n notes='Purchasing chocolate'\n ),\n tags=[\n ],\n notes=''\n )\n IngredientSpec(\n name=\"{} input\".format(chocolate.name),\n tags=list(chocolate.tags),\n labels=['flavoring'],\n process=frosting.process,\n material=chocolate,\n mass_fraction=NominalReal(nominal=0.1132, units='') # 3 oz.\n )\n\n powder_sugar = MaterialSpec(\n name=\"Abstract Powdered Sugar\",\n template=tmpl[\"Generic Material\"],\n process=ProcessSpec(\n name='Buying Powdered Sugar, in General',\n template=tmpl[\"Procurement\"],\n tags=[\n 'purchase::dry-goods'\n ],\n notes='Purchasing powdered sugar'\n ),\n tags=[\n ],\n notes='Granulated sugar mixed with corn starch'\n )\n IngredientSpec(\n name=\"{} input\".format(powder_sugar.name),\n tags=list(powder_sugar.tags),\n labels=['flavoring'],\n process=frosting.process,\n material=powder_sugar,\n mass_fraction=NominalReal(nominal=0.6387, units='') # 4 c @ 30 g/ 0.25 cups\n )\n return cake", "def generate_object_specs(self):\n return [[] for _ in xrange(self.batch_size)]", "def _build(self):\n raise NotImplementedError()", "def _build(self, **kwargs):", "def build_obj_spec(\n obj_key,\n parameter_dict,\n experiment_name=None,\n obj_type=\"policy\",\n output=\"./policy_yamls/\",\n):\n # Set name via timestamp if not specified\n if obj_type not in [\"policy\", \"estimator\", \"dataset\"]:\n print(\"Invalid type: {}\".format(obj_type))\n return None\n\n if experiment_name == None:\n now = datetime.now()\n current_time = now.strftime(\"%H%M%S\")\n experiment_name = \"experiment_{}\".format(current_time)\n\n # Build dict structure\n obj_dict = {\n \"name\": experiment_name,\n \"type\": obj_type,\n \"key\": obj_key,\n \"parameters\": parameter_dict,\n }\n\n # Set output folder\n output_folder = os.path.join(output, experiment_name)\n if not os.path.exists(output_folder):\n os.makedirs(output_folder)\n\n with open(\n os.path.join(output_folder, \"{}_spec.yaml\".format(obj_type)), \"w\"\n ) as file:\n yaml.dump(obj_dict, file)\n\n return obj_dict", "def build_rspec(cls, slice_urn, resources, leases, flowspace, vms, rspec_version='GENI 3 request'):\n rspec = RSpec(version=rspec_version)\n\n nodes = []\n channels = []\n links = []\n\n # XXX Here it is only about mappings and hooks between ontologies\n\n for urn in resources:\n # XXX TO BE CORRECTED, this handles None values\n if not urn:\n continue\n resource = dict()\n # TODO: take into account the case where we send a dict of URNs without keys\n #resource['component_id'] = resource.pop('urn')\n resource['component_id'] = urn\n resource_hrn, resource_type = urn_to_hrn(urn) # resource['component_id'])\n # build component_manager_id\n top_auth = 
resource_hrn.split('.')[0]\n cm = urn.split(\"+\")\n resource['component_manager_id'] = \"%s+%s+authority+cm\" % (cm[0],top_auth)\n\n #print \"resource_type\", resource_type\n if resource_type == 'node':\n #print \"NODE\", resource, cls\n resource = cls.on_build_resource_hook(resource)\n nodes.append(resource)\n elif resource_type == 'link':\n links.append(resource)\n elif resource_type == 'channel':\n channels.append(resource)\n else:\n raise Exception, \"Not supported type of resource\" \n\n #for node in nodes:\n # print \"NODE:\", node\n\n rspec.version.add_nodes(nodes, rspec_content_type=\"request\")\n\n #rspec.version.add_links(links)\n #rspec.version.add_channels(channels)\n\n sfa_leases = cls.manifold_to_sfa_leases(leases, slice_urn)\n #print \"sfa_leases\", sfa_leases\n if sfa_leases:\n # SFAWRAP BUG ???\n # rspec.version.add_leases bugs with an empty set of leases\n # slice_id = leases[0]['slice_id']\n # TypeError: list indices must be integers, not str\n rspec.version.add_leases(sfa_leases, []) # XXX Empty channels for now\n \n return rspec.toxml()", "def build(self):\n\n raise NotImplementedError(\"Implement build() method\")", "def build(self):\n raise NotImplementedError(\"This is an interface method. Implement it in subclass.\")", "def __init__(self, *specs: Specification) -> None:\n self.specs = specs", "def __init__(self, name=None, debug_mode=False, features=None, ui=None, is_default=False, created=None, modified=None, id=None, team_id=None, team=None, portals=None, product_groups=None, product_types=None, product_sizes=None, product_size_materials=None, product_size_materials_rel=None):\n self.swagger_types = {\n 'name': 'str',\n 'debug_mode': 'bool',\n 'features': 'object',\n 'ui': 'object',\n 'is_default': 'bool',\n 'created': 'datetime',\n 'modified': 'datetime',\n 'id': 'str',\n 'team_id': 'str',\n 'team': 'Team',\n 'portals': 'list[Portal]',\n 'product_groups': 'list[ProductGroup]',\n 'product_types': 'list[ProductType]',\n 'product_sizes': 'list[ProductSize]',\n 'product_size_materials': 'list[ProductSizeMaterial]',\n 'product_size_materials_rel': 'list[TeamBuilderConfigProductSizeMaterial]'\n }\n\n self.attribute_map = {\n 'name': 'name',\n 'debug_mode': 'debugMode',\n 'features': 'features',\n 'ui': 'ui',\n 'is_default': 'isDefault',\n 'created': 'created',\n 'modified': 'modified',\n 'id': 'id',\n 'team_id': 'teamId',\n 'team': 'team',\n 'portals': 'portals',\n 'product_groups': 'productGroups',\n 'product_types': 'productTypes',\n 'product_sizes': 'productSizes',\n 'product_size_materials': 'productSizeMaterials',\n 'product_size_materials_rel': 'productSizeMaterialsRel'\n }\n\n self._name = name\n self._debug_mode = debug_mode\n self._features = features\n self._ui = ui\n self._is_default = is_default\n self._created = created\n self._modified = modified\n self._id = id\n self._team_id = team_id\n self._team = team\n self._portals = portals\n self._product_groups = product_groups\n self._product_types = product_types\n self._product_sizes = product_sizes\n self._product_size_materials = product_size_materials\n self._product_size_materials_rel = product_size_materials_rel", "def getBuilder():", "def get_specification(self) -> Dict:\n specification = {\n 'version': VERSION,\n 'metadata': {\n 'twoColumn': True,\n 'layout': self.autolayout\n },\n 'nodes': []\n }\n\n def strip_io(io_list: list, direction) -> list:\n \"\"\"\n Strips every input/output from metadata and leaves only\n `name` and `type` keys.\n \"\"\"\n return [\n {\n 'name': io['name'],\n 'type': 
io['type'],\n 'direction': direction\n }\n for io in io_list\n ]\n\n toremove = set()\n for key, node in self.nodes.items():\n try:\n node_cls = load_class(node.cls_name)\n except (ModuleNotFoundError, ImportError, Exception) as err:\n msg = f'Could not add {node_cls}. Reason:'\n _LOGGER.warn('-' * len(msg))\n _LOGGER.warn(msg)\n _LOGGER.warn(err)\n _LOGGER.warn('-' * len(msg))\n toremove.add(key)\n continue\n parameterschema = node_cls.form_parameterschema()\n\n properties = []\n for name, props in parameterschema['properties'].items():\n new_property = {'name': name}\n\n if 'default' in props:\n new_property['default'] = props['default']\n\n if 'description' in props:\n new_property['description'] = props['description']\n\n def add_default(default_val):\n if new_property.get('default') is None:\n new_property['default'] = default_val\n\n # Case for an input with range defined\n if 'enum' in props:\n new_property['type'] = 'select'\n new_property['values'] = list(map(str, props['enum']))\n add_default(new_property['values'][0])\n # Case for a single value input\n elif 'type' in props:\n if 'array' in props['type']:\n new_property['type'] = 'list'\n if 'items' in props and 'type' in props['items']:\n dtype = props['items']['type']\n new_property['dtype'] = dtype\n add_default([])\n elif 'boolean' in props['type']:\n new_property['type'] = 'checkbox'\n add_default(False)\n elif 'string' in props['type']:\n new_property['type'] = 'text'\n add_default('')\n elif 'integer' in props['type']:\n new_property['type'] = 'integer'\n add_default(0)\n elif 'number' in props['type']:\n new_property['type'] = 'number'\n add_default(0)\n elif 'object' in props['type']:\n # Object arguments should be defined in specification\n # as node inputs, rather than properties\n new_property = None\n else:\n new_property['type'] = 'text'\n add_default('')\n # If no type is specified then text is used\n else:\n new_property['type'] = 'text'\n add_default('')\n\n if new_property is not None:\n properties.append(new_property)\n\n specification['nodes'].append({\n 'name': node.name,\n 'type': node.type,\n 'category': node.category,\n 'properties': properties,\n 'interfaces': strip_io(\n self.io_mapping[node.type]['inputs'],\n 'input'\n ) + strip_io(\n self.io_mapping[node.type]['outputs'],\n 'output'\n )\n })\n\n for key in toremove:\n del self.nodes[key]\n return specification", "def test_01_BuildObjects(self):\n # print(PrettyFormatAny.form(self.m_pyhouse_obj._Config, 'A3-01-A - Config', 190))\n # print(__file__)\n # print(PrettyFormatAny.form(self.m_pyhouse_obj._Config.YamlTree, 'Location', 190))\n # self.assertEqual(self.m_pyhouse_obj._Config.YamlConfigDir, '/etc/pyhouse/')", "def build_model():", "def __init__(self, engine: Engine, spec_id: int):\n\n self.engine = engine\n self.data = engine.get_spec(spec_id)\n assert self.data is not None\n\n # build the talent rows\n self.talent_rows = {}\n self.levels = {}\n for tier_data in self.data[\"talent_tiers\"]:\n # store talents as one-indexed\n talent_row = {}\n for talent_data in tier_data[\"talents\"]:\n ttip = talent_data[\"spell_tooltip\"]\n f_str = \"({}, {}) {}\".format(\n talent_data[\"talent\"][\"name\"],\n ttip[\"cast_time\"],\n ttip[\"description\"],\n )\n talent_row[talent_data[\"column_index\"] + 1] = {\n \"text\": f_str,\n \"raw\": talent_data,\n }\n\n # save tiers as one-indexed\n index = level_to_index(tier_data[\"level\"])\n self.talent_rows[index] = talent_row\n self.levels[index] = tier_data[\"level\"]\n\n # store this spec's talent macros\n 
self.macros = {}\n for row_idx, row_data in self.talent_rows.items():\n macro = build_row_macro(row_idx, row_data)\n if macro is not None:\n self.macros[row_idx] = macro\n\n self.name = self.data[\"name\"]\n\n # build a data structure for serialization\n media = engine.get_spec_media(spec_id)\n assert media is not None\n self.to_serialize = {\n \"icon\": media[\"assets\"][0][\"value\"],\n \"name\": self.name,\n \"slug\": self.name.lower().replace(\" \", \"_\"),\n \"role\": self.data[\"role\"][\"name\"],\n \"has_macros\": bool(self.macros),\n }\n self.to_serialize[\"talent_rows\"] = []\n for row, data in self.talent_rows.items():\n rdata: dict = {\n \"index\": row,\n \"level\": self.levels[row],\n \"macro\": None,\n }\n if row in self.macros:\n rdata[\"macro\"] = self.macros[row][0]\n rdata[\"macro_lines\"] = self.macros[row][1]\n rdata[\"talents\"] = {}\n for talent_idx, talent_data in data.items():\n tdata = Talent(\n self.engine, talent_data[\"raw\"][\"talent\"][\"id\"]\n ).to_serialize\n tdata[\"active\"] = is_talent_active(talent_data[\"raw\"])\n rdata[\"talents\"][talent_idx] = tdata\n self.to_serialize[\"talent_rows\"].append(rdata)", "def setUp(self):\n self.nC4H10O = Species(\n label='n-C4H10O',\n conformer=Conformer(\n E0=(-317.807, 'kJ/mol'),\n modes=[\n IdealGasTranslation(mass=(74.07, \"g/mol\")),\n NonlinearRotor(inertia=([41.5091, 215.751, 233.258], \"amu*angstrom^2\"), symmetry=1),\n HarmonicOscillator(frequencies=(\n [240.915, 341.933, 500.066, 728.41, 809.987, 833.93, 926.308, 948.571, 1009.3, 1031.46, 1076,\n 1118.4, 1184.66, 1251.36, 1314.36, 1321.42, 1381.17, 1396.5, 1400.54, 1448.08, 1480.18, 1485.34,\n 1492.24, 1494.99, 1586.16, 2949.01, 2963.03, 2986.19, 2988.1, 2995.27, 3026.03, 3049.05, 3053.47,\n 3054.83, 3778.88], \"cm^-1\")),\n HinderedRotor(inertia=(0.854054, \"amu*angstrom^2\"), symmetry=1, fourier=(\n [[0.25183, -1.37378, -2.8379, 0.0305112, 0.0028088],\n [0.458307, 0.542121, -0.599366, -0.00283925, 0.0398529]], \"kJ/mol\")),\n HinderedRotor(inertia=(8.79408, \"amu*angstrom^2\"), symmetry=1, fourier=(\n [[0.26871, -0.59533, -8.15002, -0.294325, -0.145357],\n [1.1884, 0.99479, -0.940416, -0.186538, 0.0309834]], \"kJ/mol\")),\n HinderedRotor(inertia=(7.88153, \"amu*angstrom^2\"), symmetry=1, fourier=(\n [[-4.67373, 2.03735, -6.25993, -0.27325, -0.048748],\n [-0.982845, 1.76637, -1.57619, 0.474364, -0.000681718]], \"kJ/mol\")),\n HinderedRotor(inertia=(2.81525, \"amu*angstrom^2\"), symmetry=3, barrier=(2.96807, \"kcal/mol\")),\n ],\n spin_multiplicity=1,\n optical_isomers=1,\n ),\n molecular_weight=(74.07, \"g/mol\"),\n transport_data=TransportData(sigma=(5.94, 'angstrom'), epsilon=(559, 'K')),\n energy_transfer_model=SingleExponentialDown(alpha0=(447.5 * 0.011962, \"kJ/mol\"), T0=(300, \"K\"), n=0.85),\n )\n\n self.nC4H10O.from_smiles('CCCCO')\n\n self.nC4H8 = Species(\n label='n-C4H8',\n conformer=Conformer(\n E0=(-17.8832, 'kJ/mol'),\n modes=[\n IdealGasTranslation(mass=(56.06, \"g/mol\")),\n NonlinearRotor(inertia=([22.2748, 122.4, 125.198], \"amu*angstrom^2\"), symmetry=1),\n HarmonicOscillator(frequencies=(\n [308.537, 418.67, 636.246, 788.665, 848.906, 936.762, 979.97, 1009.48, 1024.22, 1082.96, 1186.38,\n 1277.55, 1307.65, 1332.87, 1396.67, 1439.09, 1469.71, 1484.45, 1493.19, 1691.49, 2972.12, 2994.31,\n 3018.48, 3056.87, 3062.76, 3079.38, 3093.54, 3174.52], \"cm^-1\")),\n HinderedRotor(inertia=(5.28338, \"amu*angstrom^2\"), symmetry=1, fourier=(\n [[-0.579364, -0.28241, -4.46469, 0.143368, 0.126756],\n [1.01804, -0.494628, -0.00318651, -0.245289, 
0.193728]], \"kJ/mol\")),\n HinderedRotor(inertia=(2.60818, \"amu*angstrom^2\"), symmetry=3, fourier=(\n [[0.0400372, 0.0301986, -6.4787, -0.0248675, -0.0324753],\n [0.0312541, 0.0538, -0.493785, 0.0965968, 0.125292]], \"kJ/mol\")),\n ],\n spin_multiplicity=1,\n optical_isomers=1,\n ),\n )\n\n self.nC4H8.from_smiles('CCC=C')\n\n self.H2O = Species(\n label='H2O',\n conformer=Conformer(\n E0=(-269.598, 'kJ/mol'),\n modes=[\n IdealGasTranslation(mass=(18.01, \"g/mol\")),\n NonlinearRotor(inertia=([0.630578, 1.15529, 1.78586], \"amu*angstrom^2\"), symmetry=2),\n HarmonicOscillator(frequencies=([1622.09, 3771.85, 3867.85], \"cm^-1\")),\n ],\n spin_multiplicity=1,\n optical_isomers=1,\n ),\n )\n\n self.H2O.from_smiles('O')\n\n self.N2 = Species(\n label='N2',\n molecular_weight=(28.04, \"g/mol\"),\n transport_data=TransportData(sigma=(3.41, \"angstrom\"), epsilon=(124, \"K\")),\n energy_transfer_model=None,\n )\n\n self.N2.from_smiles('N#N')\n\n logging.error('to TS')\n\n self.TS = TransitionState(\n label='TS',\n conformer=Conformer(\n E0=(-42.4373, \"kJ/mol\"),\n modes=[\n IdealGasTranslation(mass=(74.07, \"g/mol\")),\n NonlinearRotor(inertia=([40.518, 232.666, 246.092], \"u*angstrom**2\"), symmetry=1, quantum=False),\n HarmonicOscillator(frequencies=(\n [134.289, 302.326, 351.792, 407.986, 443.419, 583.988, 699.001, 766.1, 777.969, 829.671, 949.753,\n 994.731, 1013.59, 1073.98, 1103.79, 1171.89, 1225.91, 1280.67, 1335.08, 1373.9, 1392.32, 1417.43,\n 1469.51, 1481.61, 1490.16, 1503.73, 1573.16, 2972.85, 2984.3, 3003.67, 3045.78, 3051.77, 3082.37,\n 3090.44, 3190.73, 3708.52], \"kayser\")),\n HinderedRotor(inertia=(2.68206, \"amu*angstrom^2\"), symmetry=3, barrier=(3.35244, \"kcal/mol\")),\n HinderedRotor(inertia=(9.77669, \"amu*angstrom^2\"), symmetry=1, fourier=(\n [[0.208938, -1.55291, -4.05398, -0.105798, -0.104752],\n [2.00518, -0.020767, -0.333595, 0.137791, -0.274578]], \"kJ/mol\")),\n ],\n spin_multiplicity=1,\n optical_isomers=1,\n ),\n frequency=(-2038.34, 'cm^-1'),\n )\n\n self.reaction = Reaction(\n label='dehydration',\n reactants=[self.nC4H10O],\n products=[self.nC4H8, self.H2O],\n transition_state=self.TS,\n kinetics=Arrhenius(A=(0.0387, 'm^3/(mol*s)'), n=2.7, Ea=(2.6192e4, 'J/mol'), T0=(1, 'K'))\n )\n\n self.network = Network(\n label='n-butanol',\n isomers=[Configuration(self.nC4H10O)],\n reactants=[],\n products=[Configuration(self.nC4H8, self.H2O)],\n path_reactions=[self.reaction],\n bath_gas={self.N2: 1.0},\n )\n\n self.pdepnetwork = deepcopy(self.network)\n self.pdepnetwork.__class__ = PDepNetwork\n self.pdepnetwork.source = [self.pdepnetwork.isomers[0].species[0]]\n self.pdepnetwork.index = 1\n self.pdepnetwork.explored = []", "def build_property_spec(client_factory, type=\"VirtualMachine\",\r\n properties_to_collect=[\"name\"],\r\n all_properties=False):\r\n property_spec = client_factory.create('ns0:PropertySpec')\r\n property_spec.all = all_properties\r\n property_spec.pathSet = properties_to_collect\r\n property_spec.type = type\r\n return property_spec", "def _make_specs(cls, doc_id=None, specs=None):\n final_specs = {}\n if doc_id:\n final_specs['_id'] = cls._id_type(doc_id)\n if specs:\n final_specs.update(specs)\n cls._add_shard(final_specs)\n return final_specs", "def __init__(self, build_specification):\n _reader_interface = self.reader_factory(mode=\"json\")\n _reader = _reader_interface(build_specification)\n\n _objects = [] # empty roster of objects, fill is a value Object is created\n for item in _reader.data:\n class_kwargs = _reader.data[item]\n 
module_name = class_kwargs.get(\"module\", None)\n if module_name is not None:\n module_dict = dict(name=f\".{module_name}\", package=\"pubsub\")\n try:\n the_module = import_module(**module_dict)\n try:\n the_class = getattr(the_module, module_name.capitalize())\n if the_class:\n _objects.append(the_class(**class_kwargs))\n except AttributeError:\n print(\n f\"Skipping module {the_module.__name__}; class {module_name.capitalize()} not found.\"\n )\n except ModuleNotFoundError:\n print(\n f'Skipping module {module_dict[\"package\"] + module_dict[\"name\"]}; module not found.'\n )\n\n # connect the publish-subscribe mechanism\n subscribers = [item for item in _objects if isinstance(item, ISubscriber)]\n for item in subscribers:\n print(f\"{item.name} has an ISubscriber interface\")\n\n publishers = [item for item in _objects if isinstance(item, IPublisher)]\n for item in publishers:\n print(f\"{item.name} has an IPublisher interface\")\n for who in subscribers:\n item.connect(who)\n\n for item in publishers:\n print(f\"{item.name} responds to a command and pubishes:\")\n item.publish()", "def create(self, spec, force_cache=False, image_dir=\"~/.hyperkit\"):", "def __init__(self, json=None, verbose=True):\n\n self.verbose = verbose\n if json:\n self.composite_ = self.build_engine_from_json(json=json, verbose=verbose)\n self.prebuilt_ = True\n else:\n self.prebuilt_ = False", "def to_spec(self) -> dict[str, typing.Any]:\n spec = {\n \"name\": self.name,\n \"title\": self.title,\n \"comment\": self.comment,\n \"references\": self.references,\n \"institution\": self.institution,\n \"hierarchical\": self.hierarchical,\n \"last_update\": self.last_update.isoformat(),\n }\n if self.version is not None:\n spec[\"version\"] = self.version\n categories = {}\n for cat in self.values():\n code, cat_spec = cat.to_spec()\n categories[code] = cat_spec\n spec[\"categories\"] = categories\n\n return spec", "def generate(self):\n fleet_config = self._build_base_object()\n fleet_config['LaunchSpecifications'] = list(self._build_launch_specs_object())\n return fleet_config", "def _build_study_spec(study_spec: study_pb2.StudySpec, state: int,\n creation_time: datetime.datetime) -> study_pb2.StudySpec:\n study_spec.state = state\n study_spec.creation_time.FromDatetime(creation_time)\n return study_spec", "def build_rspec(cls, slice_urn, resources, leases, flowspace, vms, rspec_version='GENI 3 request'):\n import time\n start_time = None\n end_time = None\n\n # Default duration for WiLab is 2 hours\n duration_default = 120\n for lease in leases:\n if 'end_time' in lease:\n end_time = lease['end_time']\n start_time = lease['start_time']\n break\n\n if start_time is None:\n # start_time = Now\n start_time = time.time()\n\n if end_time is None:\n end_time = int(start_time + duration_default*60)\n #raise Exception, \"end_time is mandatory in leases\"\n\n # duration in seconds from now till end_time\n duration = end_time - start_time\n # duration in minutes\n duration = duration / 60\n duration = int(duration)\n if duration < duration_default:\n duration = duration_default\n Log.tmp(\"start_time = \",start_time)\n Log.tmp(\"end_time = \",end_time)\n Log.tmp(\"duration = \",duration)\n # RSpec will have expires date = now + duration\n rspec = RSpec(version=rspec_version, ttl=duration, expires=end_time)\n\n nodes = []\n channels = []\n links = []\n\n # XXX Here it is only about mappings and hooks between ontologies\n i = 0\n for urn in resources:\n # XXX TO BE CORRECTED, this handles None values\n if not urn:\n 
continue\n resource = dict()\n # TODO: take into account the case where we send a dict of URNs without keys\n #resource['component_id'] = resource.pop('urn')\n resource['component_id'] = urn\n resource_hrn, resource_type = urn_to_hrn(urn) # resource['component_id'])\n # build component_manager_id\n\n # The only change for WiLab compared to Generic SFAWrapParser\n cm = urn.split(\"+\")\n resource['component_manager_id'] = \"%s+%s+authority+cm\" % (cm[0],cm[1])\n\n #print \"resource_type\", resource_type\n if resource_type == 'node':\n #print \"NODE\", resource, cls\n resource['client_id'] = \"PC\" + str(i)\n resource = cls.on_build_resource_hook(resource)\n nodes.append(resource)\n elif resource_type == 'link':\n links.append(resource)\n elif resource_type == 'channel':\n channels.append(resource)\n else:\n raise Exception, \"Not supported type of resource\" \n\n i = i + 1\n #for node in nodes:\n # print \"NODE:\", node\n\n rspec.version.add_nodes(nodes, rspec_content_type=\"request\")\n #rspec.version.add_links(links)\n #rspec.version.add_channels(channels)\n\n #sfa_leases = cls.manifold_to_sfa_leases(leases, slice_urn)\n ##print \"sfa_leases\", sfa_leases\n #if sfa_leases:\n # # SFAWRAP BUG ???\n # # rspec.version.add_leases bugs with an empty set of leases\n # # slice_id = leases[0]['slice_id']\n # # TypeError: list indices must be integers, not str\n # rspec.version.add_leases(sfa_leases, []) # XXX Empty channels for now\n return rspec.toxml()", "def get_spec(self):\n from schematics.types import ModelType\n spec = {\n 'id': self.name,\n 'description': self.description,\n 'addressable': self.array,\n 'required': self.required,\n }\n if self.type.has_schema:\n spec['schema'] = self.type.get_spec()\n else:\n spec.update(self.type.get_spec())\n\n return spec", "def __init__(self):\n self.swagger_types = {\n 'detail_type': 'str',\n 'name': 'str',\n 'store_data': 'object',\n 'discovered': 'datetime',\n 'extraction_failure': 'bool',\n 'in_trash': 'bool',\n 'is_extracted': 'bool',\n 'meta_available': 'bool',\n 'size': 'int',\n 'start_time': 'datetime',\n 'end_time': 'datetime',\n 'duration': 'float',\n 'messages': 'int',\n 'tags': 'list[Tag]'\n }\n\n self.attribute_map = {\n 'detail_type': 'detail_type',\n 'name': 'name',\n 'store_data': 'store_data',\n 'discovered': 'discovered',\n 'extraction_failure': 'extraction_failure',\n 'in_trash': 'in_trash',\n 'is_extracted': 'is_extracted',\n 'meta_available': 'meta_available',\n 'size': 'size',\n 'start_time': 'start_time',\n 'end_time': 'end_time',\n 'duration': 'duration',\n 'messages': 'messages',\n 'tags': 'tags'\n }\n\n self._detail_type = None\n self._name = None\n self._store_data = None\n self._discovered = None\n self._extraction_failure = None\n self._in_trash = None\n self._is_extracted = None\n self._meta_available = None\n self._size = None\n self._start_time = None\n self._end_time = None\n self._duration = None\n self._messages = None\n self._tags = None", "def build_specfile_header(spec):\n str = \"\"\n\n # first the mandatory sections\n mandatory_header_fields = {\n 'NAME' : '%%define name %s\\nName: %%{name}\\n',\n 'VERSION' : '%%define version %s\\nVersion: %%{version}\\n',\n 'PACKAGEVERSION' : '%%define release %s\\nRelease: %%{release}\\n',\n 'X_RPM_GROUP' : 'Group: %s\\n',\n 'SUMMARY' : 'Summary: %s\\n',\n 'LICENSE' : 'License: %s\\n',\n }\n\n str = str + SimpleTagCompiler(mandatory_header_fields).compile( spec )\n\n # now the optional tags\n optional_header_fields = {\n 'VENDOR' : 'Vendor: %s\\n',\n 'X_RPM_URL' : 'Url: 
%s\\n',\n 'SOURCE_URL' : 'Source: %s\\n',\n 'SUMMARY_' : 'Summary(%s): %s\\n',\n 'ARCHITECTURE' : 'BuildArch: %s\\n',\n 'X_RPM_DISTRIBUTION' : 'Distribution: %s\\n',\n 'X_RPM_ICON' : 'Icon: %s\\n',\n 'X_RPM_PACKAGER' : 'Packager: %s\\n',\n 'X_RPM_GROUP_' : 'Group(%s): %s\\n',\n\n 'X_RPM_REQUIRES' : 'Requires: %s\\n',\n 'X_RPM_PROVIDES' : 'Provides: %s\\n',\n 'X_RPM_CONFLICTS' : 'Conflicts: %s\\n',\n 'X_RPM_BUILDREQUIRES' : 'BuildRequires: %s\\n',\n\n 'X_RPM_SERIAL' : 'Serial: %s\\n',\n 'X_RPM_EPOCH' : 'Epoch: %s\\n',\n 'X_RPM_AUTOREQPROV' : 'AutoReqProv: %s\\n',\n 'X_RPM_EXCLUDEARCH' : 'ExcludeArch: %s\\n',\n 'X_RPM_EXCLUSIVEARCH' : 'ExclusiveArch: %s\\n',\n 'X_RPM_PREFIX' : 'Prefix: %s\\n',\n\n # internal use\n 'X_RPM_BUILDROOT' : 'BuildRoot: %s\\n',\n }\n\n # fill in default values:\n # Adding a BuildRequires renders the .rpm unbuildable under systems which\n # are not managed by rpm, since the database to resolve this dependency is\n # missing (take Gentoo as an example)\n #if 'X_RPM_BUILDREQUIRES' not in spec:\n # spec['X_RPM_BUILDREQUIRES'] = 'scons'\n\n if 'X_RPM_BUILDROOT' not in spec:\n spec['X_RPM_BUILDROOT'] = '%{_tmppath}/%{name}-%{version}-%{release}'\n\n str = str + SimpleTagCompiler(optional_header_fields, mandatory=0).compile( spec )\n\n # Add any extra specfile definitions the user may have supplied.\n # These flags get no processing, they are just added.\n # github #3164: if we don't turn off debug package generation\n # the tests which build packages all fail. If there are no\n # extra flags, default to adding this one. If the user wants\n # to turn this back on, supply the flag set to None.\n\n if 'X_RPM_EXTRADEFS' not in spec:\n spec['X_RPM_EXTRADEFS'] = ['%global debug_package %{nil}']\n for extra in spec['X_RPM_EXTRADEFS']:\n str += extra + '\\n'\n\n return str", "def __init__(self, goal):\n self._name = goal.get('name', '')\n self._description = goal.get('description', '')\n self._build_type = goal.get('buildType', 'minSizeRel')\n self._build_vars = goal.get('buildVars', {})\n self._build_goal = goal.get('buildGoal', self._name)\n self._artifacts = goal.get('artifacts', [])\n self._builds = {}\n for b in goal['builds']:\n vars = b.get('buildVars', self._build_vars)\n type = b.get('buildType', self._build_type)\n build_goal = b.get('buildGoal', self._build_goal)\n description = b.get('description', '')\n arch = b['arch']\n script = b.get('script', None)\n artifacts = b.get('artifacts', self._artifacts)\n self._builds[arch] = BuildSpec(goal=build_goal,\n type=type,\n vars=vars,\n description=description,\n arch=arch,\n script=script,\n artifacts=artifacts)", "def test_constructor(self):\n # Record the model types of all the models to be created\n all_model_types = model_type_to_display_name.keys()\n\n # Record the attribute / value pairs that are common to all models.\n common_attr_value_dict = {\"data\": self.fake_df,\n \"name_spec\": self.fake_names,\n \"design\": self.fake_design,\n \"ind_var_names\": self.fake_names[\"x\"],\n \"alt_id_col\": self.alt_id_col,\n \"obs_id_col\": self.obs_id_col,\n \"choice_col\": self.choice_col,\n \"specification\": self.fake_specification,\n \"alt_IDs\": self.fake_df[\"alt_id\"].values,\n \"choices\": self.fake_df[\"choice\"].values}\n\n # Create a shape name dictionary to relate the various models to the\n # names of their shape parameters.\n shape_name_dict = {\"MNL\": None,\n \"Asym\": self.fake_shape_names[:2],\n \"Cloglog\": None,\n \"Scobit\": self.fake_shape_names,\n \"Uneven\": self.fake_shape_names,\n \"Nested Logit\": 
None,\n \"Mixed Logit\": None}\n\n # Create a shape reference position dictionary to relate the various\n # models to their shape reference positions.\n shape_ref_dict = {}\n for key in shape_name_dict:\n shape_ref_dict[key] = (None if key != \"Asym\" else\n self.fake_shape_ref_pos)\n\n # Create an intercept_names and intercept_ref_position dictionary to\n # relate the various models to their respective kwargs.\n intercept_names_dict = {}\n intercept_ref_dict = {}\n for key in shape_name_dict:\n if key in [\"MNL\", \"Nested Logit\", \"Mixed Logit\"]:\n intercept_names_dict[key] = None\n intercept_ref_dict[key] = None\n else:\n intercept_names_dict[key] = self.fake_intercept_names\n intercept_ref_dict[key] = self.fake_intercept_ref_pos\n\n # Create a nest_names dictionary to relate the various models to their\n # nest_name attributes\n nest_name_dict = {}\n nest_spec_dict = {}\n for key in shape_name_dict:\n if key != \"Nested Logit\":\n nest_name_dict[key] = None\n nest_spec_dict[key] = None\n else:\n nest_name_dict[key] = list(self.fake_nest_spec.keys())\n nest_spec_dict[key] = self.fake_nest_spec\n\n # Create dictionaries for the mixing_id_col, mixing_vars, and\n # mixing_pos attributes\n mixing_id_col_dict = {}\n mixing_vars_dict = {}\n mixing_pos_dict = {}\n\n for key in shape_name_dict:\n if key != \"Mixed Logit\":\n mixing_id_col_dict[key] = None\n mixing_vars_dict[key] = None\n mixing_pos_dict[key] = None\n else:\n mixing_id_col_dict[key] = self.obs_id_col\n mixing_vars_dict[key] = self.fake_names[\"x\"]\n mixing_pos_dict[key] = [0]\n\n # Record the attribute / value pairs that vary across models\n varying_attr_value_dict = {\"model_type\": model_type_to_display_name,\n \"intercept_names\": intercept_names_dict,\n \"intercept_ref_position\":\n intercept_ref_dict,\n \"shape_names\": shape_name_dict,\n \"shape_ref_position\": shape_ref_dict,\n \"nest_names\": nest_name_dict,\n \"nest_spec\": nest_spec_dict,\n \"mixing_id_col\": mixing_id_col_dict,\n \"mixing_vars\": mixing_vars_dict,\n \"mixing_pos\": mixing_pos_dict}\n\n # Set up the keyword arguments that are needed for each of the model\n # types\n variable_kwargs = {}\n for model_name in all_model_types:\n variable_kwargs[model_name] = {}\n variable_kwargs[model_name][\"intercept_names\"] =\\\n intercept_names_dict[model_name]\n variable_kwargs[model_name][\"intercept_ref_pos\"] =\\\n intercept_ref_dict[model_name]\n variable_kwargs[model_name][\"shape_ref_pos\"] =\\\n shape_ref_dict[model_name]\n variable_kwargs[model_name][\"shape_names\"] =\\\n shape_name_dict[model_name]\n variable_kwargs[model_name][\"nest_spec\"] =\\\n nest_spec_dict[model_name]\n variable_kwargs[model_name][\"mixing_id_col\"] =\\\n mixing_id_col_dict[model_name]\n variable_kwargs[model_name][\"mixing_vars\"] =\\\n mixing_vars_dict[model_name]\n\n # Execute the test for each model type\n for model_name in all_model_types:\n # Update the model type in the list of constructor args\n self.constructor_args[-1] = model_name\n\n # Use this specific model's keyword arguments\n self.constructor_kwargs.update(variable_kwargs[model_name])\n\n # Construct the model object\n model_obj = pylogit.create_choice_model(*self.constructor_args,\n **self.constructor_kwargs)\n\n # Make sure that the constructor has all of the required attributes\n for attr in common_attr_value_dict:\n value = common_attr_value_dict[attr]\n if isinstance(value, pd.DataFrame):\n self.assertTrue(value.equals(model_obj.data))\n elif isinstance(value, np.ndarray):\n npt.assert_allclose(value,\n 
model_obj.__getattribute__(attr))\n else:\n self.assertEqual(value,\n model_obj.__getattribute__(attr))\n\n for attr in varying_attr_value_dict:\n value = varying_attr_value_dict[attr][model_name]\n\n self.assertEqual(value,\n model_obj.__getattribute__(attr))\n\n return None", "def build_specfile_sections(spec):\n str = \"\"\n\n mandatory_sections = {\n 'DESCRIPTION' : '\\n%%description\\n%s\\n\\n', }\n\n str = str + SimpleTagCompiler(mandatory_sections).compile( spec )\n\n optional_sections = {\n 'DESCRIPTION_' : '%%description -l %s\\n%s\\n\\n',\n 'CHANGELOG' : '%%changelog\\n%s\\n\\n',\n 'X_RPM_PREINSTALL' : '%%pre\\n%s\\n\\n',\n 'X_RPM_POSTINSTALL' : '%%post\\n%s\\n\\n',\n 'X_RPM_PREUNINSTALL' : '%%preun\\n%s\\n\\n',\n 'X_RPM_POSTUNINSTALL' : '%%postun\\n%s\\n\\n',\n 'X_RPM_VERIFY' : '%%verify\\n%s\\n\\n',\n\n # These are for internal use but could possibly be overridden\n 'X_RPM_PREP' : '%%prep\\n%s\\n\\n',\n 'X_RPM_BUILD' : '%%build\\n%s\\n\\n',\n 'X_RPM_INSTALL' : '%%install\\n%s\\n\\n',\n 'X_RPM_CLEAN' : '%%clean\\n%s\\n\\n',\n }\n\n # Default prep, build, install and clean rules\n # TODO: optimize those build steps, to not compile the project a second time\n if 'X_RPM_PREP' not in spec:\n spec['X_RPM_PREP'] = '[ -n \"$RPM_BUILD_ROOT\" -a \"$RPM_BUILD_ROOT\" != / ] && rm -rf \"$RPM_BUILD_ROOT\"' + '\\n%setup -q'\n\n if 'X_RPM_BUILD' not in spec:\n spec['X_RPM_BUILD'] = '[ ! -e \"$RPM_BUILD_ROOT\" -a \"$RPM_BUILD_ROOT\" != / ] && mkdir \"$RPM_BUILD_ROOT\"'\n\n if 'X_RPM_INSTALL' not in spec:\n spec['X_RPM_INSTALL'] = 'scons --install-sandbox=\"$RPM_BUILD_ROOT\" \"$RPM_BUILD_ROOT\"'\n\n if 'X_RPM_CLEAN' not in spec:\n spec['X_RPM_CLEAN'] = '[ -n \"$RPM_BUILD_ROOT\" -a \"$RPM_BUILD_ROOT\" != / ] && rm -rf \"$RPM_BUILD_ROOT\"'\n\n str = str + SimpleTagCompiler(optional_sections, mandatory=0).compile( spec )\n\n return str", "def build_model(self):\n pass", "def build_model(self):\n pass", "def make(self):\n pass", "def build_model(self):\n raise NotImplementedError", "def object_specs(self):\n if self._object_specs is None:\n self.object_specs = self.generate_object_specs()\n \n return self._object_specs", "def build(self):\n # Clean all fields.\n self._clean_fields()\n\n # Build", "def _component_specs(self):\n specs = dict(pretransformed_input=self._input_spec)\n if self._transform_is_composite:\n specs['bijector'] = self.transform_or_spec\n return specs", "def build(c):", "def build(self, obj):\n if isinstance(obj, self.art_type):\n return obj\n elif isinstance(obj, (tuple, list, dict, set)):\n if obj.__class__ is tuple:\n return self.build_tuple(obj)\n elif obj.__class__ is dict:\n return self.build_dict(obj)\n elif obj.__class__ is list:\n return self.build_list(obj)\n else:\n return self.build_set(obj)\n elif isinstance(obj, SageObject):\n return self.build_from_magic_method(obj)\n else:\n return self.build_from_string(obj)", "def _build(specs_dict: dict, **kwargs: bool):\n return [\n Card(face, suit, value=specs_dict.get(face).get(suit), **kwargs)\n for face in specs_dict.keys()\n for suit in specs_dict.get(face).keys()\n ]", "def build(self, args: Args) -> OpenSCADObject:\n raise NotImplementedError(\"This must be overwritten\")", "def __init__(self):\n self.drones = ZergUnit(UnitTypeId.DRONE, to_count=0)\n self.lings = ZergUnit(UnitTypeId.ZERGLING, to_count=999)\n self.queens = ZergUnit(UnitTypeId.QUEEN, to_count=3)\n self.roaches = ZergUnit(UnitTypeId.ROACH, to_count=100, priority=True)\n self.ravagers = ZergUnit(UnitTypeId.RAVAGER, to_count=0)\n self.defense_spines = 
DefensiveBuilding(\n unit_type=UnitTypeId.SPINECRAWLER, position_type=DefensePosition.Entrance, to_base_index=1, to_count=3\n )\n self.gas = StepBuildGas(to_count=3)\n\n unit_building = BuildOrder(\n [\n Step(None, self.drones, skip_until=self.should_build_drones),\n Step(UnitExists(UnitTypeId.SPAWNINGPOOL), self.defense_spines),\n Step(\n RequiredAll([UnitExists(UnitTypeId.ROACHWARREN), UnitExists(UnitTypeId.ROACH)]),\n self.ravagers,\n skip_until=self.should_build_ravagers,\n ),\n Step(UnitExists(UnitTypeId.ROACHWARREN), self.roaches),\n Step(\n RequiredAll(\n [\n UnitExists(UnitTypeId.SPAWNINGPOOL),\n UnitExists(\n UnitTypeId.ROACHWARREN,\n include_pending=True,\n include_not_ready=True,\n include_killed=True,\n ),\n ]\n ),\n self.lings,\n ),\n Step(UnitExists(UnitTypeId.SPAWNINGPOOL), self.queens),\n Step(UnitExists(UnitTypeId.SPAWNINGPOOL), self.lings),\n ]\n )\n\n buildings: BuildOrder = BuildOrder(\n [\n Step(None, ActBuilding(UnitTypeId.SPAWNINGPOOL, to_count=1)),\n Step(UnitExists(UnitTypeId.SPAWNINGPOOL), ActBuilding(UnitTypeId.ROACHWARREN, to_count=1)),\n Step(None, self.gas, skip_until=self.should_build_gas),\n ]\n )\n\n super().__init__(buildings, unit_building)", "def build(self, *args, **kwargs):\n return", "def _component_specs(self):\n specs = dict(pretransformed_input=self._input_spec)\n if self._transform_is_composite:\n specs['transform_fn'] = self.transform_or_spec\n if self._also_track_spec is not None:\n specs['also_track'] = self._also_track_spec\n return specs", "def build_document(self):\n pass", "def _build_model(self):\n raise NotImplementedError()", "def build(cls, serial, thisUpdate, nextUpdate, names_and_objs, keypair, certs, version = 0):\n\n filelist = []\n for name, obj in names_and_objs:\n filelist.append((name.rpartition(\"/\")[2], sha256(obj.get_DER())))\n filelist.sort(key = lambda x: x[0])\n\n obj = cls.POW_class()\n obj.setVersion(version)\n obj.setManifestNumber(serial)\n obj.setThisUpdate(thisUpdate)\n obj.setNextUpdate(nextUpdate)\n obj.setAlgorithm(rpki.oids.id_sha256)\n obj.addFiles(filelist)\n\n self = cls(POW = obj)\n self.sign(keypair, certs)\n return self", "def make_pod_spec(self):\n spec = {\n 'containers': [{\n 'name': self.framework.model.app.name,\n 'imageDetails': {\n },\n 'ports': [{\n 'containerPort':\n self.framework.model.config['advertised-port'],\n 'protocol': 'TCP',\n }],\n }],\n }\n return spec", "def __init__(self, jsondict=None, strict=True):\n \n self.systemType = None\n \"\"\" The standard that is used to operate and communicate.\n Type `str`. \"\"\"\n \n self.version = None\n \"\"\" The version of the standard that is used to operate and communicate.\n Type `str`. \"\"\"\n \n super(DeviceDefinitionSpecialization, self).__init__(jsondict=jsondict, strict=strict)", "def test_create_namespaced_build(self):\n pass", "def define(cls, spec):\n super(kkr_bs_wc, cls).define(spec)\n # here inputs are defined\n spec.input(\n 'wf_parameters',\n valid_type=Dict,\n required=False,\n default=lambda: Dict(dict=cls._wf_default),\n help='Parameters of the bandstructure workflow (see output of kkr_bs_wc.get_wf_default() for more details).'\n )\n spec.input(\n 'options',\n valid_type=Dict,\n required=False,\n default=lambda: Dict(dict=cls._options_default),\n help='Computer options (walltime etc.) 
passed onto KkrCalculation'\n )\n spec.input(\n 'remote_data',\n valid_type=RemoteData,\n required=True,\n help='Parent folder of previoously converged KkrCalculation'\n )\n spec.input('kkr', valid_type=Code, required=True, help='KKRhost code, needed to run the qdos KkrCalculation')\n spec.input(\n 'kpoints',\n valid_type=KpointsData,\n required=False,\n help=\n 'K-points data for the calculation. If not given the seekpath library is used to find the irreducable k-points of a structure.'\n )\n spec.input('label', valid_type=Str, required=False, help='label for the workflow')\n spec.input('description', valid_type=Str, required=False, help='description for the workflow')\n spec.input(\n 'initial_noco_angles',\n valid_type=Dict,\n required=False,\n help=\"\"\"Initial non-collinear angles for the magnetic moments. See KkrCalculation for details.\n If this is found in the input potentially extracted nonco angles from the parent calulation are overwritten!\"\"\"\n )\n # maybe overwrite some settings from the KKRhost convergence run\n spec.input(\n 'params_kkr_overwrite',\n valid_type=Dict,\n required=False,\n help='Overwrite some input parameters of the parent KKR calculation.'\n )\n\n # Here outputs are defined\n spec.output('results_wf', valid_type=Dict, required=True)\n spec.output('BS_Data', valid_type=ArrayData, required=True)\n\n # Here outlines are being specified\n spec.outline(\n # For initialiging workflow\n cls.start,\n cls.validate_input,\n cls.set_params_BS,\n cls.get_BS,\n cls.return_results\n )\n # definition of exit code in case something goes wrong in this workflow\n spec.exit_code(161, 'ERROR_NO_INPUT_REMOTE_DATA', 'No remote_data was provided as Input')\n spec.exit_code(\n 162, 'ERROR_KKRCODE_NOT_CORRECT', 'The code you provided for kkr does not use the plugin kkr.kkr'\n )\n spec.exit_code(\n 163, 'ERROR_CALC_PARAMETERS_INVALID',\n 'calc_parameters given are not consistent! 
Hint: did you give an unknown keyword?'\n )\n spec.exit_code(164, 'ERROR_CALC_PARAMETERS_INCOMPLETE', 'calc_parameters not complete')\n spec.exit_code(165, 'ERROR_BS_CALC_FAILED', 'KKR Band Structure calculation failed')\n spec.exit_code(166, 'ERROR_NO_KPOINTS_EXTRACTED', 'No K-POINTS can be extracted from the structure data')\n spec.exit_code(\n 167, 'ERROR_INCORRECT_KPOINTS_EXTRACTED',\n 'No K-POINTS can be extracted from the primtive structure data rather conventional structure data'\n )\n spec.exit_code(\n 168, 'ERROR_INVALID_REMOTE_DATA_TPYE',\n 'Input remote_data node neither output of a KKR/voronoi calculation nor of kkr_scf_wc workflow'\n )", "def __init__(self, **kwargs):\n self.swagger_types = {\n 'key': 'str',\n 'display_name': 'str',\n 'description': 'str',\n 'glossary_key': 'str',\n 'parent_term_key': 'str',\n 'is_allowed_to_have_child_terms': 'bool',\n 'path': 'str',\n 'lifecycle_state': 'str',\n 'time_created': 'datetime',\n 'time_updated': 'datetime',\n 'created_by_id': 'str',\n 'updated_by_id': 'str',\n 'owner': 'str',\n 'workflow_status': 'str',\n 'uri': 'str',\n 'associated_object_count': 'int',\n 'associated_objects': 'list[TermAssociatedObject]'\n }\n\n self.attribute_map = {\n 'key': 'key',\n 'display_name': 'displayName',\n 'description': 'description',\n 'glossary_key': 'glossaryKey',\n 'parent_term_key': 'parentTermKey',\n 'is_allowed_to_have_child_terms': 'isAllowedToHaveChildTerms',\n 'path': 'path',\n 'lifecycle_state': 'lifecycleState',\n 'time_created': 'timeCreated',\n 'time_updated': 'timeUpdated',\n 'created_by_id': 'createdById',\n 'updated_by_id': 'updatedById',\n 'owner': 'owner',\n 'workflow_status': 'workflowStatus',\n 'uri': 'uri',\n 'associated_object_count': 'associatedObjectCount',\n 'associated_objects': 'associatedObjects'\n }\n\n self._key = None\n self._display_name = None\n self._description = None\n self._glossary_key = None\n self._parent_term_key = None\n self._is_allowed_to_have_child_terms = None\n self._path = None\n self._lifecycle_state = None\n self._time_created = None\n self._time_updated = None\n self._created_by_id = None\n self._updated_by_id = None\n self._owner = None\n self._workflow_status = None\n self._uri = None\n self._associated_object_count = None\n self._associated_objects = None", "def test_factory_methods(self):\n\n po = ProjectObject.gen_bounding_box_object(id=\"1\", bounds=self.bounds)\n self.assertEqual(po.project_type, \"bounding_box\")\n self.assertAlmostEqual(po.bounds, self.bounds)\n self.assertEqual(po.id, \"1\")\n\n po = ProjectObject.gen_voxels_object(id=\"2\", voxels=self.voxels)\n self.assertEqual(po.project_type, \"voxels\")\n self.assertAlmostEqual(po.voxels.bounds(), self.voxels.bounds())\n self.assertEqual(po.id, \"2\")\n\n po = ProjectObject.gen_meshes_object(id=\"3\", meshes=self.meshes)\n self.assertEqual(po.project_type, \"meshes\")\n self.assertEqual(\n po.meshes.num_primitive_meshes(), self.meshes.num_primitive_meshes()\n )\n self.assertEqual(po.id, \"3\")", "def test_quick_build(self):\n pass", "def __init__(self, pathspec, properties={}):\n import numpy\n self.pathspec = pathspec\n super(ArraySpec,self).__init__(numpy.ndarray)\n self.properties = OrderedDict(properties)", "def generate(self):\n\t\traise BuilderException(\"You can not use this class directly!\")", "def create(self, validated_data):\n new_spec = Specification(key = validated_data.get('key'),\n value = validated_data.get('value'),\n category = validated_data.get('category'),\n car = validated_data.get('car'),)\n 
new_spec.save()\n\n return new_spec", "def getBuilder(name):", "def getBuilder(name):", "def build(self):\n self.build_inputs()\n self.build_image_embeddings()\n self.build_seq_embeddings()\n self.build_encoder()\n self.build_prediction_model()\n self.setup_encoder_initializer()\n self.setup_global_step()\n self.list_trainable_variables()", "def build_mock(self, root=None, arch=None):\n\n scrub_mock = [\n self.mock,\n '--root=%s' % self.root,\n '--arch=%s' % self.arch,\n '--scrub=all']\n output, errors = self._run_command(scrub_mock)\n\n init_mock = [\n self.mock,\n '--root=%s' % self.root,\n '--arch=%s' % self.arch,\n '--init']\n output, errors = self._run_command(init_mock)\n print output, errors\n \"\"\"\n status = self._parse_build_status(errors)\n Do something with status.\n Not sure if it's even useful at this point\n \"\"\"", "def __init__(self):\n self.swagger_types = {\n 'annotations': 'dict(str, str)',\n 'end_time': 'int',\n 'hosts': 'list[str]',\n 'is_ephemeral': 'bool',\n 'is_user_event': 'bool',\n 'name': 'str',\n 'start_time': 'int',\n 'summarized_events': 'int',\n 'table': 'str',\n 'tags': 'list[str]'\n }\n\n self.attribute_map = {\n 'annotations': 'annotations',\n 'end_time': 'endTime',\n 'hosts': 'hosts',\n 'is_ephemeral': 'isEphemeral',\n 'is_user_event': 'isUserEvent',\n 'name': 'name',\n 'start_time': 'startTime',\n 'summarized_events': 'summarizedEvents',\n 'table': 'table',\n 'tags': 'tags'\n }\n\n self._annotations = None\n self._end_time = None\n self._hosts = None\n self._is_ephemeral = False\n self._is_user_event = False\n self._name = None\n self._start_time = None\n self._summarized_events = None\n self._table = None\n self._tags = None", "def _build(self, prefilt=None):\n self.make_filiation()\n if prefilt is not None:\n self.prefilter(filt=prefilt)\n self.make_trees()\n return", "def __init__(self):\n self.swagger_types = {\n 'owner_id': 'str',\n 'created_at': 'datetime',\n 'identifier': 'str',\n 'identifier_type': 'str',\n 'default_language': 'str',\n 'optional_identifier': 'str',\n 'id': 'str',\n 'v': 'float',\n 'id': 'str',\n 'case_records': 'list[str]'\n }\n\n self.attribute_map = {\n 'owner_id': '_ownerId',\n 'created_at': '_createdAt',\n 'identifier': 'identifier',\n 'identifier_type': 'identifierType',\n 'default_language': 'defaultLanguage',\n 'optional_identifier': 'optionalIdentifier',\n 'id': '_id',\n 'v': '__v',\n 'case_records': 'caseRecords'\n }\n\n self._owner_id = None\n self._created_at = None\n self._identifier = None\n self._identifier_type = None\n self._default_language = None\n self._optional_identifier = None\n self._id = None\n self._v = None\n self._id = None\n self._case_records = None", "def __init__(self):\n self.swagger_types = {\n 'id': 'str',\n 'name': 'str',\n 'uses_git': 'bool',\n 'git_remote_url': 'str',\n 'git_username': 'str',\n 'git_password': 'str',\n 'git_username_user_attribute': 'str',\n 'git_password_user_attribute': 'str',\n 'git_service_name': 'str',\n 'deploy_secret': 'str',\n 'unset_deploy_secret': 'bool',\n 'pull_request_mode': 'str',\n 'validation_required': 'bool',\n 'allow_warnings': 'bool',\n 'is_example': 'bool',\n 'can': 'dict(str, bool)'\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'name': 'name',\n 'uses_git': 'uses_git',\n 'git_remote_url': 'git_remote_url',\n 'git_username': 'git_username',\n 'git_password': 'git_password',\n 'git_username_user_attribute': 'git_username_user_attribute',\n 'git_password_user_attribute': 'git_password_user_attribute',\n 'git_service_name': 'git_service_name',\n 
'deploy_secret': 'deploy_secret',\n 'unset_deploy_secret': 'unset_deploy_secret',\n 'pull_request_mode': 'pull_request_mode',\n 'validation_required': 'validation_required',\n 'allow_warnings': 'allow_warnings',\n 'is_example': 'is_example',\n 'can': 'can'\n }\n\n self._id = None\n self._name = None\n self._uses_git = None\n self._git_remote_url = None\n self._git_username = None\n self._git_password = None\n self._git_username_user_attribute = None\n self._git_password_user_attribute = None\n self._git_service_name = None\n self._deploy_secret = None\n self._unset_deploy_secret = None\n self._pull_request_mode = None\n self._validation_required = None\n self._allow_warnings = None\n self._is_example = None\n self._can = None", "def __init__(self):\n BuildSystemBase.__init__(self, \"makefile\")", "def build(self):\n if not hasattr(self, 'subtitle'):\n self.subtitle = self.data_code['subtitle']\n #print('ntimes=%s nelements=%s ntotal=%s subtitle=%s' % (\n #self.ntimes, self.nelements, self.ntotal, self.subtitle))\n if self.is_built:\n return\n nnodes = 1\n\n #self.names = []\n #self.nelements //= nnodes\n self.nelements //= self.ntimes\n self.ntotal = self.nelements * nnodes * 2\n #self.ntotal\n self.itime = 0\n self.ielement = 0\n self.itotal = 0\n #print('ntotal=%s ntimes=%s nelements=%s' % (self.ntotal, self.ntimes, self.nelements))\n\n #print(\"ntimes=%s nelements=%s ntotal=%s\" % (self.ntimes, self.nelements, self.ntotal))\n self._times = np.zeros(self.ntimes, 'float32')\n #self.ntotal = self.nelements * nnodes\n\n self.element_node = np.zeros((self.ntotal, 2), 'int32')\n\n # the number is messed up because of the offset for the element's properties\n if not self.nelements * nnodes * 2 == self.ntotal:\n msg = 'ntimes=%s nelements=%s nnodes=%s ne*nn=%s ntotal=%s' % (\n self.ntimes, self.nelements, nnodes, self.nelements * nnodes,\n self.ntotal)\n raise RuntimeError(msg)\n\n # [angle, sc, sd, se, sf]\n self.data = np.zeros((self.ntimes, self.ntotal, 5), 'complex64')", "def test_init(self):\r\n p = TreeBuilder({})\r\n self.assertEqual(p.Name, 'TreeBuilder')\r\n self.assertEqual(p.Params, {})", "def __init__(self):\n self.swagger_types = {\n 'is_waiting': 'bool',\n 'is_active': 'bool',\n 'is_acd': 'bool',\n 'is_preferred': 'bool',\n 'is_screenshare': 'bool',\n 'is_cobrowse': 'bool',\n 'is_voicemail': 'bool',\n 'is_flagged': 'bool',\n 'is_monitored': 'bool',\n 'filter_wrap_up_notes': 'bool',\n 'match_all': 'bool'\n }\n\n self.attribute_map = {\n 'is_waiting': 'isWaiting',\n 'is_active': 'isActive',\n 'is_acd': 'isAcd',\n 'is_preferred': 'isPreferred',\n 'is_screenshare': 'isScreenshare',\n 'is_cobrowse': 'isCobrowse',\n 'is_voicemail': 'isVoicemail',\n 'is_flagged': 'isFlagged',\n 'is_monitored': 'isMonitored',\n 'filter_wrap_up_notes': 'filterWrapUpNotes',\n 'match_all': 'matchAll'\n }\n\n self._is_waiting = None\n self._is_active = None\n self._is_acd = None\n self._is_preferred = None\n self._is_screenshare = None\n self._is_cobrowse = None\n self._is_voicemail = None\n self._is_flagged = None\n self._is_monitored = None\n self._filter_wrap_up_notes = None\n self._match_all = None", "def __init__(self):\n self.swagger_types = {\n 'detail_type': 'str',\n 'identifier': 'int',\n 'success': 'bool',\n 'description': 'str',\n 'duration': 'float',\n 'bag_name': 'str',\n 'bag_store_name': 'str',\n 'results': 'object',\n 'bag': 'BagSummary'\n }\n\n self.attribute_map = {\n 'detail_type': 'detail_type',\n 'identifier': 'identifier',\n 'success': 'success',\n 'description': 'description',\n 
'duration': 'duration',\n 'bag_name': 'bag_name',\n 'bag_store_name': 'bag_store_name',\n 'results': 'results',\n 'bag': 'bag'\n }\n\n self._detail_type = None\n self._identifier = None\n self._success = None\n self._description = None\n self._duration = None\n self._bag_name = None\n self._bag_store_name = None\n self._results = None\n self._bag = None", "def spec(self):\n return self._spec", "def spec(self):\n return self._spec", "def to_spec(self) -> dict[str, typing.Any]:\n # we can't call Categorization.to_spec here because we need to control ordering\n # in the returned dict so that we get nicely ordered yaml files.\n spec = {\n \"name\": self.name,\n \"title\": self.title,\n \"comment\": self.comment,\n \"references\": self.references,\n \"institution\": self.institution,\n \"hierarchical\": self.hierarchical,\n \"last_update\": self.last_update.isoformat(),\n }\n if self.version is not None:\n spec[\"version\"] = self.version\n spec[\"total_sum\"] = self.total_sum\n if self.canonical_top_level_category is not None:\n spec[\n \"canonical_top_level_category\"\n ] = self.canonical_top_level_category.codes[0]\n\n spec[\"categories\"] = {}\n for cat in self.values():\n code, cat_spec = cat.to_spec()\n spec[\"categories\"][code] = cat_spec\n\n return spec", "def build_model(self) -> DM:\n model = DM()\n model[self.modelroot] = content = DM()\n\n content['key'] = self.key\n content['id'] = self.id\n content['system-family'] = self.family\n for cp in self.parameters:\n content.append('calculation-parameter', DM(cp))\n\n self._set_model(model)\n return model", "def build(self):\n self._remove_swarm_keys()\n self._remove_pod_keys()\n self._set_image()\n self._translate_docker_properties()", "def __init__(self, options, build_revision):\n\n self.options = options\n self._src_dir = os.path.abspath(options.src_dir)\n self._chrome_dir = os.path.join(self._src_dir, 'chrome')\n # TODO: This scode should not be grabbing so deeply into WebKit.\n # Worse, this code ends up looking at top-of-tree WebKit\n # instead of the revision in DEPS.\n self._webkit_dir = os.path.join(self._src_dir, 'third_party', 'WebKit',\n 'Source', 'WebCore')\n self._v8_dir = os.path.join(self._src_dir, 'v8')\n # TODO: need to get the build *output* directory passed in instead so Linux\n # and Mac don't have to walk up a directory to get to the right directory.\n if chromium_utils.IsWindows():\n self._build_dir = os.path.join(options.build_dir, options.target)\n self._tool_dir = os.path.join(self._chrome_dir, 'tools', 'build', 'win')\n elif chromium_utils.IsLinux():\n self._build_dir = os.path.join(os.path.dirname(options.build_dir),\n 'out', options.target)\n self._tool_dir = os.path.join(self._chrome_dir, 'tools', 'build', 'linux')\n elif chromium_utils.IsMac():\n self._build_dir = os.path.join(os.path.dirname(options.build_dir),\n 'xcodebuild', options.target)\n self._tool_dir = os.path.join(self._chrome_dir, 'tools', 'build', 'mac')\n else:\n raise NotImplementedError(\n 'Platform \"%s\" is not currently supported.' 
% sys.platform)\n self._staging_dir = slave_utils.GetStagingDir(self._src_dir)\n\n self._symbol_dir_base = options.dirs['symbol_dir_base']\n self._www_dir_base = options.dirs['www_dir_base']\n self._build_name = slave_utils.SlaveBuildName(self._src_dir)\n self._symbol_dir_base = os.path.join(self._symbol_dir_base,\n self._build_name)\n self._www_dir_base = os.path.join(self._www_dir_base, self._build_name)\n\n self._version_file = os.path.join(self._chrome_dir, 'VERSION')\n\n if options.default_chromium_revision:\n self._chromium_revision = options.default_chromium_revision\n else:\n self._chromium_revision = slave_utils.SubversionRevision(self._chrome_dir)\n if options.default_webkit_revision:\n self._webkit_revision = options.default_webkit_revision\n else:\n self._webkit_revision = slave_utils.SubversionRevision(self._webkit_dir)\n if options.default_v8_revision:\n self._v8_revision = options.default_v8_revision\n else:\n self._v8_revision = slave_utils.SubversionRevision(self._v8_dir)\n self.last_change_file = os.path.join(self._staging_dir, 'LAST_CHANGE')\n # The REVISIONS file will record the revisions information of the main\n # components Chromium/WebKit/V8.\n self.revisions_path = os.path.join(self._staging_dir, 'REVISIONS')\n self._build_revision = build_revision\n # Will be initialized in GetLastBuildRevision.\n self.last_chromium_revision = None\n self.last_webkit_revision = None\n self.last_v8_revision = None\n\n self._files_file = os.path.join(self._tool_dir,\n archive_utils.FILES_FILENAME)\n self._test_files = self.BuildOldFilesList(TEST_FILE_NAME)\n\n self._dual_upload = options.factory_properties.get('dual_upload', False)\n self._archive_files = None", "def build(self):\n level_dict = {'method': '',\n 'basis': None,\n 'auxiliary_basis': None,\n 'dispersion': None,\n 'cabs': None,\n 'method_type': None,\n 'software': None,\n 'software_version': None,\n 'compatible_ess': None,\n 'solvation_method': None,\n 'solvent': None,\n 'solvation_scheme_level': None,\n 'args': None}\n allowed_keys = list(level_dict.keys())\n\n if isinstance(self.repr, str):\n if ' ' in self.repr:\n # illegal inputs like 'dlpno-ccsd(t)/def2-svp def2-svp/c' or 'b3 lyp'\n raise ValueError(f'{self.repr} has empty spaces. Please use a dictionary format '\n f'to clearly specify method, basis, auxiliary basis, and dispersion in this case. '\n f'See documentation for more details.')\n if self.repr.count('/') >= 2:\n # illegal inputs like 'dlpno-ccsd(t)/def2-svp/def2-svp/c'\n raise ValueError(f'{self.repr} has multiple slashes. Please use a dictionary format '\n f'to specify method, basis, auxiliary basis, and dispersion in this case. 
'\n f'See documentation for more details.')\n if '/' not in self.repr:\n # e.g., 'AM1', 'XTB', 'CBS-QB3'\n # Note that this function is not designed to distinguish between composite and semi-empirical methods.\n level_dict['method'] = self.repr\n else:\n splits = self.repr.split('/')\n level_dict['method'] = splits[0]\n level_dict['basis'] = splits[1]\n\n elif isinstance(self.repr, dict):\n # also treats representations of LevelOfTheory.as_dict from a restart file\n if 'method' not in self.repr.keys():\n raise ValueError(f'The repr dictionary argument must at least have a \"method\" key, got:\\n{self.repr}')\n for key, value in self.repr.items():\n if key in allowed_keys and value:\n level_dict[key] = value\n elif key not in allowed_keys:\n raise ValueError(f'Got an illegal key \"{key}\" in level of theory dictionary representation'\n f'\\n{self.repr}')\n\n elif isinstance(self.repr, Level):\n level_dict = self.repr.as_dict()\n\n else:\n raise ValueError(f'The repr argument must be either a string, a dictionary or a Level type.\\n'\n f'Got {self.repr} which is a {type(self.repr)}.')\n\n self.repr = None # reset\n self.__init__(**level_dict)", "def __init__(self, specs, resources, properties=None):\n if not properties:\n properties = {}\n self.init_collections()\n self.properties = properties\n self.set_paths(specs, resources)\n self.parse_paths()\n self.find_resources()", "def setUp(self):\n self.definition = {\n 'required': ['data'],\n 'type': 'object',\n 'properties': {\n 'notifications': {\n 'items': {'type': 'string'},\n 'type': 'array'\n },\n 'data': {\n 'type': 'array',\n 'items': {\n 'required': ['email', 'display_name'],\n 'type': 'object',\n 'properties': {\n 'phone_number': {'type': 'string'},\n 'display_name': {'type': 'string'},\n 'email': {'type': 'string'},\n 'first_name': {'type': 'string'},\n 'last_name': {'type': 'string'},\n 'admin': {'type': 'string'},\n 'vehicles': {\n 'type': 'array',\n 'items': {\n 'required': ['vehicle_info_id', 'is_default'],\n 'type': 'object',\n 'example': {\n 'is_default': True,\n 'make_pretty': 'BMW',\n 'vehicle_info_id': 1234,\n 'year': 2011,\n 'make': 'bmw',\n 'model': '1-series-m',\n 'model_pretty': '1 Series M'\n },\n 'properties': {\n 'is_default': {'type': 'boolean'},\n 'make_pretty': {'type': 'string'},\n 'vehicle_info_id': {'type': 'integer'},\n 'year': {'type': 'string'},\n 'make': {'type': 'string'},\n 'model': {'type': 'string'},\n 'model_pretty': {'type': 'string'}\n }\n }\n }\n }\n }\n }\n }\n }\n\n self.actual = {\n u'notifications': [],\n u'data':\n [\n {\n u'phone_number': None,\n u'first_name': u'Lawrence',\n u'last_name': u'Kiss',\n u'display_name': u'Lawrence Kiss',\n u'vehicles': [\n {\n u'is_default': True,\n u'make_pretty': u'BMW',\n u'vehicle_info_id': 1234,\n u'year': u'2016',\n u'make': u'bmw',\n u'model': u'1-series-m',\n u'model-pretty': u'1 Series M'\n }\n ],\n u'email': u'[email protected]',\n },\n ]\n }", "def build_all(self):\n self.android_build()\n self.generate_patch_build('')\n self.generate_specs_build()\n self.generate_interfaces()" ]
[ "0.70841205", "0.66101444", "0.6534153", "0.6428157", "0.630121", "0.6247484", "0.6246544", "0.6246544", "0.62344396", "0.62344396", "0.62138087", "0.62038517", "0.61900073", "0.61408484", "0.6102088", "0.607086", "0.607086", "0.607086", "0.60343456", "0.599119", "0.58868694", "0.5854568", "0.58508486", "0.5838978", "0.5826844", "0.58148676", "0.58032656", "0.5801583", "0.5707092", "0.5694444", "0.56598616", "0.56196076", "0.5601306", "0.55915755", "0.5573882", "0.55234265", "0.55216193", "0.55186594", "0.55113375", "0.54886925", "0.5487181", "0.5485086", "0.54815674", "0.5478306", "0.5472114", "0.54707366", "0.5462443", "0.54602957", "0.5450126", "0.5444104", "0.54339415", "0.54339415", "0.54317766", "0.5426633", "0.5418842", "0.54155344", "0.5392302", "0.53900766", "0.53862256", "0.5382415", "0.53728265", "0.53701735", "0.5365603", "0.53638476", "0.5360134", "0.53550905", "0.5354869", "0.53536075", "0.5339549", "0.53292", "0.53165805", "0.5301636", "0.5295433", "0.5285617", "0.5282623", "0.5277901", "0.5276092", "0.5272866", "0.5272866", "0.52701473", "0.5270033", "0.5236692", "0.5229104", "0.52276665", "0.5224597", "0.52227837", "0.52192664", "0.52165335", "0.5212332", "0.52122486", "0.5211716", "0.5211716", "0.5208917", "0.52080256", "0.5205839", "0.52054584", "0.51992637", "0.5198731", "0.51801574", "0.51773274" ]
0.6370524
4
Builds the Property Filter Spec.
def build_property_filter_spec(client_factory, property_specs, object_specs):
    property_filter_spec = client_factory.create('ns0:PropertyFilterSpec')
    property_filter_spec.propSet = property_specs
    property_filter_spec.objectSet = object_specs
    return property_filter_spec
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_prop_filter_spec(client_factory, obj_spec, prop_spec):\r\n prop_filter_spec = \\\r\n client_factory.create('ns0:PropertyFilterSpec')\r\n prop_filter_spec.propSet = prop_spec\r\n prop_filter_spec.objectSet = obj_spec\r\n return prop_filter_spec", "def get_prop_filter_spec(client_factory, obj_spec, prop_spec):\n prop_filter_spec = client_factory.create('ns0:PropertyFilterSpec')\n prop_filter_spec.propSet = prop_spec\n prop_filter_spec.objectSet = obj_spec\n return prop_filter_spec", "def _createSpecificProperty(self, filter_name):\n import uno\n from com.sun.star.beans import PropertyValue\n if filter_name == \"impress_html_Export\":\n property = PropertyValue('FilterData', 0,\n uno.Any('[]com.sun.star.beans.PropertyValue',\n (PropertyValue('IsExportNotes', 0, True, 0),\n PropertyValue('PublishMode', 0, 0, 0),\n PropertyValue('Width', 0, 640, 0),\n PropertyValue('Format', 0, 2, 0),),), 0)\n elif filter_name == \"impress_pdf_Export\":\n property = PropertyValue('FilterData', 0,\n uno.Any('[]com.sun.star.beans.PropertyValue',\n (PropertyValue('ExportNotesPages', 0, True, 0),\n PropertyValue('SelectPdfVersion', 0, 1, 0),),), 0)\n elif \"pdf_Export\" in filter_name :\n property = PropertyValue('FilterData', 0,\n uno.Any('[]com.sun.star.beans.PropertyValue',\n (PropertyValue('SelectPdfVersion', 0, 1, 0),),), 0)\n elif filter_name in (\"draw_html_Export\", \"HTML (StarCalc)\"):\n property = PropertyValue('FilterData', 0,\n uno.Any('[]com.sun.star.beans.PropertyValue',\n (PropertyValue('Format', 0, 2, 0),),), 0)\n elif filter_name == \"Text (encoded)\":\n property = PropertyValue('FilterFlags', 0, 'UTF8,LF', 0)\n else:\n return []\n\n return [property, ]", "def _get_filters(self, req):\n filters = {}\n properties = {}\n\n for param in req.str_params:\n if param in SUPPORTED_FILTERS:\n filters[param] = req.str_params.get(param)\n if param.startswith('property-'):\n _param = param[9:]\n properties[_param] = req.str_params.get(param)\n\n if len(properties) > 0:\n filters['properties'] = properties\n\n return filters", "def build_property_spec(client_factory, type=\"VirtualMachine\",\r\n properties_to_collect=[\"name\"],\r\n all_properties=False):\r\n property_spec = client_factory.create('ns0:PropertySpec')\r\n property_spec.all = all_properties\r\n property_spec.pathSet = properties_to_collect\r\n property_spec.type = type\r\n return property_spec", "def _get_filters(self, req):\n filters = {}\n for param in req.str_params:\n if param in SUPPORTED_FILTERS or param.startswith('property-'):\n # map filter name or carry through if property-*\n filter_name = SUPPORTED_FILTERS.get(param, param)\n filters[filter_name] = req.str_params.get(param)\n return filters", "def __init__(self, *args, **kwargs):\n\n # Construct the base instance.\n super(FilterPropList, self).__init__(*args, **kwargs)\n\n # Construct the regular expression tag evaluators.\n nameregextag = self.thistag.find('PropNameRegex')\n if nameregextag != None:\n self.nameregex = RegexTag(nameregextag)\n else:\n self.nameregex = None\n\n valueregextag = self.thistag.find('PropValueRegex')\n if valueregextag != None:\n self.valueregex = RegexTag(valueregextag)\n else:\n self.valueregex = None\n\n # Make sure that at least one regular expression is specified.\n if self.nameregex == None and self.valueregex == None:\n raise ValueError('Required tag missing: '\\\n 'PropNameRegex or PropValueRegex')\n\n # Get the \"look for the first match\" flag.\n self.matchfirst = self.get_boolean('matchFirst')\n logger.debug('matchfirst = 
{0}'.format(self.matchfirst))\n\n # Get the path name.\n self.path = self.context.tokens['Path']\n logger.debug('path = {0}'.format(self.path))", "def _build_filter_part(self, cls, filters, order_by=None, select=None):\r\n import types\r\n query_parts = []\r\n\r\n order_by_filtered = False\r\n\r\n if order_by:\r\n if order_by[0] == \"-\":\r\n order_by_method = \"DESC\";\r\n order_by = order_by[1:]\r\n else:\r\n order_by_method = \"ASC\";\r\n\r\n if select:\r\n if order_by and order_by in select:\r\n order_by_filtered = True\r\n query_parts.append(\"(%s)\" % select)\r\n\r\n if isinstance(filters, str) or isinstance(filters, unicode):\r\n query = \"WHERE %s AND `__type__` = '%s'\" % (filters, cls.__name__)\r\n if order_by in [\"__id__\", \"itemName()\"]:\r\n query += \" ORDER BY itemName() %s\" % order_by_method\r\n elif order_by != None:\r\n query += \" ORDER BY `%s` %s\" % (order_by, order_by_method)\r\n return query\r\n\r\n for filter in filters:\r\n filter_parts = []\r\n filter_props = filter[0]\r\n if type(filter_props) != list:\r\n filter_props = [filter_props]\r\n for filter_prop in filter_props:\r\n (name, op) = filter_prop.strip().split(\" \", 1)\r\n value = filter[1]\r\n property = cls.find_property(name)\r\n if name == order_by:\r\n order_by_filtered = True\r\n if types.TypeType(value) == types.ListType:\r\n filter_parts_sub = []\r\n for val in value:\r\n val = self.encode_value(property, val)\r\n if isinstance(val, list):\r\n for v in val:\r\n filter_parts_sub.append(self._build_filter(property, name, op, v))\r\n else:\r\n filter_parts_sub.append(self._build_filter(property, name, op, val))\r\n filter_parts.append(\"(%s)\" % (\" OR \".join(filter_parts_sub)))\r\n else:\r\n val = self.encode_value(property, value)\r\n if isinstance(val, list):\r\n for v in val:\r\n filter_parts.append(self._build_filter(property, name, op, v))\r\n else:\r\n filter_parts.append(self._build_filter(property, name, op, val))\r\n query_parts.append(\"(%s)\" % (\" or \".join(filter_parts)))\r\n\r\n\r\n type_query = \"(`__type__` = '%s'\" % cls.__name__\r\n for subclass in self._get_all_decendents(cls).keys():\r\n type_query += \" or `__type__` = '%s'\" % subclass\r\n type_query +=\")\"\r\n query_parts.append(type_query)\r\n\r\n order_by_query = \"\"\r\n\r\n if order_by:\r\n if not order_by_filtered:\r\n query_parts.append(\"`%s` LIKE '%%'\" % order_by)\r\n if order_by in [\"__id__\", \"itemName()\"]:\r\n order_by_query = \" ORDER BY itemName() %s\" % order_by_method\r\n else:\r\n order_by_query = \" ORDER BY `%s` %s\" % (order_by, order_by_method)\r\n\r\n if len(query_parts) > 0:\r\n return \"WHERE %s %s\" % (\" AND \".join(query_parts), order_by_query)\r\n else:\r\n return \"\"", "def _get_filters(self, req):\n filters = {}\n properties = {}\n\n for param in req.params:\n if param in SUPPORTED_FILTERS:\n filters[param] = req.params.get(param)\n if param.startswith('property-'):\n _param = param[9:]\n properties[_param] = req.params.get(param)\n\n if 'changes-since' in filters:\n isotime = filters['changes-since']\n try:\n filters['changes-since'] = timeutils.parse_isotime(isotime)\n except ValueError:\n raise exc.HTTPBadRequest(_(\"Unrecognized changes-since value\"))\n\n if 'protected' in filters:\n value = self._get_bool(filters['protected'])\n if value is None:\n raise exc.HTTPBadRequest(_(\"protected must be True, or \"\n \"False\"))\n\n filters['protected'] = value\n\n # only allow admins to filter on 'deleted'\n if req.context.is_admin:\n deleted_filter = self._parse_deleted_filter(req)\n if 
deleted_filter is not None:\n filters['deleted'] = deleted_filter\n elif 'changes-since' not in filters:\n filters['deleted'] = False\n elif 'changes-since' not in filters:\n filters['deleted'] = False\n\n if properties:\n filters['properties'] = properties\n\n return filters", "def _build_filter(self, **kwargs):\n\n def object_filter(obj):\n for key, value in kwargs.items():\n # we replace dango-like lookup by dots, so attrgetter can do his job\n\n getter = utils.attrgetter(key)\n if hasattr(value, '__call__'):\n # User passed a callable for a custom comparison\n if not value(getter(obj)):\n return False\n else:\n if not getter(obj) == value:\n return False\n return True\n\n return object_filter", "def build_filters(self, filters=None):\n if filters is None:\n filters = {}\n\n grouped = get_grouped_filters(filters)\n branch_filters = get_branch_filter(filters)\n orm_filters = super(StoryResource, self).build_filters(filters)\n orm_filters['grouped'] = grouped\n orm_filters['br_filter'] = branch_filters\n\n if 'content_type__in' in filters:\n orm_filters['content_type__in'] = [CONTENT_HYDRATE[f] for f in filters['content_type__in'].split(',')]\n\n return orm_filters", "def __init__(self, *args, **kwargs):\n\n # Construct the base instance.\n super(FilterRevProp, self).__init__(*args, **kwargs)\n\n # Construct regular expression tag evaluators.\n nameregextag = self.thistag.find('PropNameRegex')\n if nameregextag != None:\n self.nameregex = RegexTag(nameregextag)\n else:\n self.nameregex = None\n\n valueregextag = self.thistag.find('PropValueRegex')\n if valueregextag != None:\n self.valueregex = RegexTag(valueregextag)\n else:\n self.valueregex = None\n\n # Make sure that at least one regular expression is specified.\n if self.nameregex == None and self.valueregex == None:\n raise ValueError('Required tag missing: '\\\n 'PropNameRegex or PropValueRegex')\n\n # Save the revision property details.\n self.propname = self.context.tokens['RevPropName']\n logger.debug('propname = {0}'.format(self.propname))\n self.propvalue = self.context.tokens['RevPropValue']\n logger.debug('propvalue = \"{0}\"'.format(self.propvalue))", "def _build(self, prefilt=None):\n self.make_filiation()\n if prefilt is not None:\n self.prefilter(filt=prefilt)\n self.make_trees()\n return", "def _generate_stats(self, host_state, filter_properties):\n\n filter_function = None\n\n if ('filter_function' in host_state.capabilities and\n host_state.capabilities['filter_function'] is not None):\n filter_function = str(\n host_state.capabilities['filter_function'])\n\n stats = utils.generate_stats(host_state, filter_properties)\n\n stats['filter_function'] = filter_function\n\n return stats", "def _propertyFilter(self, entity, params):\n\n if 'property_conditions' not in params:\n raise ProtocolError()\n\n conditions = params['property_conditions']\n\n for field, allowed_values in conditions.iteritems():\n if entity.__getattribute__(field) not in allowed_values:\n return False\n\n return True", "def __init__(self, filter_methods: ConfigNodePropertyArray=None, filter_enable_safe_user_agents: ConfigNodePropertyBoolean=None, filter_safe_user_agents: ConfigNodePropertyArray=None, filter_excluded_paths: ConfigNodePropertyArray=None): # noqa: E501\n self.openapi_types = {\n 'filter_methods': ConfigNodePropertyArray,\n 'filter_enable_safe_user_agents': ConfigNodePropertyBoolean,\n 'filter_safe_user_agents': ConfigNodePropertyArray,\n 'filter_excluded_paths': ConfigNodePropertyArray\n }\n\n self.attribute_map = {\n 'filter_methods': 
'filter.methods',\n 'filter_enable_safe_user_agents': 'filter.enable.safe.user.agents',\n 'filter_safe_user_agents': 'filter.safe.user.agents',\n 'filter_excluded_paths': 'filter.excluded.paths'\n }\n\n self._filter_methods = filter_methods\n self._filter_enable_safe_user_agents = filter_enable_safe_user_agents\n self._filter_safe_user_agents = filter_safe_user_agents\n self._filter_excluded_paths = filter_excluded_paths", "def _write_filter_params(self, spec):\n spec.switch_write_focus(self.REGIONS.FILTER_PARAMS.value)\n for param in self._filter_params:\n spec.write_value(param, data_type=DataType.FLOAT_64)", "def build_query_filters(**kwargs):\n queryTarget = kwargs.get(\"queryTarget\", None)\n targetSubtreeClass = kwargs.get(\"targetSubtreeClass\", None)\n queryTargetFilter = kwargs.get(\"queryTargetFilter\", None)\n rspSubtree = kwargs.get(\"rspSubtree\", None)\n rspSubtreeInclude = kwargs.get(\"rspSubtreeInclude\", None)\n rspPropInclude = kwargs.get(\"rspPropInclude\", None)\n opts = \"\"\n if queryTarget is not None:\n opts+= \"&query-target=%s\" % queryTarget\n if targetSubtreeClass is not None:\n opts+= \"&target-subtree-class=%s\" % targetSubtreeClass\n if queryTargetFilter is not None:\n opts+= \"&query-target-filter=%s\" % queryTargetFilter\n if rspSubtree is not None:\n opts+= \"&rsp-subtree=%s\" % rspSubtree\n if rspSubtreeInclude is not None:\n opts+= \"&rsp-subtree-include=%s\" % rspSubtreeInclude\n if rspPropInclude is not None:\n opts+= \"&rsp-prop-include=%s\" % rspPropInclude\n\n if len(opts)>0: opts = \"?%s\" % opts.strip(\"&\")\n return opts", "def get_filters(self):", "def test_fields_from_property():\n prop_template = PropertyTemplate(name=\"cookie eating template\", bounds=IntegerBounds(0, 1000))\n cond_template = ConditionTemplate(name=\"Hunger template\",\n bounds=CategoricalBounds([\"hungry\", \"full\", \"peckish\"]))\n prop = Property(name=\"number of cookies eaten\",\n template=prop_template,\n origin='measured',\n value=NominalInteger(27))\n cond = Condition(name=\"hunger level\",\n template=cond_template,\n origin='specified',\n value=NominalCategorical(\"hungry\"))\n\n prop_and_conds = PropertyAndConditions(property=prop, conditions=[cond])\n assert prop_and_conds.name == prop.name\n assert prop_and_conds.template == prop.template\n assert prop_and_conds.origin == prop.origin\n assert prop_and_conds.value == prop.value", "def __init__(self) -> None:\r\n self.filters: list[Filter] = []", "def properties(self, filters={}):\n return self.__get_list_client(Property)(filters=filters)", "def build_query_filters(**kwargs):\n queryTarget = kwargs.get(\"queryTarget\", None)\n targetSubtreeClass = kwargs.get(\"targetSubtreeClass\", None)\n queryTargetFilter = kwargs.get(\"queryTargetFilter\", None)\n rspSubtree = kwargs.get(\"rspSubtree\", None)\n rspSubtreeClass = kwargs.get(\"rspSubtreeClass\", None)\n rspSubtreeInclude = kwargs.get(\"rspSubtreeInclude\", None)\n rspPropInclude = kwargs.get(\"rspPropInclude\", None)\n orderBy = kwargs.get(\"orderBy\", None)\n opts = \"\"\n if queryTarget is not None:\n opts+= \"&query-target=%s\" % queryTarget\n if targetSubtreeClass is not None:\n opts+= \"&target-subtree-class=%s\" % targetSubtreeClass\n if queryTargetFilter is not None:\n opts+= \"&query-target-filter=%s\" % queryTargetFilter\n if rspSubtree is not None:\n opts+= \"&rsp-subtree=%s\" % rspSubtree\n if rspSubtreeClass is not None:\n opts+= \"&rsp-subtree-class=%s\" % rspSubtreeClass\n if rspSubtreeInclude is not None:\n opts+= \"&rsp-subtree-include=%s\" % 
rspSubtreeInclude\n if rspPropInclude is not None:\n opts+= \"&rsp-prop-include=%s\" % rspPropInclude\n if orderBy is not None:\n opts+= \"&order-by=%s\" % orderBy\n\n if len(opts)>0: opts = \"?%s\" % opts.strip(\"&\")\n return opts", "def _component_specs(self):\n specs = dict(pretransformed_input=self._input_spec)\n if self._transform_is_composite:\n specs['transform_fn'] = self.transform_or_spec\n if self._also_track_spec is not None:\n specs['also_track'] = self._also_track_spec\n return specs", "def build_feature_filter(self):\n if self.features == [\"*\"]:\n random_iso = list(self.data.keys())[0]\n self.features = set()\n for lang_features in self.data.values():\n self.features |= set(lang_features.keys())\n self.features = list(self.features)\n if self.exclusions:\n self.features = [f for f in self.features if f not in self.exclusions]\n self.feature_filter = set(self.features)", "def __getSelectionFilter(self):\n \n selectionPairs = []\n selectionPairs.append(('field','field'))\n selectionPairs.append(('spw','spw'))\n selectionPairs.append(('polarization','correlation'))\n selectionPairs.append(('baseline','antenna'))\n selectionPairs.append(('time','timerange'))\n selectionPairs.append(('scan','scan'))\n selectionPairs.append(('uvdist','uvrange'))\n selectionPairs.append(('scanintent','intent'))\n selectionPairs.append(('observation','observation'))\n return self.__generateFilter(selectionPairs)", "def __init__(self, filter_spec = [ [{},False] ]):\n\n Qt.QObject.__init__(self)\n\n\n # key = property name of Element object\n # value = displayed column name for tables showing choices and matches\n self.elem_property_vs_col_name = \\\n {'name':'Name', 'devname':'Dev. Name', 'cell':'Cell',\n 'family':'Family', 'girder':'Girder', 'group':'Group',\n 'index':'Lat. Index', 'length':'Eff.Len', 'phylen':'Phys. 
Len.',\n 'pv':'PV', 'sb':'sb', 'se':'se', 'symmetry':'Symmetry',\n 'virtual':'Virtual', 'sequence':'Sequence'}\n\n # key = property name of Element object & exclusion flag\n # value = displayed column name for table showing filters\n self.filter_property_vs_col_name = \\\n self.elem_property_vs_col_name.copy()\n self.filter_property_vs_col_name.update({'exclude':'Excl.'}) # adding extra column\n\n # Specify the default column order you want for tables showing\n # choices and matches.\n self.elem_property_list = ['family', 'name', 'devname', 'cell',\n 'girder', 'symmetry', 'group', 'virtual',\n 'sb', 'se', 'pv', 'length', 'phylen',\n 'index', 'sequence']\n self.col_name_list = [self.elem_property_vs_col_name[prop]\n for prop in self.elem_property_list]\n self.choice_dict = dict.fromkeys(self.elem_property_list)\n\n # Specify the default column order you want for table showing\n # filters.\n self.filter_property_list = self.elem_property_list[:]\n self.filter_property_list.insert(0, 'exclude')\n self.filter_col_name_list = [self.filter_property_vs_col_name[prop]\n for prop in self.filter_property_list]\n self.filter_dict = dict.fromkeys(self.filter_property_list)\n\n self.numeric_filter_list = ['index', 'phylen', 'length', 'sb', 'se']\n self.not_implemented_filter_list = ['sequence']\n\n self.filter_spec = filter_spec\n\n self.allElements = ap.getElements('*')\n\n # Initialization of matching data information\n self.matched = [ [True]*len(self.allElements) ]\n self.combine_matched_list()\n self.update_choice_dict()\n\n # Apply initial filters provided by a user, if any.\n if self.filter_spec:\n isCaseSensitive = False\n self.filterData(range(len(self.filter_spec)), isCaseSensitive)\n\n self.selectedElements = []", "def build_filters(self, filters = None):\n if filters is None:\n filters = {}\n \n orm_filters = super(AreaResource, self).build_filters(filters)\n \n if \"level\" in filters:\n orm_filters[\"layout__level\"] = int(filters[\"level\"])\n \n return orm_filters", "def build_filters(self, view, filters=None):\n query_builder = self.get_query_builder(backend=self, view=view)\n return query_builder.build_query(**(filters if filters else {}))", "def create_filter_from_args(self, args: dict) -> Filter:\n keys = set(args.keys())\n filter_args = {}\n\n if \"name\" in keys:\n value = args.get('name')\n if value != \"\":\n filter_args.update({\"text_filter\": args.get('name')})\n if \"product_in\" in keys:\n value = args.get('product_in')\n if value != \"\":\n filter_args.update({\"product_in\": 'true' if value == \"yes\" else 'false'})\n if \"human_in\" in keys:\n value = args.get('human_in')\n if value != \"\":\n filter_args.update({\"human_in\": 'true' if value == \"yes\" else 'false'})\n if \"institutional\" in keys:\n value = args.get('institutional')\n if value != \"\":\n filter_args.update({\"institutional\": 'true' if value == \"yes\" else 'false'})\n if \"format\" in keys:\n value = args.get('format')\n if value != \"\":\n filter_args.update({\"picture_format\": 'true' if value == \"vertical\" else 'false'})\n if \"credit\" in keys:\n value = args.get('credit')\n if value != \"\":\n filter_args.update({\"author_credits\": value})\n if \"limited_use\" in keys:\n value = args.get('limited_use')\n if value != \"\":\n filter_args.update({\"limited_usage\": 'true' if value == \"yes\" else 'false'})\n if \"tags\" in keys:\n value = args.get('tags')\n if value != \"\":\n filter_args.update({\"limited_usage\": value.split(';')})\n\n f = Filter(**filter_args)\n return f", "def 
_build_query_filters(self, query: dict, filters: list) -> dict:\n\n for filter_tuple in filters:\n if not isinstance(filter_tuple, tuple) or len(filter_tuple) != 3:\n LOG.error(\"polling_filters tuple %s : invalid format or does not contain 3 elements - skipping this filter\", filter_tuple)\n continue\n if isinstance(filter_tuple[2], list) :\n # If \"value\" is a list of values then create a rule (json object) for each \n # value and use \"OR\" condition.\n condition = {'condition': \"OR\",\n 'rules': []}\n for value in filter_tuple[2]:\n rule = {}\n # Prepend fieldname with \"table.\" string\n rule['field'] = f\"table.{filter_tuple[0]}\"\n rule['operator'] = filter_tuple[1]\n rule['value'] = value\n condition['rules'].append(rule)\n query['rules'].append(condition)\n else:\n # Create a single rule for this tuple\n rule = {}\n field_name = f\"table.{filter_tuple[0]}\"\n rule['field'] = field_name\n rule['operator'] = filter_tuple[1]\n rule['value'] = filter_tuple[2]\n query['rules'].append(rule)\n return query", "def get_prop_spec(client_factory, spec_type, properties):\r\n prop_spec = client_factory.create('ns0:PropertySpec')\r\n prop_spec.type = spec_type\r\n prop_spec.pathSet = properties\r\n return prop_spec", "def make_filter_specification(cls, filter_string):\n try:\n return parse_filter(filter_string)\n except ParseException as err:\n raise ValueError('Expression parameters have errors. %s' % err)", "def _init_optimizer_params(self):\n order = [\n [Peaking.__name__, True, True], # Peaking\n [LowShelf.__name__, True, True], # Low shelfs\n [HighShelf.__name__, True, True], # High shelfs\n [Peaking.__name__, True, False], # Peaking with fixed q\n [LowShelf.__name__, True, False], # Low shelfs with fixed q\n [HighShelf.__name__, True, False], # High shelfs with fixed q\n [Peaking.__name__, False, True], # Peaking with fixed fc\n [LowShelf.__name__, False, True], # Low shelfs with fixed fc\n [HighShelf.__name__, False, True], # High shelfs with fixed fc\n [Peaking.__name__, False, False], # Peaking with fixed fc and q\n [LowShelf.__name__, False, False], # Low shelfs with fixed fc and q\n [HighShelf.__name__, False, False], # High shelfs with fixed fc and q\n ]\n\n def init_order(filter_ix):\n filt = self.filters[filter_ix]\n ix = order.index([filt.__class__.__name__, filt.optimize_fc, filt.optimize_q])\n val = ix * 100\n if filt.optimize_fc:\n val += 1 / np.log2(filt.max_fc / filt.min_fc)\n return val\n\n # Initialize filter params as list of empty lists, one per filter\n filter_params = [[]] * len(self.filters)\n # Indexes to self.filters sorted by filter init order\n filter_argsort = sorted(list(range(len(self.filters))), key=init_order, reverse=True)\n remaining_target = self.target.copy()\n for ix in filter_argsort: # Iterate sorted filter indexes\n filt = self.filters[ix] # Get filter\n filter_params[ix] = filt.init(remaining_target) # Init filter and place params to list of lists\n remaining_target -= filt.fr # Adjust target\n filter_params = np.concatenate(filter_params).flatten() # Flatten params list\n return filter_params", "def test_query_with_property_filter(config):\n p = PostgreSQLProvider(config)\n feature_collection = p.query(properties=[(\"waterway\", \"stream\")])\n features = feature_collection.get('features', None)\n stream_features = list(\n filter(lambda feature: feature['properties']['waterway'] == 'stream',\n features))\n assert (len(features) == len(stream_features))\n\n feature_collection = p.query()\n features = feature_collection.get('features', None)\n 
stream_features = list(\n filter(lambda feature: feature['properties']['waterway'] == 'stream',\n features))\n other_features = list(\n filter(lambda feature: feature['properties']['waterway'] != 'stream',\n features))\n assert (len(features) != len(stream_features))\n assert (len(other_features) != 0)", "def get_prop_spec(client_factory, spec_type, properties):\n prop_spec = client_factory.create('ns0:PropertySpec')\n prop_spec.type = spec_type\n prop_spec.pathSet = properties\n return prop_spec", "def filtered_by(self, property):\n C = copy.deepcopy(self)\n C.filter_by(property)\n return C", "def build_filters(self, filters=None):\n filters.pop('username')\n return super(UserResource, self).build_filters(filters)", "def __generateFilter(self, selectionPairs):\n filter = None\n for (selSyntax, argSyntax) in selectionPairs:\n if self._arg.has_key(argSyntax) and self._arg[argSyntax] != '':\n if filter is None:\n filter = {}\n filter[selSyntax] = self._arg[argSyntax]\n \n return filter", "def testUsingFilterTool(self):\n pass", "def build(self, spec, prefix):\n make()", "def AddFilter(query, property_filter, value):\n p = property_filter.split()[0]\n # pylint: disable-msg=W0212\n assert p in query._model_class.properties()\n query.filter(property_filter, value)", "def _build_filters(self, criteria: Q):\n composed_query = query.Q()\n\n if criteria.connector == criteria.AND:\n for child in criteria.children:\n if isinstance(child, Q):\n composed_query = composed_query & self._build_filters(child)\n else:\n stripped_key, lookup_class = self.provider._extract_lookup(child[0])\n lookup = lookup_class(stripped_key, child[1])\n if criteria.negated:\n composed_query = composed_query & ~lookup.as_expression()\n else:\n composed_query = composed_query & lookup.as_expression()\n else:\n for child in criteria.children:\n if isinstance(child, Q):\n composed_query = composed_query | self._build_filters(child)\n else:\n stripped_key, lookup_class = self.provider._extract_lookup(child[0])\n lookup = lookup_class(stripped_key, child[1])\n if criteria.negated:\n composed_query = composed_query | ~lookup.as_expression()\n else:\n composed_query = composed_query | lookup.as_expression()\n\n return composed_query", "def write_filter_spec(filters, filename):\n data = export_filters(filters)\n with open(filename, 'w') as fp:\n json.dump(data, fp, indent = 4)", "def assert_filter_builds_to(self, expect, filter, _chain_filters=None):\n final_query = {'bool': {'must_not': [RESEARCH.to_dict()]}}\n\n if expect:\n final_query['bool']['must'] = expect\n main, nested = filter.build(_chain_filters)\n assert final_query == main.to_dict()\n\n return main, nested", "def _set_filters(self, options):\n if options.keywords:\n self.filters[\"keywords\"] = string_to_list(options.keywords)\n if options.features:\n self.filters[\"features\"] = string_to_list(options.features)\n if options.authors:\n self.filters[\"authors\"] = string_to_list(options.authors)\n if options.version:\n self.filters[\"version\"] = options.version", "def build_filters(self, filters = None):\n if filters is None:\n filters = {}\n \n orm_filters = super(UserResource, self).build_filters(filters)\n \n if \"area\" in filters:\n area_id = filters['area']\n area = Area.objects.get(id = area_id)\n \n #checked_in_user_profiles = [user_ctx.user for user_ctx in UserContext.objects.filter(currentArea = area)]\n orm_filters[\"pk__in\"] = [user_ctx.user.pk \n for user_ctx in UserContext.objects.filter(currentArea = area)]\n \n elif \"environment\" in filters:\n 
environment_id = filters['environment']\n environment = Environment.objects.get(id = environment_id)\n \n #checked_in_user_profiles = [user_ctx.user for user_ctx in UserContext.objects.filter(currentArea = area)]\n orm_filters[\"pk__in\"] = [user_ctx.user.pk \n for user_ctx in UserContext.objects.filter(currentEnvironment = environment)]\n \n return orm_filters", "def _CheckFilter(self, filter, values):\n try:\n match = Query.FILTER_REGEX.match(filter)\n if not match:\n raise datastore_errors.BadFilterError(\n 'Could not parse filter string: %s' % str(filter))\n except TypeError:\n raise datastore_errors.BadFilterError(\n 'Could not parse filter string: %s' % str(filter))\n\n property = match.group(1)\n operator = match.group(3)\n if operator is None:\n operator = '='\n\n if isinstance(values, tuple):\n values = list(values)\n elif not isinstance(values, list):\n values = [values]\n if isinstance(values[0], datastore_types._RAW_PROPERTY_TYPES):\n raise datastore_errors.BadValueError(\n 'Filtering on %s properties is not supported.' % typename(values[0]))\n\n if operator in self.INEQUALITY_OPERATORS:\n if self.__inequality_prop and property != self.__inequality_prop:\n raise datastore_errors.BadFilterError(\n 'Only one property per query may have inequality filters (%s).' %\n ', '.join(self.INEQUALITY_OPERATORS))\n elif len(self.__orderings) >= 1 and self.__orderings[0][0] != property:\n raise datastore_errors.BadFilterError(\n 'Inequality operators (%s) must be on the same property as the '\n 'first sort order, if any sort orders are supplied' %\n ', '.join(self.INEQUALITY_OPERATORS))\n\n if (self.__kind is None and\n property != datastore_types._KEY_SPECIAL_PROPERTY):\n raise datastore_errors.BadFilterError(\n 'Only %s filters are allowed on kindless queries.' 
%\n datastore_types._KEY_SPECIAL_PROPERTY)\n\n if property in datastore_types._SPECIAL_PROPERTIES:\n if property == datastore_types._KEY_SPECIAL_PROPERTY:\n for value in values:\n if not isinstance(value, Key):\n raise datastore_errors.BadFilterError(\n '%s filter value must be a Key; received %s (a %s)' %\n (datastore_types._KEY_SPECIAL_PROPERTY, value, typename(value)))\n\n return match", "def property_setup(self, properties):\n return properties", "def condition_filters(self):\r\n return filters.Filters(self)", "def buildRegFilterList(self, filename, listname='regFilterList'):", "def __properties__(self) -> dict:\r\n parameters = [\r\n d for d in dir(self) if (d[0] != \"_\") and (d.count(\"set\") == 0)\r\n and (d.count(\"_c\") == 0) and (d.count(\"_f\") == 0)\r\n ]\r\n\r\n return self.__as_json__(parameters)", "def create_filter(args: dict) -> dict | None:\n if 'ip' in args:\n args['networkInterfaces.ipv4'] = args.pop('ip')\n expression_list = []\n for arg in args:\n value = args.get(arg)\n if arg == 'riskScore':\n restriction = \"GREATER_THAN_OR_EQUAL_TO\"\n values_list = [arg_to_number(value)]\n else:\n restriction = \"IN\"\n values_list = argToList(value)\n\n values_res = [{\"value\": val} for val in values_list]\n expression = {\n \"propertyName\": arg,\n \"restrictionType\": restriction,\n \"propertyValues\": values_res\n }\n expression_list.append(expression)\n if expression_list:\n return {\"criteria\": {\"criteriaList\": [{\"expressionList\": expression_list}], \"predicateType\": \"AND\"}}\n else:\n return None", "def _build_filters(self, criteria: Q):\n # Decide the function based on the connector type\n func = and_ if criteria.connector == criteria.AND else or_\n params = []\n for child in criteria.children:\n if isinstance(child, Q):\n # Call the function again with the child\n params.append(self._build_filters(child))\n else:\n # Find the lookup class and the key\n stripped_key, lookup_class = self.provider._extract_lookup(child[0])\n\n # Instantiate the lookup class and get the expression\n lookup = lookup_class(stripped_key, child[1], self.model_cls)\n if criteria.negated:\n params.append(~lookup.as_expression())\n else:\n params.append(lookup.as_expression())\n\n return func(*params)", "def amh_attr_filter_query(self):\n \n attr_filter_query = \"\"\"\n WITH {final_cte_name} as (\n -- Pull list of devices that were active (has any row; don't need TVT >0) in the past 4 weeks\n SELECT DISTINCT device_id\n FROM tubidw.all_metric_hourly\n WHERE DATE_TRUNC('week',hs) >= dateadd('week',-4,DATE_TRUNC('week',GETDATE()))\n AND DATE_TRUNC('week',hs) < DATE_TRUNC('week',GETDATE())\n {attr_filter} -- attribute filters dynamically populate here\n -- TODO: currently can't get a metric/attribute combo filter, like \"devices that watched at least 50% of a specific content_id\"\n )\n \"\"\"\n return attr_filter_query", "def _build_filter_set(self, column_config_name, service_name=None, **filters):\n\n if not service_name:\n service_name = column_config_name\n\n if not self._column_configs.get(service_name):\n self._get_col_config(service_name, fetch_name=column_config_name)\n\n caomColConfig = self._column_configs[service_name]\n\n mashupFilters = []\n for colname, value in filters.items():\n\n # make sure value is a list-like thing\n if np.isscalar(value,):\n value = [value]\n\n # Get the column type and separator\n colInfo = caomColConfig.get(colname)\n if not colInfo:\n warnings.warn(\"Filter {} does not exist. 
This filter will be skipped.\".format(colname), InputWarning)\n continue\n\n colType = \"discrete\"\n if (colInfo.get(\"vot.datatype\", colInfo.get(\"type\")) in (\"double\", \"float\", \"numeric\")) \\\n or colInfo.get(\"treatNumeric\"):\n colType = \"continuous\"\n\n separator = colInfo.get(\"separator\")\n freeText = None\n\n # validate user input\n if colType == \"continuous\":\n if len(value) < 2:\n warningString = \"{} is continuous, \".format(colname) + \\\n \"and filters based on min and max values.\\n\" + \\\n \"Not enough values provided, skipping...\"\n warnings.warn(warningString, InputWarning)\n continue\n elif len(value) > 2:\n warningString = \"{} is continuous, \".format(colname) + \\\n \"and filters based on min and max values.\\n\" + \\\n \"Too many values provided, the first two will be \" + \\\n \"assumed to be the min and max values.\"\n warnings.warn(warningString, InputWarning)\n else: # coltype is discrete, all values should be represented as strings, even if numerical\n value = [str(x) for x in value]\n\n # check for wildcards\n\n for i, val in enumerate(value):\n if ('*' in val) or ('%' in val):\n if freeText: # freeText is already set cannot set again\n warningString = \"Only one wildcarded value may be used per filter, \" + \\\n \"all others must be exact.\\n\" + \\\n \"Skipping {}...\".format(val)\n warnings.warn(warningString, InputWarning)\n else:\n freeText = val.replace('*', '%')\n value.pop(i)\n\n # craft mashup filter entry\n entry = {}\n entry[\"paramName\"] = colname\n if separator:\n entry[\"separator\"] = separator\n if colType == \"continuous\":\n entry[\"values\"] = [{\"min\": value[0], \"max\":value[1]}]\n else:\n entry[\"values\"] = value\n if freeText:\n entry[\"freeText\"] = freeText\n\n mashupFilters.append(entry)\n\n return mashupFilters", "def get_properties(self) -> List[ObserverPropertiesItem]:\n return [\n self._prop_builder.auto('Seed', type(self).seed),\n self._prop_builder.auto('Class filter', type(self).class_filter),\n self._prop_builder.auto('Random order', type(self).random_order),\n self._prop_builder.auto('Save gpu memory', type(self).save_gpu_memory),\n self._prop_builder.auto('Location filter ration', type(self).location_filter_ratio),\n self._prop_builder.auto('Dataset size', type(self).dataset_size),\n self._prop_builder.auto('Dataset config', type(self).dataset_config),\n self._prop_builder.auto('Switch training resets train pos ', type(self).switch_train_resets_train_pos),\n self._prop_builder.auto('Hide labels', type(self).is_hide_labels)\n ]", "def build_from_mapping(mapping, module_params):\n filters = module_params.get('filters', {})\n for param in module_params:\n if param != 'filters' and module_params[param] and param in mapping:\n filters[mapping[param]] = module_params[param]\n return from_dict(filters)", "def build_filters(self, filters=None):\n\n if filters is None:\n filters = {}\n\n orm_filters = super(EmployeeResource, self).build_filters(filters)\n\n if 'role' in filters:\n ids = (Employee.by_assignment_role(filters['role'])\n .values_list('id', flat=True))\n orm_filters['pk__in'] = ids\n\n return orm_filters", "def _component_specs(self):\n specs = dict(pretransformed_input=self._input_spec)\n if self._transform_is_composite:\n specs['bijector'] = self.transform_or_spec\n return specs", "def filter_by_property(self, properties=None, **kwargs):\r\n\t\tif properties is None:\r\n\t\t\tproperties = {}\r\n\t\tproperties.update(kwargs)\r\n\t\tresult_list = ElementList()\r\n\t\tfor element in self:\r\n\t\t\tif all(k 
in element.properties and element.properties[k] == v\r\n\t\t\t\t\tfor k, v in properties.items()):\r\n\t\t\t\tresult_list.append(element)\r\n\t\treturn result_list", "def _determine_properties(self, paramdict):\n for var in paramdict:\n if is_dimensionless(paramdict[var]):\n self._all_params_unit[var] = \"none\"\n yield lems.Property(var, \"none\")\n else:\n dim = _determine_dimension(paramdict[var])\n self._all_params_unit[var] = dim\n yield lems.Property(var, dim)", "def get_filter_config(\n platform,\n filter_name,\n filter_options=None,\n terms=None,\n prepend=True,\n pillar_key=\"acl\",\n pillarenv=None,\n saltenv=None,\n merge_pillar=True,\n only_lower_merge=False,\n revision_id=None,\n revision_no=None,\n revision_date=True,\n revision_date_format=\"%Y/%m/%d\",\n):\n if not filter_options:\n filter_options = []\n if not terms:\n terms = []\n if merge_pillar and not only_lower_merge:\n acl_pillar_cfg = _get_pillar_cfg(\n pillar_key, saltenv=saltenv, pillarenv=pillarenv\n )\n filter_pillar_cfg = _lookup_element(acl_pillar_cfg, filter_name)\n filter_options = filter_options or filter_pillar_cfg.pop(\"options\", None)\n if filter_pillar_cfg:\n # Only when it was able to find the filter in the ACL config\n pillar_terms = filter_pillar_cfg.get(\n \"terms\", []\n ) # No problem if empty in the pillar\n terms = _merge_list_of_dict(terms, pillar_terms, prepend=prepend)\n # merge the passed variable with the pillar data\n # any filter term not defined here, will be appended from the pillar\n # new terms won't be removed\n filters = []\n filters.append(\n {\n filter_name: {\n \"options\": _make_it_list({}, filter_name, filter_options),\n \"terms\": terms,\n }\n }\n )\n return get_policy_config(\n platform,\n filters=filters,\n pillar_key=pillar_key,\n pillarenv=pillarenv,\n saltenv=saltenv,\n merge_pillar=merge_pillar,\n only_lower_merge=True,\n revision_id=revision_id,\n revision_no=revision_no,\n revision_date=revision_date,\n revision_date_format=revision_date_format,\n )", "def test_parse_filter_params_success(self):\n filter_params = {\n \"resolution\": \"daily\",\n \"time_scope_value\": \"-10\",\n \"time_scope_units\": \"day\",\n \"region\": FAKE.word(),\n \"payer_tenant_id\": FAKE.uuid4(),\n \"product_service\": FAKE.word(),\n }\n serializer = OCIFilterSerializer(data=filter_params)\n self.assertTrue(serializer.is_valid())", "def _parse_spec(self, spec):\n operators = ('ne', 'gt', 'gte', 'lt', 'lte', 'in', 'nin', 'mod', 'all', 'size', 'exists', 'not')\n \n new_spec = {}\n for key, value in spec.iteritems():\n elements = key.split('__')\n op = None\n if elements[-1] in operators:\n op = elements.pop()\n\n field_spec, last_field = self.document._meta.resolve_subfield_hierarchy(elements, get_field = True)\n if last_field is not None:\n # TODO value should be properly prepared\n value = last_field.to_query(value)\n\n # TODO \n\n if op is not None:\n value = { \"$%s\" % op : value }\n\n new_spec[\".\".join(field_spec)] = value\n \n return new_spec", "def filters(self):\n return {\n 'port_channels': port_channels\n }", "def __init__(self, type: int, filter: int):\n ...", "def _add_matcher_specific_properties_to_json(self):\n return {\n 'unaryNumericMatcherData': {\n 'dataType': self._data_type,\n 'value': self._original_value,\n }\n }", "def _add_matcher_specific_properties_to_json(self):\n return {\n 'unaryNumericMatcherData': {\n 'dataType': self._data_type,\n 'value': self._original_value,\n }\n }", "def _add_matcher_specific_properties_to_json(self):\n return {\n 
'unaryNumericMatcherData': {\n 'dataType': self._data_type,\n 'value': self._original_value,\n }\n }", "def _add_filters(self, filters):\n self._env.filters['dateformat'] = dateformat\n self._env.filters.update(filters or {})", "def _initFilterTable(self):\n\n t = self.tableWidget_filter # shorthand notation\n\n ### Header population & properties\n t.setHorizontalHeaderLabels(self.data.filter_col_name_list)\n t.horizontalHeader().setMovable(True)\n\n ### Item population\n nRows = len(self.data.filter_spec)\n t.setRowCount(nRows)\n for (j, spec) in enumerate(self.data.filter_spec):\n for (i, filter_prop) in enumerate(self.data.filter_property_list):\n if filter_prop is not 'exclude':\n if filter_prop in spec[0]:\n item_string = spec[0][filter_prop]\n else:\n item_string = ''\n t.setItem(j,i,\n Qt.QTableWidgetItem(item_string))\n\n t.item(j,i).setFlags(Qt.Qt.ItemIsSelectable|\n Qt.Qt.ItemIsEditable|\n Qt.Qt.ItemIsDragEnabled|\n Qt.Qt.ItemIsEnabled) # Make it editable\n else:\n t.setItem(j,i,Qt.QTableWidgetItem(''))\n\n t.item(j,i).setFlags(Qt.Qt.ItemIsSelectable|\n Qt.Qt.ItemIsEditable|\n Qt.Qt.ItemIsDragEnabled|\n Qt.Qt.ItemIsUserCheckable|\n Qt.Qt.ItemIsEnabled) # Make it checkable\n if spec[1]: # exclusion flag\n t.item(j,i).setCheckState(Qt.Qt.Checked)\n else:\n t.item(j,i).setCheckState(Qt.Qt.Unchecked)\n\n\n\n ### Presentation formatting\n t.resizeColumnsToContents()\n for i in range(t.columnCount()):\n if t.columnWidth(i) > self.max_auto_adjust_column_width:\n t.setColumnWidth(i,self.max_auto_adjust_column_width)", "def buildReport(cls, queryList):\n boxList = list()\n for dslString,filterList in queryList:\n data = cls.__dataRequest(dslString[0])\n if data != '{}':\n for filter in filterList:\n try:\n if filter:\n filterObj = filter()\n filterObj.loadData(data)\n boxList.extend(filterObj.createBoxList())\n except Exception as e:\n devLogger.error(\"Could not create Filter object: \" + str(e))\n return boxList", "def __init__(self, *args, **kwargs):\n\n # Construct the base instance.\n super(FilterCommitList, self).__init__(*args, **kwargs)\n\n # Save the \"stop on first match\" flag.\n self.matchfirst = self.get_boolean('matchFirst')\n logger.debug('matchFirst = {0}'.format(self.matchfirst))\n\n # Construct a regular expression tag evaluator for the path\n # names.\n pathregextag = self.thistag.find('PathRegex')\n if pathregextag != None:\n self.pathregex = RegexTag(pathregextag)\n else:\n self.pathregex = None\n\n # Construct a regular expression tag evaluator for the change\n # types.\n typeregextag = self.thistag.find('ChgTypeRegex')\n if typeregextag != None:\n self.typeregex = RegexTag(typeregextag)\n else:\n self.typeregex = None\n\n # Require at least one regex tag.\n if self.typeregex == None and self.pathregex == None:\n raise ValueError(\n 'Required tag missing: PathRegex or ChgTypeRegex')", "def init_filters(model: Model, settings: Model) -> None:\n filters = [\n {\"name\": \"Project\", \"filtration_type\": \"drop-down\"},\n {\"name\": \"Attachments\", \"filtration_type\": \"numeric\"},\n {\"name\": \"Priority\", \"filtration_type\": \"drop-down\"},\n {\"name\": \"Resolved\", \"filtration_type\": \"date\"},\n {\"name\": \"Labels\", \"filtration_type\": \"string\"},\n {\"name\": \"Created\", \"filtration_type\": \"date\"},\n {\"name\": \"Comments\", \"filtration_type\": \"numeric\"},\n {\"name\": \"Status\", \"filtration_type\": \"drop-down\"},\n {\"name\": \"Key\", \"filtration_type\": \"drop-down\"},\n {\"name\": \"Summary\", \"filtration_type\": \"string\"},\n {\"name\": 
\"Resolution\", \"filtration_type\": \"drop-down\"},\n {\"name\": \"Description\", \"filtration_type\": \"string\"},\n {\"name\": \"Components\", \"filtration_type\": \"string\"},\n ]\n for filter_ in filters:\n model.objects.create(\n name=filter_[\"name\"],\n filtration_type=filter_[\"filtration_type\"],\n settings=settings,\n )", "def test_filter_settings(self):\n self.es.register_filter(foo='bar')\n self.assertTrue(callable(self.es.filter['all'][0]))\n self.es.register_filter(bar='baz')\n self.assertLength(self.es.filter['all'], 2)", "def generate_property_template(self):\n template = {\n \"@id\": \"url or curie of the property\",\n \"@type\": \"rdf:Property\",\n \"rdfs:comment\": \"description of the property\",\n \"rdfs:label\": \"carmel case, should match @id\",\n \"schema:domainIncludes\": {\n \"@id\": \"class which use it as a property, could be list\"\n },\n \"schema:isPartOf\": {\n \"@id\": \"http://schema.biothings.io\"\n },\n \"schema:rangeIncludes\": {\n \"@id\": \"relates a property to a class that constitutes (one of) the expected type(s) for values of the property\"\n }\n }\n return template", "def build_query(self):\r\n\r\n # this filter is required\r\n query = Metric.query.filter(Metric.metric_type == self.metric_type)\r\n\r\n # get query parameters (parameters which are not here are ignored)\r\n is_active = request.args.get('is_active')\r\n frequency = request.args.get('frequency')\r\n threshold_type = request.args.get('threshold_type')\r\n sort = request.args.get('sort')\r\n\r\n # process each parameter, and if valid add it as a query condition\r\n if is_active is not None:\r\n is_active = is_active.lower() == 'true'\r\n query = Metric.query.filter_by(is_active=is_active)\r\n if frequency is not None:\r\n try:\r\n frequency = Frequency.from_name(frequency)\r\n except ValueError as e:\r\n msg = f\"Invalid 'frequency': {frequency}. Use one of {Frequency.values()}\"\r\n abort(400, message=msg)\r\n query = query.filter_by(frequency=frequency)\r\n if threshold_type is not None:\r\n try:\r\n threshold_type = ThresholdType.from_name(threshold_type)\r\n except ValueError as e:\r\n msg = f\"Invalid 'threshold_type': {threshold_type}. 
Use one of \" \\\r\n f\"{ThresholdType.values()}\"\r\n abort(400, message=msg)\r\n query = query.filter_by(threshold_type=threshold_type)\r\n if sort is not None and sort.lstrip(\"-\") == 'metric_id':\r\n query = query.order_by(Metric.metric_id.desc())\r\n else:\r\n query = query.order_by(Metric.metric_id)\r\n\r\n return query", "def make_filter(name, schema):\n return HSMFilter(name, schema)", "def _get_filters(self, request, queryset, view): # noqa\n self.opts = queryset.model._meta\n filter_fields = getattr(view, \"filter_fields\", None)\n self.exclude = {}\n self.filters = {}\n\n if filter_fields:\n blacklist = RexList(getattr(view, \"filter_blacklist\", []))\n mapping = self._get_mapping(view)\n\n for fieldname_arg in self.query_params:\n raw_value = self.query_params.get(fieldname_arg)\n if raw_value in [\"''\", '\"\"']:\n raw_value = \"\"\n\n negate = fieldname_arg[-1] == \"!\"\n\n if negate:\n filter_field_name = fieldname_arg[:-1]\n TARGET = self.exclude\n else:\n TARGET = self.filters\n filter_field_name = fieldname_arg\n\n if filter_field_name in self.excluded_query_params:\n continue\n if self.ignore_filter(request, filter_field_name, view):\n continue\n try:\n if filter_field_name in blacklist:\n raise InvalidQueryArgumentError(fieldname_arg)\n parts = None\n if \"__\" in filter_field_name:\n parts = filter_field_name.split(\"__\")\n filter_field_name = parts[0]\n op = parts[-1]\n else:\n op = \"\"\n processor = getattr(\n self,\n \"process_{}\".format(filter_field_name),\n getattr(view, \"drfqs_filter_{}\".format(filter_field_name), None),\n )\n\n if (filter_field_name not in filter_fields) and (not processor):\n self.unknown_arguments.append((fieldname_arg, filter_field_name))\n continue\n # raise InvalidQueryArgumentError(filter_field_name)\n if raw_value is None and not processor:\n continue\n # field is configured in Serializer\n # so we use 'source' attribute\n if filter_field_name in mapping:\n real_field_name = mapping[filter_field_name].source\n # if '.' 
in real_field_name:\n # real_field_name = real_field_name.split('.')[0]\n # field_name = real_field_name.replace('.', '__')\n else:\n real_field_name = filter_field_name\n\n if processor:\n payload = {\n \"field\": filter_field_name,\n \"request\": request,\n \"param\": fieldname_arg,\n \"negate\": negate,\n \"op\": op,\n \"field_name\": real_field_name,\n \"parts\": parts,\n \"value\": raw_value,\n \"real_field_name\": real_field_name,\n }\n _f, _e = processor(dict(self.filters), dict(self.exclude), **payload)\n self.filters.update(**_f)\n self.exclude.update(**_e)\n else:\n if not raw_value:\n continue\n # field_object = opts.get_field(real_field_name)\n value_type = self.field_type(real_field_name)\n if parts:\n f = \"{}__{}\".format(real_field_name, \"__\".join(parts[1:]))\n else:\n f = filter_field_name\n if op in [\"in\", \"contained_by\"]:\n value = raw_value.split(\",\")\n elif op == \"acontains\":\n value = raw_value.split(\",\")\n f = f.replace(\"__acontains\", \"__contains\")\n elif op == \"isnull\":\n value = parse_bool(raw_value)\n elif value_type == bool:\n value = parse_bool(raw_value)\n else:\n value = raw_value\n TARGET[f] = value\n except ValueError:\n raise InvalidQueryValueError(fieldname_arg, raw_value)\n except QueryFilterException:\n raise\n except Exception as e:\n logger.exception(e)\n raise\n return self.filters, self.exclude", "def filters(self):\n return {\n 'dict_merge': do_merge,\n 'list_merge': do_list_merge,\n 'attrs': do_attrs,\n 'merge_mysql_privs': do_merge_mysql_privs,\n 'role': do_role,\n 'reduce': do_reduce,\n 'dict_join': do_dict_join,\n 'get': do_get,\n 'contains': do_contains,\n 'selectattrs': do_selectattrs,\n 'convert_integer': do_convert_integer,\n 'camel': do_camel\n }", "def __init__(self, filters, use_include_order):\n self.filters = filters\n self.use_include_order = use_include_order", "def _create_fields(property_, alias_dictionary):\n fields = []\n # Only generate properties that have a field template\n if property_['field_template'] is not None:\n # If the property is independent, add the single-bit sized isInherited flag\n # to the list of Fields as well.\n if property_['independent']:\n fields.append(_create_inherited_flag_field(property_))\n\n fields.append(_create_property_field(property_, alias_dictionary))\n\n return fields", "def get_filter_kwargs(self, *_, **__) -> Dict[str, Any]:", "def create_filter_query(self, collection_name: str, field: str, filter_type: str, filter_values: Union[List[str], str]=None):\n if filter_type == 'contains':\n # return [{'field' : field, 'filter_type' : 'contains', \"condition\":\"==\", \"condition_value\": filter_values}]\n return [{'field': field, 'filter_type': 'regexp', 'condition': '==', 'condition_value': '.*' + str(filter_values) + '.*'}]\n if filter_type == 'exact_match':\n return [{'field' : field, 'filter_type' : 'exact_match', \"condition\":\"==\", \"condition_value\": filter_values}]\n if filter_type == 'categories':\n return [{'field' : field, 'filter_type' : 'categories', \"condition\":\"==\", \"condition_value\": filter_values}]\n if filter_type == 'exists':\n if filter_values is None or filter_values == '==':\n return [{'field' : field, 'filter_type' : 'exists', \"condition\":\"==\", \"condition_value\":\" \"}]\n elif filter_values == '!=':\n return [{'field' : field, 'filter_type' : 'exists', \"condition\":\"!=\", \"condition_value\":\" \"}]\n if filter_type == '<=' or filter_type == '>=' or filter_type == '>' or filter_type == '<' or filter_type == '==':\n if 
self.collection_schema(collection_name)[field] == 'date':\n return [{'field' : field, 'filter_type' : 'date', \"condition\":filter_type, \"condition_value\": filter_values}]\n elif self.collection_schema(collection_name)[field] == 'numeric':\n return [{'field' : field, 'filter_type' : 'numeric', \"condition\":filter_type, \"condition_value\":filter_values}]\n else:\n raise ValueError(f\"{filter_type} has not been defined. Please choose one of contains/exact_match/exists/categories/>=/<=/>/<.\")", "def filters(self):\n filters = IterDict()\n for key in self.FILTERS:\n filter = IterDict()\n filter_param = ((self.prefix or '') + '-' + key).strip('-')\n\n for value, display in self.fields[key].choices:\n choice = {}\n choice['value'] = value\n choice['display'] = display\n\n # These are raw values so they must come from data, and be\n # coerced to strings\n choice['active'] = str(value) == self.data.get(filter_param, '')\n\n params = copy.copy(self.data)\n # Filter by this current choice\n params[filter_param] = value\n choice['querystring'] = urllib.urlencode(params)\n # remove this filter\n params[filter_param] = ''\n choice['remove'] = urllib.urlencode(params)\n\n filter[value] = choice\n filters[key] = filter\n return filters", "def _build_filter_chain(self):\n result = None\n for klass in self.filters:\n tmp = klass(self, self.args, result)\n logging.info(\"%s %s\", klass, tmp.active)\n if tmp.active:\n result = tmp\n return result or (lambda x: x)", "def testCmdFilter(self):\n\n self.inv._devices = {\n 'abc': self.Device(),\n 'xyz': self.Device(),\n 'bogus': self.Device()\n }\n # Defaults\n self.assertEqual('Targets: ', self.inv._CmdFilter('targets', []))\n self.assertEqual('XTargets: ', self.inv._CmdFilter('xtargets', []))\n\n # New values\n self.inv._CmdFilter('targets', ['abc'])\n self.assertEqual('abc', self.inv._filters['targets'])\n self.inv._CmdFilter('targets', ['xyz'], append=True)\n self.assertEqual('abc,xyz', self.inv._filters['targets'])\n # Prepend with an 'x' to update the exclusions.\n self.inv._CmdFilter('xtargets', ['abc'])\n self.assertEqual('abc', self.inv._exclusions['xtargets'])\n self.assertRaises(ValueError, self.inv._CmdFilter, 'bogus', [])", "def get_request_filters(self):\n # build the compiled set of all filters\n requested_filters = OrderedDict()\n for filter_name, f in self.filters.items():\n requested_filters[filter_name] = f\n\n # exclusion params\n exclude_name = '%s!' 
% filter_name\n if related(self, exclude_name) in self.data:\n # deepcopy the *base* filter to prevent copying of model & parent\n f_copy = copy.deepcopy(self.base_filters[filter_name])\n f_copy.parent = f.parent\n f_copy.model = f.model\n f_copy.exclude = not f.exclude\n\n requested_filters[exclude_name] = f_copy\n\n return requested_filters", "def _ApplyTestFilter(testfilter, bot_spec):\n if testfilter:\n return [(botname, set(testfilter) | (tests & set(['compile'])))\n for botname, tests in bot_spec]\n else:\n return bot_spec", "def get_filter(self, target_property, order=2, initial_states=None, postfilters=None):\n return self.get_state_space_filter(self.state_list,\n target=target_property,\n order=order,\n initial_states=initial_states,\n postfilters=postfilters\n )", "def __init__( self, filters=None, prx=None ):\n\n if filters is None:\n if prx is None:\n\n self._filter_list = rts2_wwwapi.rts2comm().get_filters()\n\n elif type(filters) == list:\n self._filter_list = filters\n\n elif type(filters) == dict:\n raise TypeError(\"Filters are should not be a dict, it probably should be None\")\n # this assumes that the keywords of the dictionary are \n # the fitler names and the value is the filter number. \n\n\n #sort by filter number and reverse look up. \n # this doesn't work in python3\n #for key, value in sorted(filters.iteritems(), key=lambda (k,v): (v,k)):\n #self._filter_list.append( key )\n\n elif type(filters) == str or type(filters) == unicode:\n self._filter_list = str(filters).split()\n\n else:\n raise TypeError(\"Unexpected filter type {}, type must be string, unicode, list or dict\".format(type(filters)))", "def create_filters(date=None, start_date=None, end_date=None,\n distance_min=None, distance_max=None,\n velocity_min=None, velocity_max=None,\n diameter_min=None, diameter_max=None,\n hazardous=None):\n # Using Operator functions to compare the input parameters\n # with passed values.\n filters = []\n if date:\n filters.append(DateFilter(operator.eq, date))\n\n if start_date:\n filters.append(DateFilter(operator.ge, start_date))\n\n if end_date:\n filters.append(DateFilter(operator.le, end_date))\n\n if diameter_min:\n filters.append(DiameterFilter(operator.ge, float(diameter_min)))\n\n if diameter_max:\n filters.append(DiameterFilter(operator.le, diameter_max))\n\n if hazardous is not None:\n filters.append(HazardousFilter(operator.eq, hazardous))\n\n if distance_min:\n filters.append(DistanecFilter(operator.ge, distance_min))\n\n if distance_max:\n filters.append(DistanecFilter(operator.le, distance_max))\n\n if velocity_min:\n filters.append(VelocityFilter(operator.ge, velocity_min))\n\n if velocity_max:\n filters.append(VelocityFilter(operator.le, velocity_max))\n\n return filters", "def get_params(self):\n outputs = ['sample',\n 'ratio_params',\n 'despike_params',\n 'autorange_params',\n 'bkgcorrect_params']\n\n out = {}\n for o in outputs:\n out[o] = getattr(self, o)\n\n out['filter_params'] = self.filt.params\n out['filter_sequence'] = self.filt.sequence\n out['filter_used'] = self.filt.make_keydict()\n\n return out", "def buildPredicateHash(self, subject):\n properties = {}\n for s,p,o in self.store.triples((subject, None, None)):\n oList = properties.get(p, [])\n oList.append(o)\n properties[p] = oList\n return properties", "def make_filter_string(cls, filter_specification):\n registry = get_current_registry()\n visitor_cls = registry.getUtility(IFilterSpecificationVisitor,\n name=EXPRESSION_KINDS.CQL)\n visitor = visitor_cls()\n 
filter_specification.accept(visitor)\n return str(visitor.expression)", "def __init__(self, properties):\n self.attributes = {}\n self.output_info = {}\n for key, node in properties.walk():\n self.attributes[key[1]] = node.get_value().strip(\" '\")", "def _add_matcher_specific_properties_to_json(self):\n return {\n 'betweenMatcherData': {\n 'dataType': self._data_type,\n 'start': self._original_lower,\n 'end': self._original_upper\n }\n }", "def __init__(self, parent, file_manager):\n self.parent = parent.GetParent()\n\n # List of selected data\n self.data = parent.GetSelectedData()\n\n # Output tier name\n self.tier_name = parent.GetFiltererdTierName()\n\n # Output format\n self.annot_format = parent.GetAnnotationFormat()\n\n # List of files/tiers to filter\n self.file_manager = file_manager\n\n # for \"rel\" filter only\n try:\n self.y_tier_name = parent.GetRelationTierName()\n except AttributeError:\n self.y_tier_name = None\n\n # for \"tag\", \"loc\" and \"dur\" filters\n try:\n # Match all or match any of the filters\n self.match_all = parent.GetMatchAll()\n except AttributeError:\n self.match_all = None", "def curate_filter_info(self):\n filter_list = [\n self.sample_name, self.final_id, self.all_variant_count,\n self.filter_min_depth_count, self.filter_max_depth_count,\n self.filter_common_var_count, self.log_mut_count,\n self.cosmic_variant_counts, self.unknown_maf_count\n ]\n return filter_list" ]
[ "0.6691381", "0.6613962", "0.6211153", "0.60873485", "0.59203535", "0.5826266", "0.5766887", "0.546501", "0.54321504", "0.5387858", "0.5352514", "0.5318498", "0.53072774", "0.52971756", "0.52805036", "0.5272654", "0.5242891", "0.5188393", "0.51839644", "0.5144999", "0.51025003", "0.5051847", "0.5035122", "0.50207436", "0.5019541", "0.50063837", "0.49401972", "0.492547", "0.48733547", "0.4867145", "0.4856669", "0.48511672", "0.48431644", "0.482481", "0.48215854", "0.48149616", "0.48123717", "0.47999242", "0.4798842", "0.47984943", "0.47891647", "0.47887322", "0.47879675", "0.47866386", "0.4786621", "0.47817945", "0.47742867", "0.47644728", "0.47644183", "0.47586268", "0.47569698", "0.47559285", "0.4745274", "0.47399297", "0.47224578", "0.47186542", "0.47172886", "0.47120872", "0.47080216", "0.4704095", "0.47032496", "0.4673772", "0.46536112", "0.4653474", "0.46512398", "0.46475703", "0.46422127", "0.46408075", "0.46408075", "0.46408075", "0.46228883", "0.46189216", "0.4603587", "0.45921943", "0.45772877", "0.4571318", "0.45625612", "0.45618528", "0.45572534", "0.45533502", "0.45512986", "0.45389995", "0.45378882", "0.45376748", "0.45342803", "0.45323914", "0.45323074", "0.45268255", "0.45218474", "0.45130593", "0.4512805", "0.44989714", "0.44944865", "0.44924885", "0.44875085", "0.44869202", "0.44856432", "0.44757807", "0.4464809", "0.4454959" ]
0.76600534
0
Gets the properties of the Managed object specified.
def get_object_properties(vim, collector, mobj, type, properties):
    client_factory = vim.client.factory
    if mobj is None:
        return None
    usecoll = collector
    if usecoll is None:
        usecoll = vim.get_service_content().propertyCollector
    property_filter_spec = client_factory.create('ns0:PropertyFilterSpec')
    property_spec = client_factory.create('ns0:PropertySpec')
    property_spec.all = (properties is None or len(properties) == 0)
    property_spec.pathSet = properties
    property_spec.type = type
    object_spec = client_factory.create('ns0:ObjectSpec')
    object_spec.obj = mobj
    object_spec.skip = False
    property_filter_spec.propSet = [property_spec]
    property_filter_spec.objectSet = [object_spec]
    return vim.RetrieveProperties(usecoll, specSet=[property_filter_spec])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_object_properties(vim, collector, mobj, type, properties):\n client_factory = vim.client.factory\n if mobj is None:\n return None\n usecoll = collector\n if usecoll is None:\n usecoll = vim.service_content.propertyCollector\n property_filter_spec = client_factory.create('ns0:PropertyFilterSpec')\n property_spec = client_factory.create('ns0:PropertySpec')\n property_spec.all = (properties is None or len(properties) == 0)\n property_spec.pathSet = properties\n property_spec.type = type\n object_spec = client_factory.create('ns0:ObjectSpec')\n object_spec.obj = mobj\n object_spec.skip = False\n property_filter_spec.propSet = [property_spec]\n property_filter_spec.objectSet = [object_spec]\n return retrieve_properties_ex(vim,\n usecoll,\n [property_filter_spec])", "def get_properties(self):\n return self.properties", "def properties_get(self):\n return self._get('properties')", "def get_properties(self):\n return self.properties", "def _get_managed_objects_properties(self, vim_type, properties=None):\n # Get Root Folder\n root_folder = self.content.rootFolder\n\n if properties is None:\n properties = ['name']\n\n # Create Container View with default root folder\n mor = self.content.viewManager.CreateContainerView(\n root_folder, [vim_type], True)\n\n # Create Traversal spec\n traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(\n name=\"traversal_spec\",\n path='view',\n skip=False,\n type=vim.view.ContainerView\n )\n\n # Create Property Spec\n property_spec = vmodl.query.PropertyCollector.PropertySpec(\n type=vim_type, # Type of object to retrieved\n all=False,\n pathSet=properties\n )\n\n # Create Object Spec\n object_spec = vmodl.query.PropertyCollector.ObjectSpec(\n obj=mor,\n skip=True,\n selectSet=[traversal_spec]\n )\n\n # Create Filter Spec\n filter_spec = vmodl.query.PropertyCollector.FilterSpec(\n objectSet=[object_spec],\n propSet=[property_spec],\n reportMissingObjectsInResults=False\n )\n\n return self.content.propertyCollector.RetrieveContents([filter_spec])", "def getProperties(self):\n return self.properties", "def getProperties(self, owner: unicode) -> List[ghidra.program.model.util.PropertyMap]:\n ...", "def get_properties_for_a_collection_of_objects(vim, type,\r\n obj_list, properties):\r\n client_factory = vim.client.factory\r\n if len(obj_list) == 0:\r\n return []\r\n prop_spec = get_prop_spec(client_factory, type, properties)\r\n lst_obj_specs = []\r\n for obj in obj_list:\r\n lst_obj_specs.append(get_obj_spec(client_factory, obj))\r\n prop_filter_spec = get_prop_filter_spec(client_factory,\r\n lst_obj_specs, [prop_spec])\r\n return vim.RetrieveProperties(vim.get_service_content().propertyCollector,\r\n specSet=[prop_filter_spec])", "def properties(self):\r\n if self._properties is None:\r\n res = self._con.get(self._url, {'f':'json'})\r\n self._properties = PropertyMap(res)\r\n return self._properties", "def properties(self):\r\n if self._properties is None:\r\n res = self._con.get(self._url, {'f':'json'})\r\n self._properties = PropertyMap(res)\r\n return self._properties", "def object_attributes(obj):\n return obj.__dict__.items()", "def get_properties():", "def properties(self):\n return self._props", "def properties(self, filters={}):\n return self.__get_list_client(Property)(filters=filters)", "def getProperties():", "def properties(self):\n return self._properties", "def properties(self):\n return self._properties", "def getObjectProperty(self, owner: unicode, propertyName: unicode, saveableObjectClass: java.lang.Class, create: bool) -> 
ghidra.program.model.util.ObjectPropertyMap:\n ...", "def get_properties_for_a_collection_of_objects(vim, type,\n obj_list, properties):\n client_factory = vim.client.factory\n if len(obj_list) == 0:\n return []\n prop_spec = get_prop_spec(client_factory, type, properties)\n lst_obj_specs = []\n for obj in obj_list:\n lst_obj_specs.append(get_obj_spec(client_factory, obj))\n prop_filter_spec = get_prop_filter_spec(client_factory,\n lst_obj_specs, [prop_spec])\n return retrieve_properties_ex(vim,\n vim.service_content.propertyCollector,\n [prop_filter_spec])", "def getPropertiesAll():", "def getObjectPropertyMap(self, propertyName: unicode) -> ghidra.program.model.util.ObjectPropertyMap:\n ...", "def properties(self):\n\n return self._properties", "def properties(self, pk):\n return JsonResponse(self._get_properties(pk))", "def test_get_objects_with_properties(self):\n expected_result = self.spec.get(\"test_get_objects_with_properties\")\n expected_type = expected_result.get(\"_type\")\n expected_datastore_list = []\n\n for each_datastore in expected_result.get(\"datastore_infos\"):\n datastore_name = each_datastore[\"name\"]\n expected_datastore_list.append(datastore_name)\n datastore_list = []\n \n object_content = self.session.invoke_api(vim_util, \n 'get_objects', \n self.vim, \n 'Datastore', \n 100, \n ['name'])\n for one_object in object_content.objects:\n self.assertEqual(one_object.obj._type, expected_type)\n if hasattr(one_object, 'propSet'):\n dynamic_properties = one_object.propSet\n prop_dict = {}\n for prop in dynamic_properties:\n if prop.name == \"name\":\n datastore_list.append(prop.val)\n \n for each_ds_name in datastore_list:\n self.assertTrue(each_ds_name in datastore_list)", "def properties(self) -> Any:\n return pulumi.get(self, \"properties\")", "def get_object_dimension_props(obj):\n props = eval(\"obj.\" + DIMENSION_PROPERTY_NAMESPACE)\n return props", "def properties(self):\n return PropertyManager(session=self._session)", "def properties(self):\n return self.properties_with_uid[1:]", "def get_properties(\n self,\n ins: common.GetPropertiesIns,\n timeout: Optional[float],\n ) -> common.GetPropertiesRes:\n get_properties_msg = serde.get_properties_ins_to_proto(ins)\n res_wrapper: ResWrapper = self.bridge.request(\n ins_wrapper=InsWrapper(\n server_message=ServerMessage(get_properties_ins=get_properties_msg),\n timeout=timeout,\n )\n )\n client_msg: ClientMessage = res_wrapper.client_message\n get_properties_res = serde.get_properties_res_from_proto(\n client_msg.get_properties_res\n )\n return get_properties_res", "def getProperties(groupId, contractId):\n\tprint \"Getting properties for group %s and contract %s\" % (groupId, contractId)\n\tproperty_parameters = { \"contractId\":contractId, \"groupId\":groupId }\n\tproperty_result = getResult('/papi/v0/properties', property_parameters)\n\t\n\tif \"properties\" in property_result:\n\t\tproperty_items = property_result['properties']['items']\n\telse:\n\t\tproperty_items = []\n\n\treturn (property_items)", "def get_properties_code(self, obj):\n return []", "def test_get_object_properties(self):\n test_spec = self.spec.get(\"test_get_object_properties\")\n host_moref = vim_util.get_moref(test_spec.get(\"host_id\"), 'HostSystem')\n objects = self.session.invoke_api( vim_util, \n 'get_object_properties', \n self.vim, \n host_moref, \n [\"summary.hardware.numCpuCores\", \"summary.hardware.numCpuThreads\"]) \n self.assertIsNotNone(objects)\n expected_numCpuCores = test_spec.get(\"numCpuCores\")\n expected_numCpuThreads = 
test_spec.get(\"numCpuThreads\")\n numCpuCores = 0\n numCpuThreads = 0\n if hasattr(objects[0], 'propSet'):\n dynamic_properties = objects[0].propSet\n for prop in dynamic_properties:\n if prop.name == \"summary.hardware.numCpuCores\":\n numCpuCores = prop.val\n else:\n numCpuThreads = prop.val\n self.assertEqual(expected_numCpuCores, numCpuCores)\n self.assertEqual(expected_numCpuThreads, numCpuThreads)", "def get_dynamic_properties(vim, mobj, property_names, obj_type=None):\n if not obj_type:\n obj_type = mobj._type\n obj_content = get_object_properties(\n vim, None, mobj, obj_type, property_names)\n properties = {}\n if obj_content:\n dynamic_properties = obj_content[0].propSet\n for dynamic_property in dynamic_properties:\n property_name = dynamic_property.name\n property_value = dynamic_property.val\n properties[property_name] = property_value\n return properties", "def get_instance_properties(self):\n return [p for p in self.session.query(self.Property).all() \\\n if p.is_simple() and p.is_instance_property()]", "def retrieve_properties_ex(vim, prop_coll, spec_set, max_count=500):\n objcont = []\n client_factory = vim.client.factory\n opts = client_factory.create('ns0:RetrieveOptions')\n opts.maxObjects = max_count\n res = vim.RetrievePropertiesEx(prop_coll,\n specSet=spec_set,\n options=opts)\n while True:\n if res and res.objects:\n objcont.extend(res.objects)\n if hasattr(res, \"token\") and res.token:\n res = vim.ContinueRetrievePropertiesEx(prop_coll, token=res.token)\n else:\n break\n return objcont", "def do_get_property(self, pspec):\n\n if pspec.name == 'joined':\n return self._joined\n\n if self._get_properties_call is not None:\n print('%r: Blocking on GetProperties() because someone '\n 'wants property %s', self, pspec.name)\n self._get_properties_call.block()\n\n if pspec.name == 'id':\n return self._id\n elif pspec.name == 'name':\n return self._name\n elif pspec.name == 'color':\n return self._color\n elif pspec.name == 'type':\n return self._type\n elif pspec.name == 'tags':\n return self._tags\n elif pspec.name == 'private':\n return self._private", "def _get_object_prop(self, vm, attributes):\n result = vm\n for attribute in attributes:\n try:\n result = getattr(result, attribute)\n except (AttributeError, IndexError):\n return None\n return result", "def GetVMProperties(self):\n try:\n properties = self.vmInstance.get_properties(from_cache=False)\n LOGGER.info('All properties of virtual machine \"{}\":'.format(VM_NAME))\n\n for key in properties.keys():\n if isinstance(properties[key], dict):\n LOGGER.info(' {}:'.format(key))\n\n for subKey in properties[key].keys():\n if isinstance(properties[key], dict):\n\n LOGGER.info(' {}:'.format(subKey))\n for subSubKey in properties[key][subKey].keys():\n LOGGER.info(' {}: {}'.format(subSubKey, properties[key][subKey][subSubKey]))\n\n else:\n LOGGER.info(' {}: {}'.format(subKey, properties[key][subKey]))\n\n else:\n LOGGER.info(' {}: {}'.format(key, properties[key]))\n\n except Exception as e:\n properties = None\n LOGGER.debug(e)\n LOGGER.error(traceback.format_exc())\n LOGGER.error('An error occured while getting properties of virtual machine \"{}\"!'.format(VM_NAME))\n\n return properties", "def get_properties(self):\n\n properties = {}\n for iface_name in self.all_interfaces:\n iface = getattr(self, iface_name, None)\n if iface:\n properties.update(iface.get_properties())\n return properties", "def readProperties(self):\r\n print('not yet implemented')", "def get_all_properties(self) -> dict:\n return self._request(\n 
\"post\",\n URL,\n json=attr.asdict(\n Body(\"getAllProperties\", API_VERSION),\n filter=attr.filters.exclude(attr.fields(Body).params),\n ),\n )", "def get_all_properties(cls):\n return ['key', 'id'] + _.keys(cls._properties)", "def bson_properties(self):\n return []", "def base_properties(self):\n return self.properties.GetAll(self.mpris_base)", "def getPropertyDict(self):\n \n d = self.getChild('__properties')\n if d:\n return d.getDict()\n else:\n return {}", "def managed_objects(self):\n return self._managed_object_list", "def properties(self) -> Sequence['outputs.GoogleCloudContentwarehouseV1PropertyResponse']:\n return pulumi.get(self, \"properties\")", "def getProperties(self):\n return _libsbml.SBMLConverter_getProperties(self)", "def getmetadata(self, obj_id):\n return self.metadata[obj_id]", "def get_properties(self):\n return self.name, self.author, self.description, self.fmu_type, self.version, self.guid, self.tool, self.numStates", "def _properties(self) -> dict[str, dict[str, str]]:\n schema = self.schema(by_alias=False)\n if schema.get('properties') is not None:\n return schema.get('properties', {})\n return schema.get('definitions', {}).get(self.__class__.__name__, {}).get('properties', {})", "def get_properties(self):\n return COMMON_PROPERTIES", "def get_objects(vim, type, properties_to_collect=[\"name\"], all=False):\r\n client_factory = vim.client.factory\r\n object_spec = build_object_spec(client_factory,\r\n vim.get_service_content().rootFolder,\r\n [build_recursive_traversal_spec(client_factory)])\r\n property_spec = build_property_spec(client_factory, type=type,\r\n properties_to_collect=properties_to_collect,\r\n all_properties=all)\r\n property_filter_spec = build_property_filter_spec(client_factory,\r\n [property_spec],\r\n [object_spec])\r\n return vim.RetrieveProperties(vim.get_service_content().propertyCollector,\r\n specSet=[property_filter_spec])", "def properties(self):\r\n return resources.Properties(self)", "def get_result_properties(self):\n return [p for p in self.session.query(self.Property).all() \\\n if p.is_simple() and p.is_result_property()]", "def display_properties(self):\n return self._display_properties", "def player_properties(self):\n return self.properties.GetAll(self.player_interface)", "def properties(self) -> Optional[pulumi.Input['CosmosDBSpecPropertiesArgs']]:\n return pulumi.get(self, \"properties\")", "def properties(self) -> List[TaskPropertyModel]:\n return self._properties", "def get_object_information(self, obj_ref: str, **kwargs) -> Dict[str, Any]:\n return cd_client.get_object_information(\n DirectoryArn=self._dir_arn,\n ObjectReference={\n 'Selector': obj_ref\n },\n **kwargs\n )", "def props(self):\n return self._props", "def props(self):\n return self._props", "def getObject(self):\n return self.base.get(\"object\", [])", "def getMemberProperties(self, member, exclude_props=[], include_props=None):\n if not self.is_compatible: return {}\n props = {}\n user = member.getUser()\n for sheet in user.getOrderedPropertySheets():\n for item in sheet.propertyItems():\n field = item[0]\n value = item[1]\n if type(value) is UnicodeType:\n value = value.encode('UTF8')\n if not props.has_key(field): props[field] = value\n #id property isn't stored in property sheet, we can get it from member or user object\n props['id'] = member.getProperty('id')\n return props", "def get_properties(self):\n return irmc_common.COMMON_PROPERTIES", "def properties(self) -> List[ProductionFlowItemProperty]:\n return self._properties", "def properties(self) 
-> Optional[pulumi.Input['SmsChannelPropertiesArgs']]:\n return pulumi.get(self, \"properties\")", "def get_attributes(self, context: ResourceCommandContext, obj_ref: str) -> dict:\n return self.handler.get_attributes(obj_ref)", "def properties(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"properties\")", "def properties(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"properties\")", "def collect_properties(service_instance, view_ref, obj_type, path_set=None,\n include_mors=False):\n collector = service_instance.content.propertyCollector\n\n # Create object specification to define the starting point of\n # inventory navigation\n obj_spec = vmodl.query.PropertyCollector.ObjectSpec()\n obj_spec.obj = view_ref\n obj_spec.skip = True\n\n # Create a traversal specification to identify the path for collection\n traversal_spec = vmodl.query.PropertyCollector.TraversalSpec()\n traversal_spec.name = 'traverseEntities'\n traversal_spec.path = 'view'\n traversal_spec.skip = False\n traversal_spec.type = view_ref.__class__\n obj_spec.selectSet = [traversal_spec]\n\n # Identify the properties to the retrieved\n property_spec = vmodl.query.PropertyCollector.PropertySpec()\n property_spec.type = obj_type\n\n if not path_set:\n property_spec.all = True\n\n property_spec.pathSet = path_set\n\n # Add the object and property specification to the\n # property filter specification\n filter_spec = vmodl.query.PropertyCollector.FilterSpec()\n filter_spec.objectSet = [obj_spec]\n filter_spec.propSet = [property_spec]\n\n # Retrieve properties\n props = collector.RetrieveContents([filter_spec])\n\n data = []\n for obj in props:\n properties = {}\n for prop in obj.propSet:\n properties[prop.name] = prop.val\n\n if include_mors:\n properties['obj'] = obj.obj\n\n data.append(properties)\n return data", "def get_properties():\n properties = dict()\n properties['size'] = list()\n properties['color'] = list()\n properties['quality'] = list()\n u = models.Size.query.all()\n for i in u:\n properties['size'].append(i.size_name)\n u = models.Color.query.all()\n for i in u:\n properties['color'].append(i.color_name)\n u = models.Quality.query.all()\n for i in u:\n properties['quality'].append(i.quality_name)\n return make_response(jsonify(properties))", "def get_properties(self):\n self.unimpl_base_class()", "def get_managed_object(self):\n return self.key", "def properties(self):\n raise NotImplementedError", "def properties(self):\n response = self._client.get('server/properties')\n return ServerProperties.from_json(response.text)", "def _getPropertyInfo(self, propertyId):\n retList = []\n # this information might need to be implemented and requested from\n # NvCameraTools(?)\n # property type, count\n\n propInfo = getPropertyInfo(propertyId)\n if (propInfo != None):\n return propInfo\n else:\n raise NvCameraException(NvError_BadValue, \"Invalid property id!\")", "def get_property(self, client):\r\n client.getProperty()", "def properties(self):\n pass", "def json(self):\n rv = {\n prop: getattr(self, prop)\n for prop in self.__properties__\n if prop in vars(self)\n }\n rv.update(self._props)\n return rv", "def __properties__(self) -> dict:\r\n parameters = [\r\n d for d in dir(self) if (d[0] != \"_\") and (d.count(\"set\") == 0)\r\n and (d.count(\"_c\") == 0) and (d.count(\"_f\") == 0)\r\n ]\r\n\r\n return self.__as_json__(parameters)", "def __getattr__(self, name):\n\n value = rec_getattr(self.managedObject, name)\n\n if value:\n return value\n else:\n return 
getattr(self.managedObject, name)", "def get_property(self,name):\n return self.dp.get_property(name)", "def get_property(self,obj_property):\n return self.redis_server.hget(self.frbr_key,obj_property)", "def properties(self) -> pulumi.Input[Sequence[pulumi.Input['NotificationChannelPropertyArgs']]]:\n return pulumi.get(self, \"properties\")", "def list_all_properties(self):\n properties = list(self.property_only_graph.nodes())\n properties = [SchemaProperty(_prop, self) for _prop in properties]\n return properties", "def get_object_data(self, object_id):\n object_id = int(object_id)\n # Get Object's attributes\n with self.service.get_pg_connect() as connection:\n cursor = connection.cursor()\n cursor.execute(self.RUN_SQL, [object_id])\n data = cursor.fetchall()\n if not data:\n metrics[\"error\", (\"type\", \"object_not_found\")] += 1\n raise APIError(\"Object is not found\")\n # Build capabilities\n capabilities = ObjectCapabilities.get_capabilities(object_id)\n # Get object credentials\n (\n name,\n is_managed,\n profile,\n vendor,\n platform,\n version,\n scheme,\n address,\n port,\n user,\n password,\n super_password,\n remote_path,\n snmp_ro,\n pool_id,\n sw_image,\n auth_profile_id,\n ap_user,\n ap_password,\n ap_super_password,\n ap_snmp_ro,\n ap_snmp_rw,\n privilege_policy,\n p_privilege_policy,\n access_preference,\n p_access_preference,\n beef_storage_id,\n beef_path_template_id,\n ) = data[0]\n # Check object is managed\n if not is_managed:\n metrics[\"error\", (\"type\", \"object_not_managed\")] += 1\n raise APIError(\"Object is not managed\")\n if auth_profile_id:\n user = ap_user\n password = ap_password\n super_password = ap_super_password\n snmp_ro = ap_snmp_ro\n snmp_rw = ap_snmp_rw # noqa just to be\n #\n if privilege_policy == \"E\":\n raise_privileges = True\n elif privilege_policy == \"P\":\n raise_privileges = p_privilege_policy == \"E\"\n else:\n raise_privileges = False\n if access_preference == \"P\":\n access_preference = p_access_preference\n # Build credentials\n credentials = {\n \"name\": name,\n \"address\": address,\n \"user\": user,\n \"password\": password,\n \"super_password\": super_password,\n \"path\": remote_path,\n \"raise_privileges\": raise_privileges,\n \"access_preference\": access_preference,\n }\n if snmp_ro:\n credentials[\"snmp_ro\"] = snmp_ro\n if capabilities.get(\"SNMP | v2c\"):\n credentials[\"snmp_version\"] = \"v2c\"\n elif capabilities.get(\"SNMP | v1\"):\n credentials[\"snmp_version\"] = \"v1\"\n if scheme in CLI_PROTOCOLS:\n credentials[\"cli_protocol\"] = PROTOCOLS[scheme]\n if port:\n credentials[\"cli_port\"] = port\n elif scheme in HTTP_PROTOCOLS:\n credentials[\"http_protocol\"] = PROTOCOLS[scheme]\n if port:\n credentials[\"http_port\"] = port\n # Build version\n if vendor and platform and version:\n vendor = Vendor.get_by_id(vendor)\n version = {\n \"vendor\": vendor.code[0] if vendor.code else vendor.name,\n \"platform\": Platform.get_by_id(platform).name,\n \"version\": Firmware.get_by_id(version).version,\n }\n if sw_image:\n version[\"image\"] = sw_image\n else:\n version = None\n # Beef processing\n if scheme == BEEF and beef_storage_id and beef_path_template_id:\n mo = ManagedObject.get_by_id(object_id)\n tpl = Template.get_by_id(beef_path_template_id)\n beef_path = tpl.render_subject(object=mo)\n if beef_path:\n storage = ExtStorage.get_by_id(beef_storage_id)\n credentials[\"beef_storage_url\"] = storage.url\n credentials[\"beef_path\"] = beef_path\n return dict(\n profile=Profile.get_by_id(profile).name,\n 
pool_id=pool_id,\n credentials=credentials,\n capabilities=capabilities,\n version=version,\n )", "def get_property(self, property):\n return self.shell([\"getprop\", property])", "async def get_metadata_for_object(\n dbcon: DBConnection, object_type: str, object_id: int) -> Iterable[object_models.ObjectMetadata]:\n q = \"\"\"select metadata.object_type, metadata.object_id, metadata.key, metadata.value\n from object_metadata as metadata\n where metadata.object_type=%s and metadata.object_id=%s\"\"\"\n q_args = (object_type, object_id)\n return [object_models.ObjectMetadata(*row) for row in await dbcon.fetch_all(q, q_args)]", "async def get_metadata(dbcon: DBConnection, object_type: str, object_id: int) -> Dict[str, str]:\n q = \"\"\"select `key`, value from object_metadata where object_type=%s and object_id=%s\"\"\"\n q_args = (object_type, object_id)\n rows = await dbcon.fetch_all(q, q_args)\n metadict = {}\n for key, value in rows:\n metadict[key] = value\n return metadict", "def get(self, item):\n try:\n data = copy.deepcopy(getattr(self, item))\n except AttributeError:\n raise M2Error('Error while trying to get non-existent property `%s`' % item, False)\n except SQLAlchemyError:\n self.s.rollback()\n raise\n return data", "def _tp__get_typed_properties(self):\n try:\n return tuple(getattr(self, p) for p in self._tp__typed_properties)\n except AttributeError:\n raise NotImplementedError", "def get_properties_code(self, obj):\n # called only from generate_code_ctor when creating a class constructor to get the first lines\n # otherwise properties are part of the code returned by get_code\n prop_lines = []\n self._reset_vars()\n\n self._prepare_tmpl_content(obj)\n for line in self.tmpl_props:\n prop_lines.append(line % self.tmpl_dict)\n return prop_lines", "def get_computed_property_names(cls):\n computed_properties = {}\n\n for (property_name, instance) in cls._config_registry.iteritems():\n if property_name.startswith(COMPUTED_PROPERTY_PREFIX):\n computed_properties[property_name] = {\n 'description': instance.description\n }\n\n return computed_properties", "def json_properties(self):\n attributes = []\n all = vars(self)\n for var in all:\n if var[:1] != '_':\n attributes.append(var)\n if isinstance(self, db.Model):\n properties = self.properties().keys()\n for property in properties:\n if property[:1] != '_':\n attributes.append(property)\n return attributes", "def remote_properties(self):\n return dat2obj(pn_connection_remote_properties(self._impl))", "def Props(self):\n return self.__Props", "def _getPropName(self):\n return self.properties.keys()", "def get_plotable_instance_properties(self):\n return [p for p in self.session.query(self.Property).all() \\\n if p.is_plotable() and p.is_instance_property()]", "def document_properties(self):\n return self._document_properties" ]
[ "0.66242605", "0.6595597", "0.65718126", "0.65414923", "0.64137036", "0.63451725", "0.6315363", "0.6177473", "0.61558604", "0.61558604", "0.61421597", "0.61126566", "0.6106942", "0.6099833", "0.60941976", "0.6071322", "0.6071322", "0.6018031", "0.599956", "0.5993409", "0.59584665", "0.5901056", "0.5859067", "0.585625", "0.5852642", "0.58399284", "0.58364946", "0.5833915", "0.58327276", "0.5824847", "0.58209115", "0.5773833", "0.5709844", "0.56867075", "0.5682303", "0.5678647", "0.5674232", "0.56709415", "0.56325215", "0.56092346", "0.5604328", "0.55781144", "0.55600387", "0.5558263", "0.5544628", "0.5539992", "0.5525517", "0.55220985", "0.55191135", "0.55057555", "0.5479406", "0.546442", "0.5430579", "0.5427561", "0.5423588", "0.5404", "0.53893805", "0.53880244", "0.53796303", "0.5365269", "0.5346419", "0.5346419", "0.53463936", "0.53298694", "0.532587", "0.53195167", "0.53030306", "0.53013134", "0.52932596", "0.52932596", "0.5291461", "0.5290098", "0.52806854", "0.5280372", "0.5279807", "0.5262943", "0.5252096", "0.5246308", "0.5244251", "0.52391815", "0.523277", "0.5228555", "0.52257466", "0.52076626", "0.51886475", "0.51849467", "0.518052", "0.516255", "0.5159239", "0.51500887", "0.5149256", "0.5141922", "0.51230085", "0.5116834", "0.5109634", "0.51044965", "0.5099056", "0.5098796", "0.50953406", "0.5090758" ]
0.679616
0
Gets a particular property of the Managed Object.
def get_dynamic_property(vim, mobj, type, property_name):
    obj_content = \
        get_object_properties(vim, None, mobj, type, [property_name])
    property_value = None
    if obj_content:
        dynamic_property = obj_content[0].propSet
        if dynamic_property:
            property_value = dynamic_property[0].val
    return property_value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_property(self, property):\n return self.shell([\"getprop\", property])", "def get_property(self,name):\n return self.dp.get_property(name)", "def get_property(self, key):\n return self.properties.get(key)", "def get_property(self, name):\n if (not name in self.properties):\n raise KeyError(\"Key '\" + name + \"' not found\")\n return self.properties[name]", "def do_get_property(self, pspec):\n\n if pspec.name == 'joined':\n return self._joined\n\n if self._get_properties_call is not None:\n print('%r: Blocking on GetProperties() because someone '\n 'wants property %s', self, pspec.name)\n self._get_properties_call.block()\n\n if pspec.name == 'id':\n return self._id\n elif pspec.name == 'name':\n return self._name\n elif pspec.name == 'color':\n return self._color\n elif pspec.name == 'type':\n return self._type\n elif pspec.name == 'tags':\n return self._tags\n elif pspec.name == 'private':\n return self._private", "def do_get_property(self, spec):\n attribute = self.find_attribute(spec.name)\n if attribute is not None and isinstance(attribute, property):\n return attribute.fget(self)\n else:\n raise ValueError(\"No such property\", spec.name)", "def get_property(self,obj_property):\n return self.redis_server.hget(self.frbr_key,obj_property)", "def getProperty(self, identifier):\n if identifier in self.properties.keys():\n return self.properties[identifier]\n else:\n return None", "def getProperty(self, propertyName: unicode) -> unicode:\n ...", "def __getattr__(self, name):\n\n value = rec_getattr(self.managedObject, name)\n\n if value:\n return value\n else:\n return getattr(self.managedObject, name)", "def GetProperty(self, propertyname, arg = None):\n if self.serviceimplementation == 'basic':\n # Conventionally properties starting with X (and only them) may return a UNO object\n calltype = self.vbGet + (self.flgUno if propertyname[0] == 'X' else 0)\n if arg is None:\n return self.EXEC(self.objectreference, calltype, propertyname)\n else: # There are a few cases (Calc ...) 
where GetProperty accepts an argument\n return self.EXEC(self.objectreference, calltype, propertyname, arg)\n return None", "def get_property_value(self, property, db):\n try:\n for p in self.properties:\n if p.idProperty == int(property):\n return p.get_value()\n except:\n return None", "def GetProp(self, name):\n return self._props.get(name)", "def get(self, prop):\r\n prop_parts = prop.split(\".\")\r\n val = None\r\n for part in prop_parts:\r\n if val is None:\r\n val = self.obj.get(part)\r\n else:\r\n val = val.get(part)\r\n return val", "def _GetPropertyValue(entity, property):\n if property in datastore_types._SPECIAL_PROPERTIES:\n assert property == datastore_types._KEY_SPECIAL_PROPERTY\n return entity.key()\n else:\n return entity[property]", "def get(self, item):\n try:\n data = copy.deepcopy(getattr(self, item))\n except AttributeError:\n raise M2Error('Error while trying to get non-existent property `%s`' % item, False)\n except SQLAlchemyError:\n self.s.rollback()\n raise\n return data", "def properties_get(self):\n return self._get('properties')", "def prop(self):\n return getattr(self, name)", "def getPropertie(self, propname):\n if propname in self._getPropName():\n return self.properties[propname]\n else:\n raise Exception(\"la propiedad no existe\")", "def get_dynamic_property(vim, mobj, type, property_name):\n properties = get_dynamic_properties(vim, mobj, [property_name], type)\n property_value = None\n if property_name in properties:\n property_value = properties.get(property_name)\n return property_value", "def get_property(self, client):\r\n client.getProperty()", "def getprop(self, prop_name):\n return self.shell(\"getprop %s\" % prop_name)", "def __getattr__(self, name):\r\n\t\treturn self.properties[name]", "def __getattr__(self, name):\r\n\t\treturn self.properties[name]", "def getProperty(propname):", "def getSingleProperty(propertyId, groupId, contractId ):\n\tproperty_parameters = { \"contractId\":contractId, \"groupId\":groupId }\n\tproperty_result = getResult('/papi/v0/properties/%s/' % propertyId, \n\t\t\t\t\t\t\t\tproperty_parameters)\n\treturn (property_result)", "def get_custom_property(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetCustomProperty', self.handle)", "def get_property_value(self, property, db):\n if property == 'resultTime':\n return self.get_time()\n elif property == 'wallTime':\n return self.wallTime\n elif property == 'cost':\n return self.cost\n else:\n try:\n for pv in self.properties:\n if pv.idProperty == int(property):\n return pv.get_value()\n except:\n return None", "def property(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"property\")", "def get_property(dt, property, fieldname=None):\n\tif fieldname:\n\t\tprop = webnotes.conn.sql(\"\"\"\n\t\t\tselect value \n\t\t\tfrom `tabProperty Setter` \n\t\t\twhere doc_type=%s and field_name=%s\n\t\t\tand property=%s\"\"\", (dt, fieldname, property))\n\t\tif prop: \n\t\t\treturn prop[0][0]\n\t\telse:\n\t\t\tval = webnotes.conn.sql(\"\"\"\\\n\t\t\t\tSELECT %s FROM `tabDocField`\n\t\t\t\tWHERE parent = %s AND fieldname = %s\"\"\" % \\\n\t\t\t\t(property, '%s', '%s'), (dt, fieldname))\n\t\t\tif val and val[0][0]: return val[0][0] or ''\n\telse:\n\t\tprop = webnotes.conn.sql(\"\"\"\n\t\t\tselect value \n\t\t\tfrom `tabProperty Setter` \n\t\t\twhere doc_type=%s and doctype_or_field='DocType'\n\t\t\tand property=%s\"\"\", (dt, property))\n\t\tif prop: \n\t\t\treturn prop[0][0]\n\t\telse:\n\t\t\treturn webnotes.conn.get_value('DocType', dt, property)", "def 
dbus_property(self):\n return self._dbus_property", "def getp(self,p):\n return property_dict[p]", "def get_property(self):\r\n _get = lambda slf: self.getval()\r\n _set = lambda slf, val: self.setval(val)\r\n _del = lambda slf: self.delval()\r\n\r\n if self.column.can_delete:\r\n return property(_get, _set, _del)\r\n else:\r\n return property(_get, _set)", "def find_property(cls, prop_name):\n # Special-case - _id always maps to the primary key\n if prop_name == '_id':\n return property_from_field(django_model._meta.pk)\n\n # Otherwise, look through the Django model fields for a field\n # of the correct name. XXX should this be name or column?\n result = None\n for field in django_model._meta.fields:\n if field.name == prop_name:\n result = property_from_field(field)\n break\n return result", "def getAqProperty(self, prop):\n if getattr(aq_base(self), prop, None) is not None:\n return getattr(self, prop)\n classObj = self.getClassObject()\n if classObj:\n classObj = classObj.primaryAq()\n return getattr(classObj, prop)", "def getProperty(unique_name):", "def get_device_property(self, client, prop):\r\n value = client.getDeviceProperty(prop)\r\n return value", "def get_field_property(dt, fieldname, property):\n\tfield = webnotes.conn.sql(\"\"\"\n\t\tselect name, `%s` \n\t\tfrom tabDocField \n\t\twhere parent=%s and fieldname=%s\"\"\" % (property, '%s', '%s'), (dt, fieldname))\n\t\t\n\tprop = webnotes.conn.sql(\"\"\"\n\t\tselect value \n\t\tfrom `tabProperty Setter` \n\t\twhere doc_type=%s and field_name=%s and property=%s\"\"\", (dt, fieldname, property))\n\tif prop: \n\t\treturn prop[0][0]\n\telse:\n\t\treturn field[0][1]", "def __call__(self, arg):\n return self.get_property(arg)", "def get(key, nodename=None):\n return _get_property(key, nodename, None)", "def _get_model_db_property(self, property_name, default_value=None):\n model_db_config = self._resource_config.get(\"shared_resource\").get(\"model_db\")\n return model_db_config.get(property_name, default_value)", "def getprop(self, prop_name):\n return self.shell('getprop %s' % prop_name).decode('utf-8').strip()", "def GetWirelessProperty(self, networkid, prop):\n try:\n value = self.LastScan[networkid].get(prop)\n except IndexError:\n if self.debug_mode:\n print \"GetWirelessProperty: Index error occured trying to \" + \\\n \"retrieve property %s\" % prop\n value = \"\"\n try:\n value = misc.to_unicode(value)\n except:\n pass\n return value", "def getControlModelProperty( self, cCtrlName, cPropertyName ):\n oControlModel = self.getControlModel( cCtrlName )\n return oControlModel.getPropertyValue( cPropertyName )", "def get_property_value(self, name):\n # Return table field as property\n tableName, primKey = self.provider._split_path(self.path)\n if primKey is not None:\n ns, localName = util.split_namespace(name)\n if ns == (tableName + \":\"):\n conn = self.provider._init_connection()\n fieldlist = self.provider._get_field_list(conn, tableName)\n if localName in fieldlist:\n val = self.provider._get_field_by_primary_key(\n conn, tableName, primKey, localName\n )\n conn.close()\n return val\n conn.close()\n # else, let default implementation return supported live and dead properties\n return super().get_property_value(name)", "def getProperty(self, child, key):\n\n # First get the child's dictionary\n childDict = self.getInfoDict(child)\n if childDict:\n return childDict.get(key, None)", "def get_property(self, key):\n _key = DJANGO_CONF[key]\n return getattr(self, _key, CONF_SPEC[_key])", "def Get(self, interface_name, 
property_name):\n logger.debug(\n \"%r.Get(%r, %r) -> ...\",\n self, interface_name, property_name)\n try:\n props = self._dct_entry[interface_name]\n except KeyError:\n raise dbus.exceptions.DBusException(\n dbus.PROPERTIES_IFACE,\n \"No such interface {}\".format(interface_name))\n try:\n prop = props[property_name]\n except KeyError:\n raise dbus.exceptions.DBusException(\n dbus.PROPERTIES_IFACE,\n \"No such property {}:{}\".format(\n interface_name, property_name))\n try:\n value = prop.__get__(self, self.__class__)\n except dbus.exceptions.DBusException as exc:\n logger.error(\n \"%r.Get(%r, %r) -> (exception) %r\",\n self, interface_name, property_name, exc)\n raise\n except Exception as exc:\n logger.exception(\n \"runaway exception from Get(%r, %r)\",\n interface_name, property_name)\n raise dbus.exceptions.DBusException(\n dbus.PROPERTIES_IFACE,\n \"Unable to get property interface/property {}:{}: {!r}\".format(\n interface_name, property_name, exc))\n else:\n logger.debug(\n \"%r.Get(%r, %r) -> %r\",\n self, interface_name, property_name, value)\n return value", "def get(self, obj):\n obj = self._to_obj_tuple(obj)\n rows = self.query(object=obj)\n if rows:\n return rows[0]", "def pget(self, name):\n getter = attrgetter(name)\n attr = getter(self.pobj)\n return attr", "def pget(self, name):\n getter = attrgetter(name)\n attr = getter(self.pobj)\n return attr", "def get_role_property(self, obj, property_name): # pylint: disable=no-self-use\n # 2015-07-01 RoleDefinition: flattened, RoleAssignment: unflattened\n # 2018-01-01-preview RoleDefinition: flattened\n # 2020-04-01-preview RoleAssignment: flattened\n # 2022-04-01 RoleDefinition: flattened RoleAssignment: flattened\n # Get property_name from properties if the model is unflattened.\n if isinstance(obj, dict):\n if 'properties' in obj:\n obj = obj['properties']\n return obj[property_name]\n\n if hasattr(obj, 'properties'):\n obj = obj.properties\n return getattr(obj, property_name)", "def callPropertyGet(self, name = \"__value\", index = None):\n\t\tif name == 'IID':\n\t\t\treturn CSLValue(typeid = \"string\", value = self.callerInfo.IID)\n\t\tEntry = self.vtbl['p_' + name + \"_get\"]\n\t\t#localTbl = { 'vars':{}, 'status':0, 'props':{}, 'alias':{}, 'persistent':{}, 'instance':{}}\n\t\tpropEntry = Entry.parent\n\t\tprmName = propEntry.data['prm']\n\t\tlocalTbl = self.CSLCreateLocalTbl({}, {}, {}, copy.copy(propEntry.data['persistent']), copy.copy(propEntry.data['instance']))\n\t\tif prmName != \"\":\n\t\t\tif index == None:\n\t\t\t\tdefault = propEntry.data['default']\n\t\t\t\tif default != \"\":\n\t\t\t\t\tdefault = self.CSLCheckValue(default, localTbl)\n\t\t\t\telse:\n\t\t\t\t\tdefault = CSLValue(typeid = \"NULL\", value = None)\n\t\t\telse:\n\t\t\t\tdefault = index\n\n\t\t\tlocalTbl['vars'][prmName] = default\n\n\t\tself.procStack.append('p_' + name + '_get')\n\t\tself.lastLTbl = self.CSLInterpreter(Entry.child, localTbl)\n\t\tself.procStack.pop()\n\t\tl = self.lastLTbl['vars']\n\n\t\tself.debug(DEBUG_CALL, \"\\n\\nGetProp result: (\", name, \")\", l, \"haskey:\", l.has_key(name))\n\n\t\tif l != None and l.has_key(name):\n\t\t\tself.debug(DEBUG_CALL, \"Get Property return:\", l[name])\n\t\t\treturn copy.deepcopy(l[name])\n\t\telse:\n\t\t\treturn CSLValue(typeid = \"NULL\", value = None)", "def __get__(self, instance, owner):\n if self.name is None : self.name = Property.search(instance, owner,self)\n if self.name is not None:\n try:\n if instance is not None:\n return instance.__dict__[self.name]\n elif owner is not None:\n 
return owner.__dict__[self.name]\n else:\n raise ValueError(\"@instance and @owner can't both be null\") \n except (AttributeError,KeyError) as error:\n if not self.deleted:\n return self.default\n else:\n raise AttributeError(\"Cannot find %s in %s\" \n\t\t\t% (self,instance))\n else:\n raise AttributeError(\"Cannot find any property named %s in: %s\" % \n (self.name, owner))", "def getObjectProperty(self, owner: unicode, propertyName: unicode, saveableObjectClass: java.lang.Class, create: bool) -> ghidra.program.model.util.ObjectPropertyMap:\n ...", "def _get(self, name):\n return object.__getattribute__(self, name)", "def _get(self, name):\n return object.__getattribute__(self, name)", "def _get_object_prop(self, vm, attributes):\n result = vm\n for attribute in attributes:\n try:\n result = getattr(result, attribute)\n except (AttributeError, IndexError):\n return None\n return result", "def read_property(self, key: str) -> str:\n return self._env.read_property(key)", "def prop(self):\n return self._prop", "def get_attribute(self, attribute):\r\n return self.connection.get_instance_attribute(self.id, attribute)", "def propget(self, name):\r\n res = self._svn('propget', name)\r\n return res[:-1] # strip trailing newline\r", "def get_data_column ( self, object ):\n return getattr( object, self.name )", "def get(self, key):\n return getattr(self, key)", "def getprop(name):\n return _slp.getprop(name)", "def getObjectPropertyMap(self, propertyName: unicode) -> ghidra.program.model.util.ObjectPropertyMap:\n ...", "def getprop(self, key, strip=True):\n return self.adb.getprop(key, strip)", "def getattribute(objeto, name: str):\r\n # Get internal dict value matching name.\r\n value = objeto.__dict__.get(name)\r\n if not value:\r\n # Raise AttributeError if attribute value not found.\r\n return None\r\n # Return attribute value.\r\n return value", "def get(self, key):\n\n return self.dict_props.get(key)", "def get(self, obj):\n raise NotImplementedError", "def get(self, attr):\r\n return self.__dict__.get(attr)", "def get(self, att):\n return getattr(self, att)", "def getProperty(self, propID, time, objectID=0):\n key = (propID, objectID, time)\n if propID not in self.properties.index:\n\n raise APIError.APIError('Unknown property ID')\n\n if time not in self.properties.index:\n prop = interP.interpolateProperty(self.properties,\n time=time,\n propertyID=propID,\n objectID=objectID,\n method='linear')\n else:\n prop = self.properties[key]\n\n # Check pyro registering if applicaple\n if hasattr(self, '_pyroDaemon') and not hasattr(prop, '_PyroURI'):\n uri = self._pyroDaemon.register(prop)\n prop._PyroURI = uri\n\n return(prop)", "def get_attr(self, name, default=None):\n try:\n return self.managedobjectattribute_set.get(key=name).value\n except ManagedObjectAttribute.DoesNotExist:\n return default", "def get_properties(self):\n return self.properties", "def __getattr__(self, attr):\n return getattr(self.obj, attr)", "def __getattr__(self, attr):\n return getattr(self.obj, attr)", "def get_variable(self, name):\n return self._properties[name]", "def _getPropertyValue(self, name, target = ''):\n value = self._getPropertyValue(name)\n return value.getData() if value else None", "def for_property(self, name):\n return self[self.types_map.get(name, 'text')]", "def GetVmodlProperty(cls, variable):\n\n return cls.mappings.get(variable.lower(), variable)", "def property( self, prop ):\n raise NotImplementedError(\"property\")", "def get_property(self, key: str, default_value=None) -> Any:\n 
return_value = default_value\n if \"properties\" in self._node[\"app_data\"]:\n return_value = self._node[\"app_data\"][\"properties\"].get(key, default_value)\n\n return return_value", "def __getitem__(self, key):\n try:\n super(Hypertext, self).__getitem__(key)\n except AttributeError:\n return self.get_properties()[key]", "def get_attr(obj, attr):\n return getattr(obj, attr)", "def getProperty(self, node, name, propertyName, default=None):", "def getProperty(*args):", "def getProperty(*args):", "def getProperty(*args):", "def getProperty(*args):", "def getProperty(*args):", "def get(self, attribute: str):\n return getattr(self, attribute)", "def prop(self, name):\n ret = libxml2mod.xmlGetProp(self._o, name)\n return ret", "def get(self, name):\n try:\n item = getattr(self, name)\n return item\n except AttributeError:\n message = \"Unable to access object \\\"%s\\\" in class ForcefieldAtom\" % name\n raise ValueError(message)", "def get_owner(self, property_name):\n\n property_owner = self.db.read_value(property_name, \"owner\")\n return property_owner", "def getValue(self, name):\n\n return getattr(self, name)", "def get(self, field):\n try:\n return self._state[field]\n except:\n raise ValueError(\"There is no model field called {}\".format(field))", "def get_value_from_object(obj, key):\n if is_dict(obj):\n return obj.get(key)\n return getattr(obj, key, None)", "def salesforce_get(self, obj_name, obj_id):\n self.builtin.log(f\"Getting {obj_name} with Id {obj_id}\")\n obj_class = getattr(self.cumulusci.sf, obj_name)\n return obj_class.get(obj_id)", "def autoprops_generated_getter(self):\n return getattr(self, private_property_name)" ]
[ "0.7311495", "0.7275758", "0.7193801", "0.7128644", "0.70792645", "0.7021553", "0.6953444", "0.69131315", "0.6811504", "0.67831117", "0.67660815", "0.67022717", "0.66058373", "0.65965885", "0.65847474", "0.6570505", "0.65630543", "0.65239185", "0.63693666", "0.6347373", "0.63456976", "0.6343433", "0.6311547", "0.6311547", "0.63005453", "0.62755644", "0.6261743", "0.62436295", "0.62220156", "0.6184803", "0.6156776", "0.6132061", "0.6110336", "0.61080426", "0.60821617", "0.60760313", "0.60710466", "0.60659665", "0.6055728", "0.6032625", "0.60026425", "0.59973085", "0.5970372", "0.5967188", "0.5964436", "0.5948126", "0.5919567", "0.59152246", "0.5905255", "0.58674264", "0.58674264", "0.5857093", "0.5849535", "0.5811369", "0.58111227", "0.5809128", "0.5809128", "0.5800777", "0.5786743", "0.5769562", "0.57437927", "0.5720168", "0.5697643", "0.5650697", "0.56488234", "0.56190145", "0.5606201", "0.560555", "0.5601989", "0.56002355", "0.5597435", "0.5592326", "0.5587147", "0.55870867", "0.5583104", "0.5582361", "0.5582361", "0.55457133", "0.55270743", "0.5520104", "0.55194634", "0.55163205", "0.55130464", "0.5507137", "0.54998326", "0.5497704", "0.54660344", "0.54660344", "0.54660344", "0.54660344", "0.54660344", "0.54465383", "0.5443452", "0.5417614", "0.54124796", "0.54102856", "0.5402909", "0.53923965", "0.53873104", "0.5381787" ]
0.62482166
27
Gets the list of objects of the type specified.
def get_objects(vim, type, properties_to_collect=["name"], all=False): client_factory = vim.client.factory object_spec = build_object_spec(client_factory, vim.get_service_content().rootFolder, [build_recursive_traversal_spec(client_factory)]) property_spec = build_property_spec(client_factory, type=type, properties_to_collect=properties_to_collect, all_properties=all) property_filter_spec = build_property_filter_spec(client_factory, [property_spec], [object_spec]) return vim.RetrieveProperties(vim.get_service_content().propertyCollector, specSet=[property_filter_spec])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_objects_by_type(self, *types) -> List[TgnObject]:\n if not types:\n return list(self.objects.values())\n types_l = [o.lower() for o in types]\n return [o for o in self.objects.values() if o.type.lower() in types_l]", "def all(self, *args, **kwargs):\n list_to_return = []\n if not self.object_type:\n return list_to_return\n class_name = eval(self.object_type)\n if self.objects_id:\n for id in self.objects_id.split(';'):\n if id:\n list_to_return.append(class_name.objects.get(id=id))\n return list_to_return", "def get_objects_by_type(self, object_type):\n\n # Get dictionary of objects by type.\n try:\n object_dict = self.model_map['object'][object_type]\n except KeyError:\n # This object type isn't in the model map.\n return None\n\n # Extract the object dictionaries and put them in list for\n # return.\n out = [value[1] for value in object_dict.values()]\n\n # The 'out' list can be empty if the object type is mapped,\n # but all the objects have been removed.\n if len(out) == 0:\n return None\n else:\n return out", "def _get_objects(self, object_type, **kwargs):\r\n return self.parent_connection._get_objects(object_type,\r\n sys_id=self.sys_id,\r\n **kwargs)", "def get_all_by_type(self, obj_type):\n objects = []\n node = None\n data = []\n with lzma.open(os.path.join('resources', self.game, 'dumps',\n '{}.dump.xz'.format(obj_type)), 'rt', encoding='latin1') as df:\n for line in df.readlines():\n match = re.match('^\\*\\*\\* Property dump for object \\'\\S+ (\\S+)\\'.*$', line)\n if match:\n objects.append(match.group(1))\n if node:\n node.load_from_string_list(data)\n data = [line]\n node = self.get_node_by_full_object(match.group(1))\n else:\n data.append(line)\n\n if node:\n node.load_from_string_list(data)\n\n return objects", "def _get_objects(self, object_type, **kwargs):\r\n\r\n return self.parent_connection._get_objects(object_type,\r\n sys_id=self.sys_id,\r\n jbod_id=self.jbod_id,\r\n **kwargs)", "def _get_objects(self, object_type, **kwargs):\r\n\r\n return self.parent_connection._get_objects(object_type,\r\n sys_id=self.sys_id,\r\n brick_id=self.brick_id,\r\n **kwargs)", "def _get_objects(self, object_type, **kwargs):\r\n params = dict()\r\n if kwargs:\r\n for key, val in kwargs.items():\r\n if '_' in key:\r\n new_key = key.replace(\"_\",\"-\") \r\n params[new_key] = val\r\n else:\r\n params[key] = val\r\n try: \r\n response = requests.get(self.api_endpoint + object_type, \r\n auth=(self.user,self.pwd), \r\n params=params, verify=False)\r\n\r\n devices = json.loads(response.text)\r\n\r\n except requests.exceptions.RequestException as e:\r\n print \"Error:\",e\r\n return 1\r\n\r\n return_objects = []\r\n for i in devices.keys():\r\n if i == u\"links\":\r\n continue \r\n for j in devices[i]:\r\n return_objects.append(XtremObjFactory(object_type,j,self))\r\n\r\n return return_objects", "def get_all(self, objtype, **kwargs):\n h = \"Issue a GetObject to find an object\"\n kwargs['pytan_help'] = kwargs.get('pytan_help', h)\n\n clean_keys = ['obj', 'objtype', 'obj_map']\n clean_kwargs = pytan.utils.clean_kwargs(kwargs=kwargs, keys=clean_keys)\n\n obj_map = pytan.utils.get_obj_map(objtype=objtype)\n\n all_type = obj_map['all']\n api_obj_all = pytan.utils.get_taniumpy_obj(obj_map=all_type)()\n\n found = self._find(obj=api_obj_all, **clean_kwargs)\n return found", "def get_all_items(model, type):\n if(type == \"office\"):\n return model.get_all_offices()\n elif(type == \"party\"):\n return model.get_all_parties()\n return []", "def get_objects_or_children_by_type(self, 
*types):\n\n objects = self.get_objects_by_type(*types)\n return objects if objects else self.get_children(*types)", "def get_all_items(self, object_type):\n\n if object_type not in NetBoxObject.__subclasses__():\n raise ValueError(f\"'{object_type.__name__}' object must be a sub class of '{NetBoxObject.__name__}'.\")\n\n return self.base_structure.get(object_type.name, list())", "def get_children(self, *types: str) -> List[TgnObject]:\n pass", "def get_objects(filter_rule=\"**\", obj_type=\"*\"):\n objects = ix.api.OfObjectVector()\n project_root = ix.application.get_factory().get_project()\n ix.application.get_matching_objects(objects, filter_rule, project_root,\n obj_type)\n return objects", "def getItemsOfType(typeId):\n return Gw2Spidy._request('all-items', str(typeId))['results']", "def get_related_objects(self, obj_type):\n suffix = self._get_api_suffix(obj_type)\n if obj_type == self.__class__ and suffix == 'adversaries':\n return []\n endpoint = self._get_api_endpoint() + '/' + suffix\n results = self.tq.get(endpoint)\n if 'data' not in results:\n return []\n\n tr = []\n for obj in results['data']:\n inst = obj_type(self.tq)\n inst.fill_from_api_response(obj)\n tr.append(inst)\n return tr", "def list_objects(self, path):\n return [x for x in self.list_objects_generator(path)]", "def list(cls):\n return [cls.__dict__.get(name) for name in dir(cls) if (\n not callable(getattr(cls, name)) and not name.startswith(\"_\")\n )]", "def query_all(cls)->List:\n database.cursor.execute(\"SELECT * FROM {}\".format(cls.table_name))\n items = database.cursor.fetchall()\n return [cls.to_object(item) for item in items]", "def get_objects(self):\n return self._objects", "def getTypesList():\n return Gw2Spidy._request('types')['results']", "def getList(self):", "def getList(self):", "def getList(self):\n pass", "def list(cls, standalone=True):\n\t\tif standalone:\n\t\t\treturn [i.name for i in cls]\n\t\telse:\n\t\t\treturn [i.name for i in FetcherEnum if issubclass(i.value, Fetcher)]", "def GetObjects(self): \r\n return self.model.GetObjects()", "def get_field_list_by_type(self, field_type):\n field_list = []\n for field in self.fields:\n if field.get_field_type() == field_type:\n field_list.append(field)\n return field_list", "def list(self, request):\n\n game_type_objects = GameType.objects.all()\n\n # Note additonal 'many=True'\n # It's for serializing a list of objects instead of one.\n serialized_game_types = GameTypeSerializer(\n game_type_objects,\n many=True,\n context={'request': request}\n )\n\n return Response(serialized_game_types.data)", "def get_components_by_type(\n self, component_type: Union[type, Tuple[type, ...]]\n ) -> List[Any]:\n return self._manager.get_components_by_type(component_type)", "def _queryset(self):\n return self.type.objects.filter(id__in=self.ids)", "def obj_get_list(self, request=None, **kwargs):\n filters = None\n\n if hasattr(request, 'GET'):\n filters = request.GET\n\n applicable_filters = self.build_filters(filters=filters)\n applicable_filters.update(kwargs)\n\n try:\n return self.get_object_list(request).filter(**applicable_filters)\n except ValueError, e:\n raise NotFound(\"Invalid resource lookup data provided (mismatched type).\")", "def getlist(self, key, type=None):\n if key not in self:\n return []\n values = super().__getitem__(key)\n if type is not None:\n values = [type(value) for value in values]\n return values", "def objects (self):\n return InternalObjectList (self)", "def get_all(cls):\n if Model.data_connector:\n with 
Model.data_connector.u_lock:\n return Model.data_connector.get_all_objects(cls)\n \n return []", "def getItems(self): \n items = []\n if self.itemCount > 0:\n \n site = getSite()\n \n \n # Make string path relative to the site root\n # E.g. string path \"news\" becomes \"/yoursiteid/news\"\n site_path = site.getPhysicalPath();\n \n path = \"/\".join(site_path) + \"/\" + self.path \n \n #if self.itemPortalType2 != None:\n # types.append(self.itemPortalType2) \n \n #print \"Querying by:\" + type + \" \" + path\n content_by_type = self.context.portal_catalog(path={ \"query\": path, \"depth\" :9 }, \n sort_on=\"created\", \n sort_order=\"reverse\")[0:self.itemCount]\n\n \n items += [ brain.getObject() for brain in content_by_type ]\n\n return items", "def getTypes(self):\n return self._doRequest(self.httpClient.getTypes)", "def get_components_by_type(\n self, component_type: Union[type, Tuple[type, ...]]\n ) -> List[Any]:\n return [c for c in self._components if isinstance(c, component_type)]", "def list(self, request):\n gametypes = GameType.objects.all()\n\n # many=True kwarg is necessary if serializing a list of objects instead of single object\n serializer = GameTypeSerializer(gametypes, many=True, context={'request': request})\n return Response(serializer.data)", "def get_items_of_type(self, item_type):\n return (item for item in self.items if item.get_type() == item_type)", "def get_all(cls):\n return db_session.query(cls).order_by(cls.name).all()", "def get_objects_with_cmodel(self, cmodel_uri, type=None):\n uris = self.risearch.get_subjects(modelns.hasModel, cmodel_uri)\n return [self.get_object(uri, type) for uri in uris]", "def all(cls):\n api = BuslineAPI()\n try:\n objects = api.all()\n except ApiException:\n objects = cls.objects.all()\n return objects", "def get_list(self):\n return self.__repository.get_all()", "def find_objects_by_type():\n try:\n keyword = request.form[\"keyword\"]\n object_type = request.form[\"object_type\"]\n\n # Get entities based on the selection\n entities = g.user.get_api().get_by_object_types(keyword, object_type)\n\n # Parse response object into table data\n data = raw_entities_to_table_data(entities)\n\n # If no entities were found reutrn with failure state and message\n result = get_result_template()\n if len(data[\"data\"]) == 0:\n result[\"status\"] = \"FAIL\"\n result[\"message\"] = 'No entities of type \"{TYPE}\" were found.'.format(\n TYPE=object_type\n )\n else:\n result[\"status\"] = \"SUCCESS\"\n result[\"data\"] = {\"table_field\": data}\n return jsonify(result_decorator(result))\n\n except Exception as e:\n result = get_result_template()\n result[\"status\"] = \"FAIL\"\n result[\"message\"] = str(e)\n return jsonify(result_decorator(result))", "def objects(self, cls):\n for name, info in direct_fields(self.__class__).items():\n if issubclass(cls, info.sub_fields[0].type_):\n return getattr(self, name)\n raise TypeError(cls)", "def get_all(self, context, type_):\n types = None\n if type_ and isinstance(type_, basestring):\n types = type_.strip(\",\").split(\",\")\n\n try:\n db_resource_mgrs_data = self.db_api.get_all_resource_managers(\n context, types=types)\n\n _resource_mgrs_data = []\n for db_resource_mgr_data in db_resource_mgrs_data:\n _resource_mgrs_data.append(_make_response(\n db_resource_mgr_data))\n except Exception as e:\n msg = (\"Error retrieving the 'resource managers' reason : %s\"\n % e.message)\n LOG.exception(msg)\n raise exception.RetrieveException(e.message)\n return _resource_mgrs_data", "def get_net_objs(host, 
object_type, refresh=False):\n if refresh:\n host.configManager.networkSystem.RefreshNetworkSystem() # Pick up any recent changes\n\n net_type = object_type.lower()\n if net_type == \"portgroup\":\n objects = host.configManager.networkSystem.networkInfo.portgroup\n elif net_type == \"vswitch\":\n objects = host.configManager.networkSystem.networkInfo.vswitch\n elif net_type == \"proxyswitch\":\n objects = host.configManager.networkSystem.networkInfo.proxySwitch\n elif net_type == \"vnic \":\n objects = host.configManager.networkSystem.networkInfo.vnic\n elif net_type == \"pnic \":\n objects = host.configManager.networkSystem.networkInfo.pnic\n else:\n logging.error(\"Invalid type %s for get_net_objs\", object_type)\n return None\n return list(objects)", "def get_all_documents(self, type: Type) -> List[DocumentReference]:\n runners = []\n collection = self.client.collection(type.value).list_documents()\n for document in collection:\n runners.append(document)\n\n return runners", "def find(self, model_type=\"Model\", filter=None):\n collection = self._db[model_type]\n print 'mongo.list()'\n if filter: \n objs = list(collection.find(filter))\n else:\n objs = list(collection.find())\n print 'objs are {}'.format(objs)\n result = []\n # hack to convert uuid to string\n for obj in objs:\n obj['_id'] = str(obj['_id'])\n result += [obj, ]\n return objs", "def create_list(self, args, l_type):\n\n scraper_types = [\n \"subreddit\",\n \"redditor\",\n \"comments\"\n ]\n\n index = scraper_types.index(l_type)\n item_list = [item[0] for item in self._list_switch(args, index)]\n\n return item_list", "def get_list(self, *args, **kwargs):\n pass", "def get_list(self, *args, **kwargs):\n pass", "def find_objects(self, ObjectClass, **kwargs):\n return ObjectClass.objects(**kwargs).all()", "def __toObjList(self, cursor, className):\n lst = []\n for row in cursor.fetchall():\n # first convert row to a dictionary\n rowdict={}\n for idx, col in enumerate(cursor.description):\n memberName = col[0].strip().replace(' ', '__')\n rowdict[memberName] = row[idx]\n # create class of type and set properties to dictionary\n instance = type(className, (), rowdict)\n lst.append(instance)\n return lst", "def list(self):\n return self.objects.all()", "def get_catalogs_by_genus_type(self, *args, **kwargs):\n # Implemented from kitosid template for -\n # osid.resource.BinLookupSession.get_bins_by_genus_type\n catalogs = self._get_provider_session('catalog_lookup_session').get_catalogs_by_genus_type(*args, **kwargs)\n cat_list = []\n for cat in catalogs:\n cat_list.append(Catalog(self._provider_manager, cat, self._runtime, self._proxy))\n return CatalogList(cat_list)", "def netsuite_get_all(self, record_type: str) -> list:\n if record_type is None:\n raise ValueError(\n \"Parameter 'record_type' is required for kw: netsuite_get_all\"\n )\n return self.client.getAll(recordType=record_type)", "def get_objects(vim, type, properties_to_collect=None, all=False):\n if not properties_to_collect:\n properties_to_collect = [\"name\"]\n\n client_factory = vim.client.factory\n trav_spec = vim_util.build_recursive_traversal_spec(client_factory)\n object_spec = vim_util.build_object_spec(client_factory,\n vim.service_content.rootFolder,\n [trav_spec])\n property_spec = vim_util.build_property_spec(\n client_factory, type_=type,\n properties_to_collect=properties_to_collect,\n all_properties=all)\n property_filter_spec = vim_util.build_property_filter_spec(client_factory,\n [property_spec],\n [object_spec])\n property_collector = 
vim.service_content.propertyCollector\n return retrieve_properties_ex(vim,\n property_collector,\n [property_filter_spec])", "def list(self):\n return self._get_list()", "def get_children(self, *seg_type: str) -> List[BaseSegment]:\n buff = []\n for seg in self.segments:\n if seg.is_type(*seg_type):\n buff.append(seg)\n return buff", "def get(self):\r\n\t\treturn list(self)", "def obj_list(self):\n return self._obj_list", "def getList(self):\n\treturn self.list", "def getObjects(self, idList=None):\n\n if idList is None:\n idList = ''\n if idList in ['', 'published', 'unpublished']:\n return self.iterateAllPaginated('objects/%s' % idList, func=vsdModels.APIObject._create)\n\n items = []\n for curId in idList:\n items.append(self.getObject(curId))\n return items", "def get_all(self, start_at, limit, order=None):\n result = []\n objects = []\n if limit == 0:\n objects = self.items[start_at:]\n else:\n objects = self.items[start_at:(start_at + limit)]\n for item in objects:\n result.append(FileDict(item))\n return result", "def list(self, teamId=None, type=None, sortBy=None, max=100,\n **request_parameters):\n check_type(teamId, basestring, optional=True)\n check_type(type, basestring, optional=True)\n check_type(sortBy, basestring, optional=True)\n check_type(max, int, optional=True)\n\n params = dict_from_items_with_values(\n request_parameters,\n teamId=teamId,\n type=type,\n sortBy=sortBy,\n max=max,\n )\n\n # API request - get items\n items = self._session.get_items(API_ENDPOINT, params=params)\n\n # Yield room objects created from the returned items JSON objects\n for item in items:\n yield self._object_factory(OBJECT_TYPE, item)", "def objects():\n url_parameters = dict(request.args)\n \n main_query = AstroObject.query\n \n if url_parameters.has_key(\"type\"):\n object_types = []\n for object_type in url_parameters[\"type\"]:\n # Validate object type input\n if object_type not in [\"star\", \"galaxy\", \"nebula\", \"other\"]:\n # some kind of error?\n pass\n else:\n object_types.append(object_type)\n \n main_query = main_query.filter(AstroObject.type.in_(object_types))\n \n if url_parameters.has_key(\"utc_time\"):\n pass\n \n if url_parameters.has_key(\"date\"):\n pass\n \n if url_parameters.has_key(\"latitude\"):\n pass\n \n if url_parameters.has_key(\"longitude\"):\n pass\n \n objects = main_query.all()\n \n return render_template('objects.html', objects=objects)", "def listObjects(**kwargs):\n cols = 'objects.name'\n tables = 'objects'\n\n if kwargs == {}:\n params = '1'\n return [Dso(str(item[0]), True) for item in _queryFetchMany(cols, tables, params)]\n\n paramslist = []\n if \"catalog\" in kwargs:\n if kwargs[\"catalog\"].upper() == \"NGC\" or kwargs[\"catalog\"].upper() == \"IC\":\n paramslist.append('name LIKE \"' + kwargs[\"catalog\"].upper() + '%\"')\n elif kwargs[\"catalog\"].upper() == \"M\":\n paramslist.append('messier != \"\"')\n else:\n raise ValueError('Wrong value for catalog filter. 
[NGC|IC|M]')\n if \"type\" in kwargs:\n paramslist.append('type = \"' + kwargs[\"type\"] + '\"')\n if \"constellation\" in kwargs:\n paramslist.append('const = \"' + kwargs[\"constellation\"].capitalize() + '\"')\n if \"minSize\" in kwargs:\n paramslist.append('majax >= ' + str(kwargs[\"minSize\"]))\n if \"maxSize\" in kwargs:\n paramslist.append('majax < ' + str(kwargs[\"maxSize\"]) + ' OR majax = \"\"')\n if \"upToBMag\" in kwargs:\n paramslist.append('bmag <= ' + str(kwargs[\"upToBMag\"]))\n if \"upToVMag\" in kwargs:\n paramslist.append('vmag <= ' + str(kwargs[\"upToVMag\"]))\n if \"withNames\" in kwargs and kwargs[\"withNames\"] == True:\n paramslist.append('commonnames != \"\"')\n\n if paramslist == []:\n raise ValueError(\"Wrong filter name.\")\n\n params = \" AND \".join(paramslist)\n return [Dso(item[0], True) for item in _queryFetchMany(cols, tables, params)]", "def by_type(self, type):\n return self.filter(related_type__title=type)", "def _get_embedded_objects(self):\n return [getattr(self, name) for name, field in self._get_fields().items() if isinstance(field, fields.Object)]", "def list(self):\n url = self._resource_name\n return self._get(url)", "def list(self, request):\n product_types = ProductType.objects.all()\n serializer = ProductTypeSerializer(product_types, many=True)\n\n return Response(serializer.data, status=status.HTTP_200_OK)", "def getList(self):\n assert self._is_int is False\n return self._list", "def get_object_list(self, request):\r\n\r\n self._reset_collection()\r\n return self._meta.queryset.clone()", "def get_objects(self, subject=None, predicate=None):\n\n # Get the result of the search\n results = self.rdf.objects(subject, predicate)\n return list(results)", "def objects(self):\n\t\treturn self._objects", "def by_id(self, type_id):\n return EntryList([entry for entry in self.data if entry.type_id == type_id])", "def _list_objects(src: str)->list:\n if _is_s3(src):\n return aws_s3_ls(src)\n else:\n if _is_dir(src):\n return _list_dir(src)\n else:\n return [src]", "def get_all_objects():\n gcl = gc.get_objects()\n olist = []\n seen = {}\n # Just in case:\n seen[id(gcl)] = None\n seen[id(olist)] = None\n seen[id(seen)] = None\n # _getr does the real work.\n _getr(gcl, olist, seen)\n return olist", "def get_types(self) :\n\n return list(self.types)[1:]", "def list(self):\n resources = self._os_resource_manager.list()\n resource_list = []\n for resource in resources:\n resource_list.append(self._resource_class(id=resource.id,\n name=resource.name))\n return resource_list", "def get_related_objects(self):\n result = []\n if self['name'] != None:\n tmp = ObjectDefinition.objects.filter(use__has_field=self['name'], object_type=self['object_type'])\n for i in tmp: result.append(i)\n return result", "def getList(self):\n return self.list", "def get_types(self):\n return self.types", "def as_list(self, sub_type=None):\n if sub_type is None:\n return self.as_type(list)\n\n return [\n item.as_type(sub_type)\n for item in self.as_type(list)\n ]", "def get(self, *args):\n return _libsbml.ListOfCompartmentTypes_get(self, *args)", "def get_catalogs_by_record_type(self, *args, **kwargs):\n # Implemented from kitosid template for -\n # osid.resource.BinLookupSession.get_bins_by_record_type\n catalogs = self._get_provider_session('catalog_lookup_session').get_catalogs_by_record_type(*args, **kwargs)\n cat_list = []\n for cat in catalogs:\n cat_list.append(Catalog(self._provider_manager, cat, self._runtime, self._proxy))\n return CatalogList(cat_list)", "def types():\n 
types = session.query(Type).all()\n return jsonify(types=[t.name for t in types])", "def getListOfSpeciesTypes(self, *args):\n return _libsbml.Model_getListOfSpeciesTypes(self, *args)", "def get(self):\r\n return get_all()", "def fetch_all(self):\n return list(iter(self))", "def _get_objects(self, cr, uid, name, args=[], ids=None): \n obj = self.pool.get(name)\n if not ids:\n ids = obj.search(cr, uid, args)\n return obj.browse(cr, uid, ids)", "def get_objects_with_attribute(self, obj_type: str, attribute: str, value: str) -> List[TgnObject]:\n return [o for o in self.get_objects_by_type(obj_type) if o.get_attribute(attribute) == value]", "def list_type_access(self, volume_type):\n return self._impl.list_type_access(volume_type)", "def _get_objects(self, object_query):\n object_name = object_query[\"object_name\"]\n expression = object_query.get(\"filters\", {}).get(\"expression\")\n\n if expression is None:\n return set()\n object_class = self.object_map[object_name]\n\n query = object_class.query\n filter_expression = self._build_expression(\n expression,\n object_class,\n object_query.get('fields', []),\n )\n if filter_expression is not None:\n query = query.filter(filter_expression)\n requested_permissions = object_query.get(\"permissions\", \"read\")\n if requested_permissions == \"update\":\n objs = [o for o in query if permissions.is_allowed_update_for(o)]\n else:\n objs = [o for o in query if permissions.is_allowed_read_for(o)]\n\n return objs", "def obj_get_list(self, request=None, **kwargs):\n filter_object = self.get_filter_object(request)\n list = self.get_collection(request).find(filter_object)\n order_field, direction = self.get_order_field_and_direction(request)\n \n if (order_field is not None):\n list.sort(order_field, direction)\n \n return map(Document, list)", "def get_queryset(self):\n request = self.request\n # Allow pages to be filtered to a specific type\n page_type = request.GET.get('type', 'wagtailcore.Page')\n try:\n models = page_models_from_string(page_type)\n except (LookupError, ValueError):\n raise BadRequestError(\"type doesn't exist\")\n if not models:\n models = [Page]\n if len(models) == 1:\n qs = models[0].objects.all()\n else:\n qs = Page.objects.all()\n # Filter pages by specified models\n qs = filter_page_type(qs, models)\n if self.revision_wanted is not None or self.is_preview:\n # Get pages that the current user has permission to publish\n qs = publishable_pages(self.user, qs)\n else:\n # Get live pages that are not in a private section\n qs = qs.live().public()\n # Filter by site\n return qs.descendant_of(request.site.root_page, inclusive=True)", "async def get_metadata_for_object_type(\n dbcon: DBConnection, object_type: str) -> Iterable[object_models.ObjectMetadata]:\n q = '''select metadata.object_type, metadata.object_id, metadata.key, metadata.value\n from object_metadata as metadata\n where metadata.object_type=%s'''\n return [object_models.ObjectMetadata(*row) for row in await dbcon.fetch_all(q, (object_type,))]", "def get_objects(slice, plugin_type, klass, **kwargs):\n try:\n# plugins_modules = settings.PLUGIN_LOADER.plugin_settings.get(plugin_type).get(\"general\").get(\"aggregate_plugins\")[0]\n plugins_modules = PLUGIN_LOADER.plugin_settings.get(plugin_type).get(\"general\").get(\"aggregate_plugins\")[0]\n p_agg = plugins_modules.split('.')[-1]\n p_models_path = '.'.join(plugins_modules.split('.')[:-1])\n try:\n model = getattr(__import__(p_models_path,fromlist=[klass]), klass)\n except: \n try: \n model = 
getattr(__import__(p_models_path+'.'+klass,fromlist=[klass]), klass)\n except:\n pass \n # Filters resources by slice (will not return any aggregate's resource from another slice)\n objects = model.objects.filter(**kwargs)\n #print \"objects: %s\" % str(objects)\n for obj in objects:\n if not (obj != None and obj.aggregate in slice._get_aggregates()):\n raise Exception\n return objects\n except Exception,e:\n print \"[ERROR] PluginCommunicator could not obtain object. Details: %s \" % str(e)\n return None", "def get_by_cls(self, cls: GObject.GType) -> typ.List[Gst.Element]:\n elements = self._pipeline.iterate_elements()\n if isinstance(elements, Gst.Iterator):\n # Patch \"TypeError: ‘Iterator’ object is not iterable.\"\n # For versions we have to get a python iterable object from Gst iterator\n _elements = []\n while True:\n ret, el = elements.next()\n if ret == Gst.IteratorResult(1): # GST_ITERATOR_OK\n _elements.append(el)\n else:\n break\n elements = _elements\n\n return [e for e in elements if isinstance(e, cls)]" ]
[ "0.7903194", "0.7377341", "0.73040676", "0.70410174", "0.692821", "0.69026285", "0.68966556", "0.6836504", "0.6808106", "0.6714846", "0.6546497", "0.65462595", "0.6545677", "0.6539864", "0.6508331", "0.6492654", "0.64498925", "0.64006305", "0.63523966", "0.63385695", "0.6306838", "0.6277879", "0.6277879", "0.6276692", "0.6276028", "0.6260296", "0.62467736", "0.62427086", "0.6227092", "0.6191076", "0.6153802", "0.6137789", "0.61282873", "0.61280245", "0.61233544", "0.61184275", "0.6105981", "0.61047465", "0.61023575", "0.6092883", "0.6086467", "0.608274", "0.6080745", "0.60638493", "0.6058299", "0.6050332", "0.60433215", "0.60398406", "0.60034287", "0.60027033", "0.59938675", "0.59938675", "0.5983526", "0.59729344", "0.59683275", "0.59647065", "0.595865", "0.59533346", "0.59251255", "0.59174466", "0.5888773", "0.5886688", "0.58787376", "0.586956", "0.58667207", "0.5864716", "0.5862779", "0.585581", "0.5849553", "0.5836053", "0.58347404", "0.5832688", "0.580653", "0.58047116", "0.5801734", "0.58001244", "0.57991874", "0.5798947", "0.5796562", "0.57919335", "0.5790815", "0.5785872", "0.5782384", "0.57817215", "0.5771004", "0.57685137", "0.57677585", "0.5765627", "0.57602495", "0.5757383", "0.5755671", "0.5754523", "0.5754514", "0.5754204", "0.5751181", "0.57464", "0.57431513", "0.5738066", "0.5737702", "0.5725592" ]
0.6089714
40
Builds the Property Spec Object.
def get_prop_spec(client_factory, spec_type, properties):
    prop_spec = client_factory.create('ns0:PropertySpec')
    prop_spec.type = spec_type
    prop_spec.pathSet = properties
    return prop_spec
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_property_spec(client_factory, type=\"VirtualMachine\",\r\n properties_to_collect=[\"name\"],\r\n all_properties=False):\r\n property_spec = client_factory.create('ns0:PropertySpec')\r\n property_spec.all = all_properties\r\n property_spec.pathSet = properties_to_collect\r\n property_spec.type = type\r\n return property_spec", "def build_property_filter_spec(client_factory, property_specs, object_specs):\r\n property_filter_spec = client_factory.create('ns0:PropertyFilterSpec')\r\n property_filter_spec.propSet = property_specs\r\n property_filter_spec.objectSet = object_specs\r\n return property_filter_spec", "def build(self, spec, prefix):\n make()", "def _build_pod_spec(self):\n logger.debug(\"Building Pod Spec\")\n crds = []\n try:\n crds = [\n yaml.load(Path(f).read_text())\n for f in [\n \"files/configs.config.gatekeeper.sh.yaml\",\n \"files/constrainttemplates.templates.gatekeeper.sh.yaml\",\n \"files/constraintpodstatuses.status.gatekeeper.sh.yaml\",\n \"files/constrainttemplatepodstatuses.status.gatekeeper.sh.yaml\",\n ]\n ]\n except yaml.YAMLError as exc:\n logger.error(\"Error in configuration file:\", exc)\n\n crd_objects = [\n CustomResourceDefintion(crd[\"metadata\"][\"name\"], crd[\"spec\"])\n for crd in crds\n ]\n\n config = self.model.config\n spec_template = {}\n with open(\"files/pod-spec.yaml.jinja2\") as fh:\n spec_template = Template(fh.read())\n\n try:\n image_details = self.image.fetch()\n except OCIImageResourceError as e:\n self.model.unit.status = e.status\n return\n\n template_args = {\n \"crds\": crd_objects,\n \"image_details\": image_details,\n \"imagePullPolicy\": config[\"imagePullPolicy\"],\n \"app_name\": self.app.name,\n \"audit_cli_args\": self._audit_cli_args(),\n \"namespace\": os.environ[\"JUJU_MODEL_NAME\"],\n }\n\n spec = yaml.load(spec_template.render(**template_args))\n\n print(f\"Pod spec: {spec}\")\n return spec", "def get_prop_spec(client_factory, spec_type, properties):\n prop_spec = client_factory.create('ns0:PropertySpec')\n prop_spec.type = spec_type\n prop_spec.pathSet = properties\n return prop_spec", "def property_setup(self, properties):\n return properties", "def _create_properties(self): # pylint: disable=no-self-use\n properties = {}\n properties[\"product\"] = \"eventhub.python\"\n properties[\"version\"] = __version__\n properties[\"framework\"] = \"Python {}.{}.{}\".format(*sys.version_info[0:3])\n properties[\"platform\"] = sys.platform\n return properties", "def generate_specs_build(self):\n from django_swagger_utils.drf_server.generators.swagger_generator import SwaggerGenerator\n\n swagger_gen = SwaggerGenerator(self.parser, self.paths, self.app_name)\n # generating request_response files\n swagger_gen.generate_request_response()\n # testing properties\n swagger_gen.generate_definitions()\n # generating global parameters\n swagger_gen.generate_parameters()\n # generating global response\n swagger_gen.generate_responses()\n # generating urls\n swagger_gen.generate_urls()", "def _build_properties(self, k, v, definition):\n\n if isinstance(v, schema.Map):\n newdef = self._create_section(definition, k, term=k)\n\n if v.schema is None:\n # if it's a map for arbritary values, only include description\n field = nodes.line('', v.description)\n newdef.append(field)\n return\n\n newdeflist = self._create_def_list(newdef)\n\n sorted_schema = sorted(v.schema.items(),\n key=cmp_to_key(self._sort_by_type))\n for key, value in sorted_schema:\n self._build_properties(key, value, newdeflist)\n elif isinstance(v, schema.List):\n 
newdef = self._create_section(definition, k, term=k)\n\n # identify next section as list properties\n field = nodes.line()\n emph = nodes.emphasis('', 'List properties:')\n field.append(emph)\n newdef.append(field)\n\n newdeflist = self._create_def_list(newdef)\n\n self._build_properties('**', v.schema['*'], newdeflist)\n else:\n newdef = self._create_section(definition, k, term=k)\n if 'description' in v:\n field = nodes.line('', v['description'])\n newdef.append(field)\n else:\n field = nodes.line('', '++')\n newdef.append(field)", "def generate_property_template(self):\n template = {\n \"@id\": \"url or curie of the property\",\n \"@type\": \"rdf:Property\",\n \"rdfs:comment\": \"description of the property\",\n \"rdfs:label\": \"carmel case, should match @id\",\n \"schema:domainIncludes\": {\n \"@id\": \"class which use it as a property, could be list\"\n },\n \"schema:isPartOf\": {\n \"@id\": \"http://schema.biothings.io\"\n },\n \"schema:rangeIncludes\": {\n \"@id\": \"relates a property to a class that constitutes (one of) the expected type(s) for values of the property\"\n }\n }\n return template", "def render_specification_properties(spec, newline='\\n', ignore_props=None, prepend_items=None, append_items=None):\n\n spec_prop_list = []\n if prepend_items is not None:\n spec_prop_list += prepend_items\n ignore_keys = [] if ignore_props is None else ignore_props\n # Add link properties\n if isinstance(spec, LinkSpec):\n spec_prop_list.append('**Target Type** %s' %\n RSTDocument.get_reference(RSTSectionLabelHelper.get_section_label(\n spec['target_type']),\n spec['target_type']))\n # Add dataset properties\n if isinstance(spec, DatasetSpec):\n if spec.data_type_def is not None and spec.def_key() not in ignore_keys:\n spec_prop_list.append('**Neurodata Type:** %s' % str(spec.data_type_def))\n if spec.data_type_inc is not None and spec.inc_key() not in ignore_keys:\n extend_type = str(spec.data_type_inc)\n spec_prop_list.append('**Extends:** %s' %\n RSTDocument.get_reference(\n RSTSectionLabelHelper.get_section_label(extend_type),\n extend_type))\n if 'primitive_type' not in ignore_keys:\n spec_prop_list.append('**Primitive Type:** %s' % SpecToRST.spec_basetype_name(spec))\n if spec.get('quantity', None) is not None and 'quantity' not in ignore_keys:\n spec_prop_list.append('**Quantity:** %s' % SpecToRST.quantity_to_string(spec['quantity']))\n if spec.get('dtype', None) is not None and 'dtype' not in ignore_keys:\n spec_prop_list.append('**Data Type:** %s' % SpecToRST.render_data_type(spec['dtype']))\n if spec.get('dims', None) is not None and 'dims' not in ignore_keys:\n spec_prop_list.append('**Dimensions:** %s' % str(spec['dims']))\n if spec.get('shape', None) is not None and 'shape' not in ignore_keys:\n spec_prop_list.append('**Shape:** %s' % str(spec['shape']))\n if spec.get('linkable', None) is not None and 'linnkable' not in ignore_keys:\n spec_prop_list.append('**Linkable:** %s' % str(spec['linkable']))\n # Add group properties\n if isinstance(spec, GroupSpec):\n if spec.data_type_def is not None and spec.def_key() not in ignore_keys:\n ntype = str(spec.data_type_def)\n spec_prop_list.append('**Neurodata Type:** %s' %\n RSTDocument.get_reference(\n RSTSectionLabelHelper.get_section_label(ntype),\n ntype))\n if spec.data_type_inc is not None and spec.inc_key() not in ignore_keys:\n extend_type = str(spec.data_type_inc)\n spec_prop_list.append('**Extends:** %s' %\n RSTDocument.get_reference(\n RSTSectionLabelHelper.get_section_label(extend_type),\n extend_type))\n if 
'primitive_type' not in ignore_keys:\n spec_prop_list.append('**Primitive Type:** %s' % SpecToRST.spec_basetype_name(spec))\n if spec.get('quantity', None) is not None and 'quantity' not in ignore_keys:\n spec_prop_list.append('**Quantity:** %s' % SpecToRST.quantity_to_string(spec['quantity']))\n if spec.get('linkable', None) is not None and 'linkable' not in ignore_keys:\n spec_prop_list.append('**Linkable:** %s' % str(spec['linkable']))\n # Add attribute spec properites\n if isinstance(spec, AttributeSpec):\n if 'primitive_type' not in ignore_keys:\n spec_prop_list.append('**Primitive Type:** %s' % SpecToRST.spec_basetype_name(spec))\n if spec.get('dtype', None) is not None and 'dtype' not in ignore_keys:\n spec_prop_list.append('**Data Type:** %s' % SpecToRST.render_data_type(spec['dtype']))\n if spec.get('dims', None) is not None and 'dims' not in ignore_keys:\n spec_prop_list.append('**Dimensions:** %s' % str(spec['dims']))\n if spec.get('shape', None) is not None and 'shape' not in ignore_keys:\n spec_prop_list.append('**Shape:** %s' % str(spec['shape']))\n if spec.get('required', None) is not None and 'required' not in ignore_keys:\n spec_prop_list.append('**Required:** %s' % str(spec['required']))\n if spec.get('value', None) is not None and 'value' not in ignore_keys:\n spec_prop_list.append('**Value:** %s' % str(spec['value']))\n if spec.get('default_value', None) is not None and 'default_value' not in ignore_keys:\n spec_prop_list.append('**Default Value:** %s' % str(spec['default_value']))\n\n # Add common properties\n if spec.get('default_name', None) is not None:\n spec_prop_list.append('**Default Name:** %s' % str(spec['default_name']))\n if spec.get('name', None) is not None:\n spec_prop_list.append('**Name:** %s' % str(spec['name']))\n\n # Add custom items if necessary\n if append_items is not None:\n spec_prop_list += append_items\n\n # Render the specification properties list\n spec_doc = ''\n if len(spec_prop_list) > 0:\n spec_doc += newline\n for dp in spec_prop_list:\n spec_doc += newline + '- ' + dp\n spec_doc += newline\n # Return the rendered list\n return spec_doc", "def __init__(self, spec):\n self.spec = spec", "def _create_property_field(property_, alias_dictionary):\n name_for_methods = property_['name_for_methods']\n\n assert property_['default_value'] is not None, \\\n ('MakeComputedStyleBase requires an default value for all fields, none specified '\n 'for property ' + property_['name'])\n\n if property_['field_template'] in alias_dictionary:\n alias_template = property_['field_template']\n for field in alias_dictionary[alias_template]:\n if field != 'name':\n property_[field] = alias_dictionary[alias_template][field]\n\n if property_['field_template'] == 'keyword':\n type_name = property_['type_name']\n default_value = type_name + '::' + enum_value_name(property_['default_value'])\n assert property_['field_size'] is None, \\\n (\"'\" + property_['name'] + \"' is a keyword field, \"\n \"so it should not specify a field_size\")\n size = int(math.ceil(math.log(len(property_['keywords']), 2)))\n elif property_['field_template'] == 'multi_keyword':\n type_name = property_['type_name']\n default_value = type_name + '::' + enum_value_name(property_['default_value'])\n size = len(property_['keywords']) - 1 # Subtract 1 for 'none' keyword\n elif property_['field_template'] == 'external':\n type_name = property_['type_name']\n default_value = property_['default_value']\n size = None\n elif property_['field_template'] == 'primitive':\n type_name = 
property_['type_name']\n default_value = property_['default_value']\n size = 1 if type_name == 'bool' else property_[\"field_size\"] # pack bools with 1 bit.\n elif property_['field_template'] == 'pointer':\n type_name = property_['type_name']\n default_value = property_['default_value']\n size = None\n else:\n assert property_['field_template'] == 'monotonic_flag', \"Please put a valid value for field_template\"\n type_name = 'bool'\n default_value = 'false'\n size = 1\n\n if property_['wrapper_pointer_name']:\n assert property_['field_template'] in ['pointer', 'external']\n if property_['field_template'] == 'external':\n type_name = '{}<{}>'.format(property_['wrapper_pointer_name'], type_name)\n\n return Field(\n 'property',\n name_for_methods,\n property_name=property_['name'],\n inherited=property_['inherited'],\n independent=property_['independent'],\n type_name=type_name,\n wrapper_pointer_name=property_['wrapper_pointer_name'],\n field_template=property_['field_template'],\n size=size,\n default_value=default_value,\n custom_copy=property_['custom_copy'],\n custom_compare=property_['custom_compare'],\n mutable=property_['mutable'],\n getter_method_name=property_['getter'],\n setter_method_name=property_['setter'],\n initial_method_name=property_['initial'],\n computed_style_custom_functions=property_['computed_style_custom_functions'],\n )", "def _makeProperty( key, value ):\r\n property = PropertyValue()\r\n property.Name = key\r\n property.Value = value\r\n return property", "def build_object_spec(client_factory, root_folder, traversal_specs):\r\n object_spec = client_factory.create('ns0:ObjectSpec')\r\n object_spec.obj = root_folder\r\n object_spec.skip = False\r\n object_spec.selectSet = traversal_specs\r\n return object_spec", "def create_property(self, key, prop):\n\n setting = self.new_property(key, prop)\n setting.create()\n return setting", "def _component_specs(self):\n specs = dict(pretransformed_input=self._input_spec)\n if self._transform_is_composite:\n specs['transform_fn'] = self.transform_or_spec\n if self._also_track_spec is not None:\n specs['also_track'] = self._also_track_spec\n return specs", "def build(self):\n return self.hyperparams.items()", "def __init__(self, property_id=None, name=None, address=None, ratings=None, location=None, phone=None, fax=None, category=None, business_model=None, rank=None, checkin=None, checkout=None, fees=None, policies=None, attributes=None, amenities=None, images=None, onsite_payments=None, rooms=None, rates=None, dates=None, descriptions=None, statistics=None, airports=None, registry_number=None, themes=None, all_inclusive=None, tax_id=None, chain=None, brand=None, spoken_languages=None, multi_unit=None): # noqa: E501 # noqa: E501\n\n self._property_id = None\n self._name = None\n self._address = None\n self._ratings = None\n self._location = None\n self._phone = None\n self._fax = None\n self._category = None\n self._business_model = None\n self._rank = None\n self._checkin = None\n self._checkout = None\n self._fees = None\n self._policies = None\n self._attributes = None\n self._amenities = None\n self._images = None\n self._onsite_payments = None\n self._rooms = None\n self._rates = None\n self._dates = None\n self._descriptions = None\n self._statistics = None\n self._airports = None\n self._registry_number = None\n self._themes = None\n self._all_inclusive = None\n self._tax_id = None\n self._chain = None\n self._brand = None\n self._spoken_languages = None\n self._multi_unit = None\n self.discriminator = None\n\n if 
property_id is not None:\n self.property_id = property_id\n if name is not None:\n self.name = name\n if address is not None:\n self.address = address\n if ratings is not None:\n self.ratings = ratings\n if location is not None:\n self.location = location\n if phone is not None:\n self.phone = phone\n if fax is not None:\n self.fax = fax\n if category is not None:\n self.category = category\n if business_model is not None:\n self.business_model = business_model\n if rank is not None:\n self.rank = rank\n if checkin is not None:\n self.checkin = checkin\n if checkout is not None:\n self.checkout = checkout\n if fees is not None:\n self.fees = fees\n if policies is not None:\n self.policies = policies\n if attributes is not None:\n self.attributes = attributes\n if amenities is not None:\n self.amenities = amenities\n if images is not None:\n self.images = images\n if onsite_payments is not None:\n self.onsite_payments = onsite_payments\n if rooms is not None:\n self.rooms = rooms\n if rates is not None:\n self.rates = rates\n if dates is not None:\n self.dates = dates\n if descriptions is not None:\n self.descriptions = descriptions\n if statistics is not None:\n self.statistics = statistics\n if airports is not None:\n self.airports = airports\n if registry_number is not None:\n self.registry_number = registry_number\n if themes is not None:\n self.themes = themes\n if all_inclusive is not None:\n self.all_inclusive = all_inclusive\n if tax_id is not None:\n self.tax_id = tax_id\n if chain is not None:\n self.chain = chain\n if brand is not None:\n self.brand = brand\n if spoken_languages is not None:\n self.spoken_languages = spoken_languages\n if multi_unit is not None:\n self.multi_unit = multi_unit", "def test_build_property(self):\n v1 = versions.Version(version='1.2.3.4', name='foo')\n expected = 4\n\n self.assertEqual(v1.build, expected)", "def _component_specs(self):\n specs = dict(pretransformed_input=self._input_spec)\n if self._transform_is_composite:\n specs['bijector'] = self.transform_or_spec\n return specs", "def __init__(self, value):\n if isinstance(value, bool):\n ptr = self.ffi.chfl_property_bool(c_bool(value))\n elif isinstance(value, (float, int)):\n ptr = self.ffi.chfl_property_double(c_double(value))\n elif isinstance(value, str):\n ptr = self.ffi.chfl_property_string(value.encode(\"utf8\"))\n elif _is_vector3d(value):\n value = chfl_vector3d(value[0], value[1], value[2])\n ptr = self.ffi.chfl_property_vector3d(value)\n else:\n raise ChemfilesError(\n f\"can not create a Property with a value of type '{type(value)}'\"\n )\n\n super(Property, self).__init__(ptr, is_const=False)", "def get_specification(self) -> Dict:\n specification = {\n 'version': VERSION,\n 'metadata': {\n 'twoColumn': True,\n 'layout': self.autolayout\n },\n 'nodes': []\n }\n\n def strip_io(io_list: list, direction) -> list:\n \"\"\"\n Strips every input/output from metadata and leaves only\n `name` and `type` keys.\n \"\"\"\n return [\n {\n 'name': io['name'],\n 'type': io['type'],\n 'direction': direction\n }\n for io in io_list\n ]\n\n toremove = set()\n for key, node in self.nodes.items():\n try:\n node_cls = load_class(node.cls_name)\n except (ModuleNotFoundError, ImportError, Exception) as err:\n msg = f'Could not add {node_cls}. 
Reason:'\n _LOGGER.warn('-' * len(msg))\n _LOGGER.warn(msg)\n _LOGGER.warn(err)\n _LOGGER.warn('-' * len(msg))\n toremove.add(key)\n continue\n parameterschema = node_cls.form_parameterschema()\n\n properties = []\n for name, props in parameterschema['properties'].items():\n new_property = {'name': name}\n\n if 'default' in props:\n new_property['default'] = props['default']\n\n if 'description' in props:\n new_property['description'] = props['description']\n\n def add_default(default_val):\n if new_property.get('default') is None:\n new_property['default'] = default_val\n\n # Case for an input with range defined\n if 'enum' in props:\n new_property['type'] = 'select'\n new_property['values'] = list(map(str, props['enum']))\n add_default(new_property['values'][0])\n # Case for a single value input\n elif 'type' in props:\n if 'array' in props['type']:\n new_property['type'] = 'list'\n if 'items' in props and 'type' in props['items']:\n dtype = props['items']['type']\n new_property['dtype'] = dtype\n add_default([])\n elif 'boolean' in props['type']:\n new_property['type'] = 'checkbox'\n add_default(False)\n elif 'string' in props['type']:\n new_property['type'] = 'text'\n add_default('')\n elif 'integer' in props['type']:\n new_property['type'] = 'integer'\n add_default(0)\n elif 'number' in props['type']:\n new_property['type'] = 'number'\n add_default(0)\n elif 'object' in props['type']:\n # Object arguments should be defined in specification\n # as node inputs, rather than properties\n new_property = None\n else:\n new_property['type'] = 'text'\n add_default('')\n # If no type is specified then text is used\n else:\n new_property['type'] = 'text'\n add_default('')\n\n if new_property is not None:\n properties.append(new_property)\n\n specification['nodes'].append({\n 'name': node.name,\n 'type': node.type,\n 'category': node.category,\n 'properties': properties,\n 'interfaces': strip_io(\n self.io_mapping[node.type]['inputs'],\n 'input'\n ) + strip_io(\n self.io_mapping[node.type]['outputs'],\n 'output'\n )\n })\n\n for key in toremove:\n del self.nodes[key]\n return specification", "def __properties__(self) -> dict:\r\n parameters = [\r\n d for d in dir(self) if (d[0] != \"_\") and (d.count(\"set\") == 0)\r\n and (d.count(\"_c\") == 0) and (d.count(\"_f\") == 0)\r\n ]\r\n\r\n return self.__as_json__(parameters)", "def build():", "def __init__(self):\n self.properties = {}", "def build_specfile_sections(spec):\n str = \"\"\n\n mandatory_sections = {\n 'DESCRIPTION' : '\\n%%description\\n%s\\n\\n', }\n\n str = str + SimpleTagCompiler(mandatory_sections).compile( spec )\n\n optional_sections = {\n 'DESCRIPTION_' : '%%description -l %s\\n%s\\n\\n',\n 'CHANGELOG' : '%%changelog\\n%s\\n\\n',\n 'X_RPM_PREINSTALL' : '%%pre\\n%s\\n\\n',\n 'X_RPM_POSTINSTALL' : '%%post\\n%s\\n\\n',\n 'X_RPM_PREUNINSTALL' : '%%preun\\n%s\\n\\n',\n 'X_RPM_POSTUNINSTALL' : '%%postun\\n%s\\n\\n',\n 'X_RPM_VERIFY' : '%%verify\\n%s\\n\\n',\n\n # These are for internal use but could possibly be overridden\n 'X_RPM_PREP' : '%%prep\\n%s\\n\\n',\n 'X_RPM_BUILD' : '%%build\\n%s\\n\\n',\n 'X_RPM_INSTALL' : '%%install\\n%s\\n\\n',\n 'X_RPM_CLEAN' : '%%clean\\n%s\\n\\n',\n }\n\n # Default prep, build, install and clean rules\n # TODO: optimize those build steps, to not compile the project a second time\n if 'X_RPM_PREP' not in spec:\n spec['X_RPM_PREP'] = '[ -n \"$RPM_BUILD_ROOT\" -a \"$RPM_BUILD_ROOT\" != / ] && rm -rf \"$RPM_BUILD_ROOT\"' + '\\n%setup -q'\n\n if 'X_RPM_BUILD' not in spec:\n spec['X_RPM_BUILD'] = '[ ! 
-e \"$RPM_BUILD_ROOT\" -a \"$RPM_BUILD_ROOT\" != / ] && mkdir \"$RPM_BUILD_ROOT\"'\n\n if 'X_RPM_INSTALL' not in spec:\n spec['X_RPM_INSTALL'] = 'scons --install-sandbox=\"$RPM_BUILD_ROOT\" \"$RPM_BUILD_ROOT\"'\n\n if 'X_RPM_CLEAN' not in spec:\n spec['X_RPM_CLEAN'] = '[ -n \"$RPM_BUILD_ROOT\" -a \"$RPM_BUILD_ROOT\" != / ] && rm -rf \"$RPM_BUILD_ROOT\"'\n\n str = str + SimpleTagCompiler(optional_sections, mandatory=0).compile( spec )\n\n return str", "def __init__(self, pathspec, properties={}):\n import numpy\n self.pathspec = pathspec\n super(ArraySpec,self).__init__(numpy.ndarray)\n self.properties = OrderedDict(properties)", "def __init__(self, properties):\n self.attributes = {}\n self.output_info = {}\n for key, node in properties.walk():\n self.attributes[key[1]] = node.get_value().strip(\" '\")", "def get_properties():", "def assign_build_props(self, name, dependencies, worker=False):\n props = self.set_properties.copy()\n props[\"virtual_builder_name\"] = name\n props[\"package\"] = name\n props[\"dependencies\"] = dependencies\n if worker:\n props[\"workername\"] = worker\n return [\"build\", props]", "def __init__(self, specs, resources, properties=None):\n if not properties:\n properties = {}\n self.init_collections()\n self.properties = properties\n self.set_paths(specs, resources)\n self.parse_paths()\n self.find_resources()", "def _determine_properties(self, paramdict):\n for var in paramdict:\n if is_dimensionless(paramdict[var]):\n self._all_params_unit[var] = \"none\"\n yield lems.Property(var, \"none\")\n else:\n dim = _determine_dimension(paramdict[var])\n self._all_params_unit[var] = dim\n yield lems.Property(var, dim)", "def build_specfile_header(spec):\n str = \"\"\n\n # first the mandatory sections\n mandatory_header_fields = {\n 'NAME' : '%%define name %s\\nName: %%{name}\\n',\n 'VERSION' : '%%define version %s\\nVersion: %%{version}\\n',\n 'PACKAGEVERSION' : '%%define release %s\\nRelease: %%{release}\\n',\n 'X_RPM_GROUP' : 'Group: %s\\n',\n 'SUMMARY' : 'Summary: %s\\n',\n 'LICENSE' : 'License: %s\\n',\n }\n\n str = str + SimpleTagCompiler(mandatory_header_fields).compile( spec )\n\n # now the optional tags\n optional_header_fields = {\n 'VENDOR' : 'Vendor: %s\\n',\n 'X_RPM_URL' : 'Url: %s\\n',\n 'SOURCE_URL' : 'Source: %s\\n',\n 'SUMMARY_' : 'Summary(%s): %s\\n',\n 'ARCHITECTURE' : 'BuildArch: %s\\n',\n 'X_RPM_DISTRIBUTION' : 'Distribution: %s\\n',\n 'X_RPM_ICON' : 'Icon: %s\\n',\n 'X_RPM_PACKAGER' : 'Packager: %s\\n',\n 'X_RPM_GROUP_' : 'Group(%s): %s\\n',\n\n 'X_RPM_REQUIRES' : 'Requires: %s\\n',\n 'X_RPM_PROVIDES' : 'Provides: %s\\n',\n 'X_RPM_CONFLICTS' : 'Conflicts: %s\\n',\n 'X_RPM_BUILDREQUIRES' : 'BuildRequires: %s\\n',\n\n 'X_RPM_SERIAL' : 'Serial: %s\\n',\n 'X_RPM_EPOCH' : 'Epoch: %s\\n',\n 'X_RPM_AUTOREQPROV' : 'AutoReqProv: %s\\n',\n 'X_RPM_EXCLUDEARCH' : 'ExcludeArch: %s\\n',\n 'X_RPM_EXCLUSIVEARCH' : 'ExclusiveArch: %s\\n',\n 'X_RPM_PREFIX' : 'Prefix: %s\\n',\n\n # internal use\n 'X_RPM_BUILDROOT' : 'BuildRoot: %s\\n',\n }\n\n # fill in default values:\n # Adding a BuildRequires renders the .rpm unbuildable under systems which\n # are not managed by rpm, since the database to resolve this dependency is\n # missing (take Gentoo as an example)\n #if 'X_RPM_BUILDREQUIRES' not in spec:\n # spec['X_RPM_BUILDREQUIRES'] = 'scons'\n\n if 'X_RPM_BUILDROOT' not in spec:\n spec['X_RPM_BUILDROOT'] = '%{_tmppath}/%{name}-%{version}-%{release}'\n\n str = str + SimpleTagCompiler(optional_header_fields, mandatory=0).compile( spec )\n\n # Add any extra specfile 
definitions the user may have supplied.\n # These flags get no processing, they are just added.\n # github #3164: if we don't turn off debug package generation\n # the tests which build packages all fail. If there are no\n # extra flags, default to adding this one. If the user wants\n # to turn this back on, supply the flag set to None.\n\n if 'X_RPM_EXTRADEFS' not in spec:\n spec['X_RPM_EXTRADEFS'] = ['%global debug_package %{nil}']\n for extra in spec['X_RPM_EXTRADEFS']:\n str += extra + '\\n'\n\n return str", "def build_property(value_token: ValueToken) -> property:\n def caller(_: Any) -> Any:\n return value_token.get_value()\n return property(caller)", "def __init__(self, jsondict=None, strict=True):\n \n self.type = None\n \"\"\" Code that specifies the property DeviceDefinitionPropetyCode\n (Extensible).\n Type `CodeableConcept` (represented as `dict` in JSON). \"\"\"\n \n self.valueCode = None\n \"\"\" Property value as a code, e.g., NTP4 (synced to NTP).\n List of `CodeableConcept` items (represented as `dict` in JSON). \"\"\"\n \n self.valueQuantity = None\n \"\"\" Property value as a quantity.\n List of `Quantity` items (represented as `dict` in JSON). \"\"\"\n \n super(DeviceDefinitionProperty, self).__init__(jsondict=jsondict, strict=strict)", "def test_01_BuildObjects(self):\n # print(PrettyFormatAny.form(self.m_pyhouse_obj._Config, 'A3-01-A - Config', 190))\n # print(__file__)\n # print(PrettyFormatAny.form(self.m_pyhouse_obj._Config.YamlTree, 'Location', 190))\n # self.assertEqual(self.m_pyhouse_obj._Config.YamlConfigDir, '/etc/pyhouse/')", "def _buildProcVars (self):\n\t\talias = {val:key for key, val in proc.ALIAS.iteritems()}\n\t\tfor prop in sorted(self.props.keys()):\n\t\t\tval = self.props[prop]\n\t\t\tif not prop in ['id', 'tag', 'tmpdir', 'forks', 'cache', 'workdir', 'echo', 'runner',\n\t\t\t\t\t\t\t'errorhow', 'errorntry', 'defaultSh', 'exportdir', 'exporthow', 'exportow',\n\t\t\t\t\t\t\t'indir', 'outdir', 'length', 'args']:\n\t\t\t\tcontinue\n\t\t\t\n\t\t\tif prop == 'args':\n\t\t\t\tself.props['procvars']['proc.args'] = val\n\t\t\t\tfor k, v in val.iteritems():\n\t\t\t\t\tself.props['procvars']['proc.args.' + k] = v\n\t\t\t\t\tself.log('%s => %s' % (k, v), 'info', 'p.args')\n\t\t\telse:\n\t\t\t\tself.props['procvars']['proc.' + prop] = val\n\t\t\t\tif alias.has_key (prop): \n\t\t\t\t\tself.props['procvars']['proc.' 
+ alias[prop]] = val\n\t\t\t\t\tself.log ('%s (%s) => %s' % (prop, alias[prop], val), 'info', 'p.props')\n\t\t\t\telse:\n\t\t\t\t\tself.log ('%s => %s' % (prop, val), 'info', 'p.props')", "def get_generic_walker_properties(self, walker):\n etree.SubElement(walker, \"ParameterDeclarations\")\n bounding_box = etree.SubElement(walker, \"BoundingBox\")\n boundbox_center = etree.SubElement(bounding_box, \"Center\")\n boundbox_center.set(\"x\", \"1.5\")\n boundbox_center.set(\"y\", \"0.0\")\n boundbox_center.set(\"z\", \"0.9\")\n boundbox_dimemsion = etree.SubElement(bounding_box, \"Dimensions\")\n boundbox_dimemsion.set(\"width\", \"1.0\")\n boundbox_dimemsion.set(\"length\", \"1.0\")\n boundbox_dimemsion.set(\"height\", \"1.8\")\n properties_group = etree.SubElement(walker, \"Properties\")\n properties = etree.SubElement(properties_group, \"Property\")\n properties.set(\"name\", \"type\")\n properties.set(\"value\", \"simulation\")", "def test_01_BuildObjects(self):\n # print(PrettyFormatAny.form(self.m_pyhouse_obj, 'A1-01-A - Main', 190))\n # print(PrettyFormatAny.form(self.m_pyhouse_obj.House, 'A1-02-B - House', 190))\n # print(PrettyFormatAny.form(self.m_pyhouse_obj.House.Location, 'A1-01-C - Location', 190))\n self.assertIsInstance(self.m_pyhouse_obj, PyHouseInformation)\n self.assertIsInstance(self.m_pyhouse_obj.House, HouseInformation)\n self.assertIsInstance(self.m_pyhouse_obj.House.Location, LocationInformationPrivate)", "def write_properties(self, prop_filename):\n # Collect list of all keys in self.plats that have True values,\n # but change \"windows\" to \"win64\" because build-sanity is annoying.\n sanity_plats = [\n (x if x != \"windows\" else \"win64\")\n for x in self.plats.keys() if self.plats[x]\n ]\n with open(prop_filename, \"w\") as prop:\n prop.write(\"CURRENT_BUILD_NUMBER={}\\n\".format(self.bld_num))\n prop.write(\"VERSION={}\\n\".format(self.version))\n prop.write(\"DISTROS={}\\n\".format(\" \".join(sanity_plats)))\n prop.write(\"TESTRUNNER_BRANCH={}\\n\".format(self.testrunner_branch))\n if self.use_magma:\n prop.write(\"EXTRA_TEST_PARAMS={}\\n\".format(\"bucket_storage=magma\"))", "def __init__(self, sheet_type, properties):\n super(SheetSpec,self).__init__(sheet_type)\n\n if 'level' not in properties:\n raise Exception(\"SheetSpec always requires 'level' property.\")\n\n\n properties = [(k, properties[k]) for k in self.name_ordering\n if k in properties]\n\n self.sheet_type = sheet_type\n self.properties = OrderedDict(properties)", "def _gen_polarion_property_file(test_attrs, test_attrs_values,\n test_run, test_case_id,\n property_file=None):\n test_keys = [\"polarion-testcase-id\"] * len(test_case_id)\n properties_mapping = OrderedDict()\n properties_mapping[\"properties\"] = {key: value for key, value in\n zip(test_attrs, test_attrs_values)\n if value is not None\n }\n properties_mapping[\"casemap\"] = {\n test_run: [\n {key: value} for key, value in zip(test_keys, test_case_id)\n ]\n }\n\n if property_file is None:\n property_file = \"/tmp/{}.json\".format(test_run)\n\n with open(property_file, 'w') as prop_file:\n dump(properties_mapping, prop_file, sort_keys=False, indent=1)\n\n return property_file", "def build(self) -> None:", "def __init__(self, p_description: str, property_name: str):\n self._p_description = p_description\n self._property_name = property_name\n self._property_data = None\n self._vectorized_data = None\n self._categories = None", "def make_pod_spec(self):\n spec = {\n 'containers': [{\n 'name': self.framework.model.app.name,\n 
'imageDetails': {\n },\n 'ports': [{\n 'containerPort':\n self.framework.model.config['advertised-port'],\n 'protocol': 'TCP',\n }],\n }],\n }\n return spec", "def __init__(self, property_name='', *protocol_ids):\n\n self._full_path = ''\n\n if len(property_name) > 0 or len(protocol_ids) > 0:\n self._from_components(property_name, *protocol_ids)\n\n else:\n self._full_path = '{}'.format(ProtocolPath.property_separator)", "def build_obj_spec(\n obj_key,\n parameter_dict,\n experiment_name=None,\n obj_type=\"policy\",\n output=\"./policy_yamls/\",\n):\n # Set name via timestamp if not specified\n if obj_type not in [\"policy\", \"estimator\", \"dataset\"]:\n print(\"Invalid type: {}\".format(obj_type))\n return None\n\n if experiment_name == None:\n now = datetime.now()\n current_time = now.strftime(\"%H%M%S\")\n experiment_name = \"experiment_{}\".format(current_time)\n\n # Build dict structure\n obj_dict = {\n \"name\": experiment_name,\n \"type\": obj_type,\n \"key\": obj_key,\n \"parameters\": parameter_dict,\n }\n\n # Set output folder\n output_folder = os.path.join(output, experiment_name)\n if not os.path.exists(output_folder):\n os.makedirs(output_folder)\n\n with open(\n os.path.join(output_folder, \"{}_spec.yaml\".format(obj_type)), \"w\"\n ) as file:\n yaml.dump(obj_dict, file)\n\n return obj_dict", "def properties(self):", "def properties(self):", "def properties(self):", "def get_model_with_properties():\n \n m = ConcreteModel()\n\n # ------------------------------------------------------------------\n # Data\n # ------------------------------------------------------------------\n\n m.np = 25 # Number of possible tays\n m.c = 4 # Number of components\n m.lc = 1 # Light component\n m.hc = 4 # Heavy component\n\n #### Constant parameters\n m.Rgas = 8.314 # Ideal gas constant in J/mol K\n m.Tref = 298.15 # Reference temperature in K\n\n #### Product specifications\n m.xspec_lc = 0.99 # Final liquid composition for methanol (1)\n m.xspec_hc = 0.99 # Fnal liquid composition for butanol (4)\n m.xspec_inter2 = 0.99 # Final liquid composition for ethanol (2)\n m.xspec_inter3 = 0.99 # Final liquid composition for propanol (3)\n m.Ddes = 50 # Final flowrate in distillate in mol/s\n m.Bdes = 50 # Final flowrate in bottoms in mol/s\n m.Sdes = 50 # Final flowrate in side product streams in mol/s\n\n # #### Known initial values\n m.Fi = m.Ddes + m.Bdes + 2 * m.Sdes # Side feed flowrate in mol/s\n m.Vi = 400 # Initial value for vapor flowrate in mol/s\n m.Li = 400 # Initial value for liquid flowrate in mol/s\n\n m.Tf = 358 # Side feed temperature in K\n\n m.Preb = 1.2 # Reboiler pressure in bar\n m.Pbot = 1.12 # Bottom-most tray pressure in bar\n m.Ptop = 1.08 # Top-most tray pressure in bar\n m.Pcon = 1.05 # Condenser pressure in bar\n m.Pf = 1.02\n\n m.rr0 = 0.893 # Internal reflux ratio initial value\n m.bu0 = 0.871 # Internal reflux ratio initial value\n\n\n #### Scaling factors\n m.Hscale = 1e3 \n m.Qscale = 1e-3 \n\n \n #### Constants for the calculation of liquid heat capacity\n m.cpc = {} # Constant 1 for liquid heat capacity \n m.cpc2 = {} # Constant 2 for liquid heat capacity \n m.cpc[1] = m.Rgas \n m.cpc[2] = 1\n m.cpc2['A', 1] = 1 / 100\n m.cpc2['B', 1] = 1 / 1e4\n m.cpc2['A', 2] = 1\n m.cpc2['B', 2] = 1\n\n\n # ------------------------------------------------------------------\n # Physical Properties\n #\n # Notation:\n # MW ........................ molecular weight in g/gmol\n # TB ........................ boiling point temperature in K\n # TC ........................ 
critical temperature in K\n # PC ........................ critical pressure in bar\n # w ........................ acentric factor\n # lden ...................... liquid density g/m3,\n # dHvap ..................... heat of vaporization in J/mol.\n # vpA, vpB, vpC, and vpD .... vapor pressure constants\n # cpA, cpB, cpC, and cpD .... heat capacity constants J/mol:\n # 1 for liq and 2 for vapor phase\n #\n # Reference A: R.C. Reid, J.M. Prausnitz and B.E. Poling,\n # \"The Properties of gases and liquids\", 1987 and 2004 Eds.\n #\n # ------------------------------------------------------------------\n\n m.prop = {} # Properties of components:\n cpL = {} # Ruczika-D method for liquid heat capacity calculation\n # (Reference A, page 6.20)\n sumA = {}\n sumB = {}\n sumC = {}\n cpL['a', 'C(H3)(C)'] = 4.19845\n cpL['b', 'C(H3)(C)'] = -0.312709\n cpL['c', 'C(H3)(C)'] = 0.178609\n cpL['a', 'C(H2)(C2)'] = 2.7345\n cpL['b', 'C(H2)(C2)'] = 0.122732\n cpL['c', 'C(H2)(C2)'] = -0.123482\n cpL['a', 'C(H2)(C)(O)'] = 0.517007\n cpL['b', 'C(H2)(C)(O)'] = 1.26631\n cpL['c', 'C(H2)(C)(O)'] = -0.0939713\n cpL['a', 'O(H)(C)'] = 16.1555\n cpL['b', 'O(H)(C)'] = -11.938\n cpL['c', 'O(H)(C)'] = 2.85117\n cpL['a', 'C(H3)(O)'] = 3.70344\n cpL['b', 'C(H3)(O)'] = -1.12884\n cpL['c', 'C(H3)(O)'] = 0.51239\n sumA[1] = (cpL['a', 'C(H3)(O)']\n + cpL['a', 'O(H)(C)']) \n sumB[1] = (cpL['b', 'C(H3)(O)']\n + cpL['b', 'O(H)(C)'])\n sumC[1] = (cpL['c', 'C(H3)(O)']\n + cpL['c', 'O(H)(C)'])\n sumA[2] = (cpL['a', 'C(H3)(C)']\n + cpL['a', 'C(H2)(C)(O)']\n + cpL['a', 'O(H)(C)'])\n sumB[2] = (cpL['b', 'C(H3)(C)']\n + cpL['b', 'C(H2)(C)(O)']\n + cpL['b', 'O(H)(C)'])\n sumC[2] = (cpL['c', 'C(H3)(C)']\n + cpL['c', 'C(H2)(C)(O)']\n + cpL['c', 'O(H)(C)'])\n sumA[3] = (cpL['a', 'C(H3)(C)']\n + cpL['a', 'C(H2)(C2)']\n + cpL['a', 'C(H2)(C)(O)']\n + cpL['a', 'O(H)(C)'])\n sumB[3] = (cpL['b', 'C(H3)(C)']\n + cpL['b', 'C(H2)(C2)']\n + cpL['b', 'C(H2)(C)(O)']\n + cpL['b', 'O(H)(C)'])\n sumC[3] = (cpL['c', 'C(H3)(C)']\n + cpL['c', 'C(H2)(C2)']\n + cpL['c', 'C(H2)(C)(O)']\n + cpL['c', 'O(H)(C)'])\n sumA[4] = (cpL['a', 'C(H3)(C)']\n + 2 * cpL['a', 'C(H2)(C2)']\n + cpL['a', 'C(H2)(C)(O)']\n + cpL['a', 'O(H)(C)'])\n sumB[4] = (cpL['b', 'C(H3)(C)']\n + 2 * cpL['b', 'C(H2)(C2)']\n + cpL['b', 'C(H2)(C)(O)']\n + cpL['b', 'O(H)(C)'])\n sumC[4] = (cpL['c', 'C(H3)(C)']\n + 2 * cpL['c', 'C(H2)(C2)']\n + cpL['c', 'C(H2)(C)(O)']\n + cpL['c', 'O(H)(C)'])\n\n ## Methanol: component 1\n m.prop[1, 'MW'] = 32.042\n m.prop[1, 'TB'] = 337.7\n m.prop[1, 'TC'] = 512.6\n m.prop[1, 'PC'] = 80.9\n m.prop[1, 'w'] = 0.556\n m.prop[1, 'lden'] = 792e3\n m.prop[1, 'dHvap'] = 38.376e3\n m.prop[1, 'vpA'] = -8.54796\n m.prop[1, 'vpB'] = 0.76982\n m.prop[1, 'vpC'] = -3.10850\n m.prop[1, 'vpD'] = 1.54481\n m.prop[1, 'cpA', 1] = sumA[1]\n m.prop[1, 'cpB', 1] = sumB[1]\n m.prop[1, 'cpC', 1] = sumC[1]\n m.prop[1, 'cpD', 1] = 0\n m.prop[1, 'cpA', 2] = 2.115e1\n m.prop[1, 'cpB', 2] = 7.092e-2\n m.prop[1, 'cpC', 2] = 2.587e-5\n m.prop[1, 'cpD', 2] = -2.852e-8\n\n\n ## Ethanol: component 2\n m.prop[2, 'MW'] = 46.069\n m.prop[2, 'TB'] = 351.4\n m.prop[2, 'TC'] = 513.9\n m.prop[2, 'PC'] = 61.4\n m.prop[2, 'w'] = 0.644\n m.prop[2, 'lden'] = 789.3e3\n m.prop[2, 'dHvap'] = 42.698e3\n m.prop[2, 'vpA'] = -8.51838\n m.prop[2, 'vpB'] = 0.34163\n m.prop[2, 'vpC'] = -5.73683\n m.prop[2, 'vpD'] = 8.32581\n m.prop[2, 'cpA', 1] = sumA[2]\n m.prop[2, 'cpB', 1] = sumB[2]\n m.prop[2, 'cpC', 1] = sumC[2]\n m.prop[2, 'cpD', 1] = 0\n m.prop[2, 'cpA', 2] = 9.014\n m.prop[2, 'cpB', 2] = 2.141e-1\n m.prop[2, 
'cpC', 2] = -8.390e-5\n m.prop[2, 'cpD', 2] = 1.373e-9\n\n\n ## Propanol: component 3\n m.prop[3, 'MW'] = 60.096\n m.prop[3, 'TB'] = 370.3\n m.prop[3, 'TC'] = 536.8\n m.prop[3, 'PC'] = 51.7\n m.prop[3, 'w'] = 0.623\n m.prop[3, 'lden'] = 804e3\n m.prop[3, 'dHvap'] = 47.763e3\n m.prop[3, 'vpA'] = -8.05594\n m.prop[3, 'vpB'] = 4.25183e-2\n m.prop[3, 'vpC'] = -7.51296\n m.prop[3, 'vpD'] = 6.89004\n m.prop[3, 'cpA', 1] = sumA[3]\n m.prop[3, 'cpB', 1] = sumB[3]\n m.prop[3, 'cpC', 1] = sumC[3]\n m.prop[3, 'cpD', 1] = 0\n m.prop[3, 'cpA', 2] = 2.47\n m.prop[3, 'cpB', 2] = 3.325e-1\n m.prop[3, 'cpC', 2] = -1.855e-4\n m.prop[3, 'cpD', 2] = 4.296e-8\n\n\n ## Butanol: component 4\n m.prop[4, 'MW'] = 74.123\n m.prop[4, 'TB'] = 390.9\n m.prop[4, 'TC'] = 563.1\n m.prop[4, 'PC'] = 44.2\n m.prop[4, 'w'] = 0.593\n m.prop[4, 'lden'] = 810e3\n m.prop[4, 'dHvap'] = 52.607e3\n m.prop[4, 'vpA'] = -8.00756\n m.prop[4, 'vpB'] = 0.53783\n m.prop[4, 'vpC'] = -9.34240\n m.prop[4, 'vpD'] = 6.68692\n m.prop[4, 'cpA', 1] = sumA[4]\n m.prop[4, 'cpB', 1] = sumB[4]\n m.prop[4, 'cpC', 1] = sumC[4]\n m.prop[4, 'cpD', 1] = 0\n m.prop[4, 'cpA', 2] = 3.266\n m.prop[4, 'cpB', 2] = 4.18e-1\n m.prop[4, 'cpC', 2] = -2.242e-4\n m.prop[4, 'cpD', 2] = 4.685e-8\n\n\n return m", "def set_properties(struct):", "def _build(self):", "def _build(self):", "def __init__(self, genus, species, properties=None):\n if properties is None:\n properties = []\n self.genus = genus\n self.species = species\n self.bin_nom = '{0} {1}'.format(genus.title(), species)\n self.load_locations = {}\n self.properties = properties", "def _create_properties_table(font, format, base):\n propstrings = bytearray()\n xlfd_props = create_xlfd_properties(font)\n xlfd_props['FONT'] = create_xlfd_name(xlfd_props)\n props = []\n props_struct = base.Struct(**_PROPS)\n for key, value in xlfd_props.items():\n prop = props_struct(\n name_offset=len(propstrings),\n isStringProp=isinstance(value, str),\n )\n propstrings += key.encode('ascii', 'replace') + b'\\0'\n if prop.isStringProp:\n prop.value = len(propstrings)\n value = from_quoted_string(value)\n propstrings += value.encode('ascii', 'replace') + b'\\0'\n else:\n prop.value = int(value)\n props.append(prop)\n table_bytes = (\n bytes(le.uint32(format))\n + bytes(base.uint32(len(props)))\n + bytes((props_struct * len(props))(*props))\n # pad to next int32 boundary\n + bytes(0 if len(props)&3 == 0 else 4-(len(props)&3))\n + bytes(base.uint32(len(propstrings)))\n + bytes(propstrings)\n )\n return table_bytes, format", "def test_property_cols():\n image_file = 'input/D00572501_z_c01_r3624p01_immasked.fits.fz'\n cat_file = 'input/D00572501_z_c01_r5473p01_piff.fits'\n psf_file = os.path.join('output','test_property_cols.piff')\n hsm_file = os.path.join('output','test_property_cols_hsm.fits')\n\n nstars = 25\n scale = 0.26\n size = 15\n order = 1\n stamp_size = 25\n\n config = {\n 'input' : {\n 'nstars': nstars,\n 'image_file_name' : image_file,\n 'image_hdu' : 1,\n 'weight_hdu' : 3,\n 'badpix_hdu' : 2,\n 'cat_file_name' : cat_file,\n 'x_col' : 'XWIN_IMAGE',\n 'y_col' : 'YWIN_IMAGE',\n 'sky_col' : 'BACKGROUND',\n 'stamp_size' : stamp_size,\n 'ra' : 'TELRA',\n 'dec' : 'TELDEC',\n 'gain' : 'GAINA',\n 'satur' : 'SATURATA',\n 'chipnum': 1,\n # Select ones with a variety of dtypes.\n 'property_cols' : ['SOURCE_ID', 'GI_COLOR', 'FLAGS', 'FLAG_COLOR', 'SPREAD_MODEL'],\n },\n 'select' : {\n 'type': 'Properties',\n 'where': 'np.abs(SPREAD_MODEL) < 3.e-4',\n\n 'reserve_frac' : 0.2,\n 'seed' : 1234,\n },\n 'psf' : {\n 'model' : {\n 'type' : 
'PixelGrid',\n 'scale' : scale,\n 'size' : size,\n 'interp' : 'Lanczos(5)',\n },\n 'interp' : {\n 'type' : 'BasisPolynomial',\n 'order' : [1, 1, 1],\n 'keys': ['u', 'v', 'GI_COLOR'],\n },\n },\n 'output' : {\n 'file_name' : psf_file,\n 'stats': [\n {\n 'type': 'HSMCatalog',\n 'file_name': hsm_file,\n },\n ],\n },\n }\n\n piff.piffify(config)\n hsm = fitsio.read(hsm_file)\n cat = fitsio.read(cat_file)\n\n print('hsm dtype = ',hsm.dtype)\n print('cat dtype = ',cat.dtype)\n\n for key in hsm.dtype.names:\n print(key)\n if key in cat.dtype.names:\n assert hsm[key].dtype.type == cat[key].dtype.type\n elif key == 'reserve':\n assert hsm[key].dtype.type == np.dtype(bool).type\n elif key.startswith('flag'):\n assert hsm[key].dtype.type == np.dtype(int).type\n elif key == 'sky':\n # This one is read from the input catalog, but renamed\n assert hsm[key].dtype.type == np.float32\n else:\n assert hsm[key].dtype.type == np.dtype(float).type\n\n # Check that drawing the image works without specifying chipnum.\n # This is ok so long as the input is really only a single chip.\n # cf. Issue #140\n psf = piff.read(psf_file)\n im1 = psf.draw(35, 40, center=True, GI_COLOR=1)\n\n # If the input field didn't include chipnum, then it makes no difference for a single chip.\n del config['input']['chipnum']\n piff.piffify(config)\n psf = piff.read(psf_file)\n im2 = psf.draw(35, 40, center=True, GI_COLOR=1)\n assert im1 == im2", "def make_cake_spec():\n ###############################################################################################\n # Templates\n tmpl = make_cake_templates()\n\n ###############################################################################################\n # Objects\n cake = MaterialSpec(\n name=\"Abstract Cake\",\n template=tmpl[\"Dessert\"],\n process=ProcessSpec(\n name='Icing, in General',\n template=tmpl[\"Icing\"],\n tags=[\n 'spreading'\n ],\n notes='The act of covering a baked output with frosting'\n ),\n file_links=FileLink(\n filename=\"Becky's Butter Cake\",\n url='https://www.landolakes.com/recipe/16730/becky-s-butter-cake/'\n ),\n tags=[\n 'cake::butter cake',\n 'dessert::baked::cake',\n 'iced::chocolate'\n ],\n notes='Butter cake recipe reminiscent of the 1-2-3-4 cake that Grandma may have baked.'\n )\n\n ########################\n frosting = MaterialSpec(\n name=\"Abstract Frosting\",\n template=tmpl[\"Dessert\"],\n process=ProcessSpec(\n name='Mixing Frosting, in General',\n template=tmpl[\"Mixing\"],\n tags=[\n 'mixing'\n ],\n notes='Combining ingredients to make a sweet frosting'\n ),\n tags=[\n 'frosting::chocolate',\n 'topping::chocolate'\n ],\n notes='Chocolate frosting'\n )\n IngredientSpec(\n name=\"{} input\".format(frosting.name),\n tags=list(frosting.tags),\n notes='Seems like a lot of frosting',\n labels=['coating'],\n process=cake.process,\n material=frosting,\n absolute_quantity=NominalReal(nominal=0.751, units='kg')\n )\n\n baked_cake = MaterialSpec(\n name=\"Abstract Baked Cake\",\n template=tmpl[\"Generic Material\"],\n process=ProcessSpec(\n name='Baking, in General',\n template=tmpl[\"Baking in an oven\"],\n tags=[\n 'oven::baking'\n ],\n notes='Using heat to convert batter into a solid matrix'\n ),\n tags=[\n ],\n notes='The cakey part of the cake'\n )\n IngredientSpec(\n name=\"{} input\".format(baked_cake.name),\n tags=list(baked_cake.tags),\n labels=['substrate'],\n process=cake.process,\n material=baked_cake\n )\n\n ########################\n batter = MaterialSpec(\n name=\"Abstract Batter\",\n template=tmpl[\"Generic Material\"],\n 
process=ProcessSpec(\n name='Mixing Batter, in General',\n template=tmpl[\"Mixing\"],\n tags=[\n 'mixing'\n ],\n notes='Combining ingredients to make a baking feedstock'\n ),\n tags=[\n ],\n notes='The fluid that converts to cake with heat'\n )\n IngredientSpec(\n name=\"{} input\".format(batter.name),\n tags=list(batter.tags),\n labels=['precursor'],\n process=baked_cake.process,\n material=batter\n )\n\n ########################\n wetmix = MaterialSpec(\n name=\"Abstract Wet Mix\",\n template=tmpl[\"Generic Material\"],\n process=ProcessSpec(\n name='Mixing Wet, in General',\n template=tmpl[\"Mixing\"],\n tags=[\n 'mixing'\n ],\n notes='Combining wet ingredients to make a baking feedstock'\n ),\n tags=[\n ],\n notes='The wet fraction of a batter'\n )\n IngredientSpec(\n name=\"{} input\".format(wetmix.name),\n tags=list(wetmix.tags),\n labels=['wet'],\n process=batter.process,\n material=wetmix\n )\n\n drymix = MaterialSpec(\n name=\"Abstract Dry Mix\",\n template=tmpl[\"Generic Material\"],\n process=ProcessSpec(\n name='Mixing Dry, in General',\n template=tmpl[\"Mixing\"],\n tags=[\n 'mixing'\n ],\n notes='Combining dry ingredients to make a baking feedstock'\n ),\n tags=[\n ],\n notes='The dry fraction of a batter'\n )\n IngredientSpec(\n name=\"{} input\".format(drymix.name),\n tags=list(drymix.tags),\n labels=['dry'],\n process=batter.process,\n material=drymix,\n absolute_quantity=NominalReal(nominal=3.052, units='cups')\n )\n\n ########################\n flour = MaterialSpec(\n name=\"Abstract Flour\",\n template=tmpl[\"Generic Material\"],\n process=ProcessSpec(\n name='Buying Flour, in General',\n template=tmpl[\"Procurement\"],\n tags=[\n 'purchase::dry-goods'\n ],\n notes='Purchasing all purpose flour'\n ),\n tags=[\n ],\n notes='All-purpose flour'\n )\n IngredientSpec(\n name=\"{} input\".format(flour.name),\n tags=list(flour.tags),\n labels=['dry'],\n process=drymix.process,\n material=flour,\n volume_fraction=NominalReal(nominal=0.9829, units='') # 3 cups\n )\n\n baking_powder = MaterialSpec(\n name=\"Abstract Baking Powder\",\n template=tmpl[\"Generic Material\"],\n process=ProcessSpec(\n name='Buying Baking Powder, in General',\n template=tmpl[\"Procurement\"],\n tags=[\n 'purchase::dry-goods'\n ],\n notes='Purchasing baking powder'\n ),\n tags=[\n ],\n notes='Leavening agent for cake'\n )\n IngredientSpec(\n name=\"{} input\".format(baking_powder.name),\n tags=list(baking_powder.tags),\n labels=['leavening', 'dry'],\n process=drymix.process,\n material=baking_powder,\n volume_fraction=NominalReal(nominal=0.0137, units='') # 2 teaspoons\n )\n\n salt = MaterialSpec(\n name=\"Abstract Salt\",\n template=tmpl[\"Generic Material\"],\n process=ProcessSpec(\n name='Buying Salt, in General',\n template=tmpl[\"Procurement\"],\n tags=[\n 'purchase::dry-goods'\n ],\n notes='Purchasing salt'\n ),\n tags=[\n ],\n notes='Plain old NaCl'\n )\n IngredientSpec(\n name=\"{} input\".format(salt.name),\n tags=list(salt.tags),\n labels=['dry', 'seasoning'],\n process=drymix.process,\n material=salt,\n volume_fraction=NominalReal(nominal=0.0034, units='') # 1/2 teaspoon\n )\n\n sugar = MaterialSpec(\n name=\"Abstract Sugar\",\n template=tmpl[\"Generic Material\"],\n process=ProcessSpec(\n name='Buying Sugar, in General',\n template=tmpl[\"Procurement\"],\n tags=[\n 'purchase::dry-goods'\n ],\n notes='Purchasing all purpose flour'\n ),\n tags=[\n ],\n notes='Sugar'\n )\n IngredientSpec(\n name=\"{} input\".format(sugar.name),\n tags=list(sugar.tags),\n labels=['wet', 'sweetener'],\n 
process=wetmix.process,\n material=sugar,\n absolute_quantity=NominalReal(nominal=2, units='cups')\n )\n\n butter = MaterialSpec(\n name=\"Abstract Butter\",\n template=tmpl[\"Generic Material\"],\n process=ProcessSpec(\n name='Buying Butter, in General',\n template=tmpl[\"Procurement\"],\n tags=[\n 'purchase::produce'\n ],\n notes='Purchasing butter'\n ),\n tags=[\n ],\n notes='Shortening for making rich, buttery baked goods'\n )\n IngredientSpec(\n name=\"{} input\".format(butter.name),\n tags=list(butter.tags),\n labels=['wet', 'shortening'],\n process=wetmix.process,\n material=butter,\n absolute_quantity=NominalReal(nominal=1, units='cups')\n )\n IngredientSpec(\n name=\"{} input\".format(butter.name),\n tags=list(butter.tags),\n labels=['shortening'],\n process=frosting.process,\n material=butter,\n mass_fraction=NominalReal(nominal=0.1434, units='') # 1/2 c @ 0.911 g/cc\n )\n\n eggs = MaterialSpec(\n name=\"Abstract Eggs\",\n template=tmpl[\"Generic Material\"],\n process=ProcessSpec(\n name='Buying Eggs, in General',\n template=tmpl[\"Procurement\"],\n tags=[\n 'purchase::produce'\n ],\n notes='Purchasing eggs'\n ),\n tags=[\n ],\n notes=''\n )\n IngredientSpec(\n name=\"{} input\".format(eggs.name),\n tags=list(eggs.tags),\n labels=['wet'],\n process=wetmix.process,\n material=eggs,\n absolute_quantity=NominalReal(nominal=4, units='')\n )\n\n vanilla = MaterialSpec(\n name=\"Abstract Vanilla\",\n template=tmpl[\"Generic Material\"],\n process=ProcessSpec(\n name='Buying Vanilla, in General',\n template=tmpl[\"Procurement\"],\n tags=[\n 'purchase::dry-goods'\n ],\n notes='Purchasing vanilla'\n ),\n tags=[\n ],\n notes=''\n )\n IngredientSpec(\n name=\"{} input\".format(vanilla.name),\n tags=list(vanilla.tags),\n labels=['wet', 'flavoring'],\n process=wetmix.process,\n material=vanilla,\n absolute_quantity=NominalReal(nominal=2, units='teaspoons')\n )\n IngredientSpec(\n name=\"{} input\".format(vanilla.name),\n tags=list(vanilla.tags),\n labels=['flavoring'],\n process=frosting.process,\n material=vanilla,\n mass_fraction=NominalReal(nominal=0.0231, units='') # 2 tsp @ 0.879 g/cc\n )\n\n milk = MaterialSpec(\n name=\"Abstract Milk\",\n template=tmpl[\"Generic Material\"],\n process=ProcessSpec(\n name='Buying Milk, in General',\n template=tmpl[\"Procurement\"],\n tags=[\n 'purchase::produce'\n ],\n notes='Purchasing milk'\n ),\n tags=[\n ],\n notes=''\n )\n IngredientSpec(\n name=\"{} input\".format(milk.name),\n tags=list(milk.tags),\n labels=['wet'],\n process=batter.process,\n material=milk,\n absolute_quantity=NominalReal(nominal=1, units='cup')\n )\n IngredientSpec(\n name=\"{} input\".format(milk.name),\n tags=list(milk.tags),\n labels=[],\n process=frosting.process,\n material=milk,\n mass_fraction=NominalReal(nominal=0.0816, units='') # 1/4 c @ 1.037 g/cc\n )\n\n chocolate = MaterialSpec(\n name=\"Abstract Chocolate\",\n template=tmpl[\"Generic Material\"],\n process=ProcessSpec(\n name='Buying Chocolate, in General',\n template=tmpl[\"Procurement\"],\n tags=[\n 'purchase::dry-goods'\n ],\n notes='Purchasing chocolate'\n ),\n tags=[\n ],\n notes=''\n )\n IngredientSpec(\n name=\"{} input\".format(chocolate.name),\n tags=list(chocolate.tags),\n labels=['flavoring'],\n process=frosting.process,\n material=chocolate,\n mass_fraction=NominalReal(nominal=0.1132, units='') # 3 oz.\n )\n\n powder_sugar = MaterialSpec(\n name=\"Abstract Powdered Sugar\",\n template=tmpl[\"Generic Material\"],\n process=ProcessSpec(\n name='Buying Powdered Sugar, in General',\n 
template=tmpl[\"Procurement\"],\n tags=[\n 'purchase::dry-goods'\n ],\n notes='Purchasing powdered sugar'\n ),\n tags=[\n ],\n notes='Granulated sugar mixed with corn starch'\n )\n IngredientSpec(\n name=\"{} input\".format(powder_sugar.name),\n tags=list(powder_sugar.tags),\n labels=['flavoring'],\n process=frosting.process,\n material=powder_sugar,\n mass_fraction=NominalReal(nominal=0.6387, units='') # 4 c @ 30 g/ 0.25 cups\n )\n return cake", "def test_properties_get(self):\n pass", "def init_prop(obj):\n if 'Test_object' not in obj:\n obj['Test_object'] = \"None\"\n if 'Test_ratio' not in obj:\n obj['Test_ratio'] = 1\n if 'Correct_color' not in obj:\n obj['Correct_color'] = 0, 1.0, 0\n if 'Wrong_color' not in obj:\n obj['Wrong_color'] = 1.0, 0, 0\n if 'TEST' not in obj:\n obj[\"TEST\"] = \"INACTIVE\"\n\n if 'Active_Dialogue' not in obj:\n obj['Active_Dialogue'] = {}\n\n if 'STORY_MODE' not in obj:\n obj['STORY_MODE'] = \"NORMAL\"\n\n if 'SOLVED' not in obj:\n obj['SOLVED'] = \"No\"\n\n if 'SLIDE' not in obj:\n obj['SLIDE'] = 0\n if 'ACTIVE' not in obj:\n obj['ACTIVE'] = None\n if 'TEST_MODE' not in obj:\n obj['TEST_MODE'] = \"Off\"\n #Set run speed\n if 'running' not in obj:\n obj['running'] = 20\n #Set jump force\n if 'jump_force' not in obj:\n obj['jump_force'] = 20\n #Toggles first person mode\n if 'view_mode' not in obj:\n obj['view_mode'] = 'THIRD_PERSON'\n #The fp thumbstick layout\n if 'thumbstick_layout' not in obj:\n obj['thumbstick_layout'] = 'DEFAULT' #can be DEFAULT, LEGACY, SOUTHPAW, or LEGACYSOUTHPAW\n #Look invert for fp_mode\n if 'look_invert' not in obj:\n #1 = not inverted, -1 = inverted\n obj['look_invert'] = 1\n #When Camera has reached its destined position\n if 'cam_set' not in obj:\n obj['cam_set'] = 'Off'\n if 'index' not in obj:\n obj['index'] = 0", "def test_list_properties(self):\n pass", "def __init__(self, *properties):\n self._properties = properties", "def generate_object_specs(self):\n return [[] for _ in xrange(self.batch_size)]", "def create(self, validated_data):\n new_spec = Specification(key = validated_data.get('key'),\n value = validated_data.get('value'),\n category = validated_data.get('category'),\n car = validated_data.get('car'),)\n new_spec.save()\n\n return new_spec", "def build(self):\n raise NotImplementedError(\"This should have been implemented.\")", "def properties(self) -> Optional[pulumi.Input['CosmosDBSpecPropertiesArgs']]:\n return pulumi.get(self, \"properties\")", "def test_fields_from_property():\n prop_template = PropertyTemplate(name=\"cookie eating template\", bounds=IntegerBounds(0, 1000))\n cond_template = ConditionTemplate(name=\"Hunger template\",\n bounds=CategoricalBounds([\"hungry\", \"full\", \"peckish\"]))\n prop = Property(name=\"number of cookies eaten\",\n template=prop_template,\n origin='measured',\n value=NominalInteger(27))\n cond = Condition(name=\"hunger level\",\n template=cond_template,\n origin='specified',\n value=NominalCategorical(\"hungry\"))\n\n prop_and_conds = PropertyAndConditions(property=prop, conditions=[cond])\n assert prop_and_conds.name == prop.name\n assert prop_and_conds.template == prop.template\n assert prop_and_conds.origin == prop.origin\n assert prop_and_conds.value == prop.value", "def __init__(self, property_config_info_list=None, object_def_name=None, description=None, search_boost=None, owner_type=None, value_type=None, enum_values=None, namespace=None, editable_in_version=None, blob_mime_type=None, safe_name=None, owner_id=None, array=None, editable_in_microversion=None, 
id=None, name=None): # noqa: E501 # noqa: E501\n\n self._property_config_info_list = None\n self._object_def_name = None\n self._description = None\n self._search_boost = None\n self._owner_type = None\n self._value_type = None\n self._enum_values = None\n self._namespace = None\n self._editable_in_version = None\n self._blob_mime_type = None\n self._safe_name = None\n self._owner_id = None\n self._array = None\n self._editable_in_microversion = None\n self._id = None\n self._name = None\n self.discriminator = None\n\n if property_config_info_list is not None:\n self.property_config_info_list = property_config_info_list\n if object_def_name is not None:\n self.object_def_name = object_def_name\n if description is not None:\n self.description = description\n if search_boost is not None:\n self.search_boost = search_boost\n if owner_type is not None:\n self.owner_type = owner_type\n if value_type is not None:\n self.value_type = value_type\n if enum_values is not None:\n self.enum_values = enum_values\n if namespace is not None:\n self.namespace = namespace\n if editable_in_version is not None:\n self.editable_in_version = editable_in_version\n if blob_mime_type is not None:\n self.blob_mime_type = blob_mime_type\n if safe_name is not None:\n self.safe_name = safe_name\n if owner_id is not None:\n self.owner_id = owner_id\n if array is not None:\n self.array = array\n if editable_in_microversion is not None:\n self.editable_in_microversion = editable_in_microversion\n if id is not None:\n self.id = id\n if name is not None:\n self.name = name", "def to_spec(self) -> dict[str, typing.Any]:\n spec = {\n \"name\": self.name,\n \"title\": self.title,\n \"comment\": self.comment,\n \"references\": self.references,\n \"institution\": self.institution,\n \"hierarchical\": self.hierarchical,\n \"last_update\": self.last_update.isoformat(),\n }\n if self.version is not None:\n spec[\"version\"] = self.version\n categories = {}\n for cat in self.values():\n code, cat_spec = cat.to_spec()\n categories[code] = cat_spec\n spec[\"categories\"] = categories\n\n return spec", "def __init__(self, goal):\n self._name = goal.get('name', '')\n self._description = goal.get('description', '')\n self._build_type = goal.get('buildType', 'minSizeRel')\n self._build_vars = goal.get('buildVars', {})\n self._build_goal = goal.get('buildGoal', self._name)\n self._artifacts = goal.get('artifacts', [])\n self._builds = {}\n for b in goal['builds']:\n vars = b.get('buildVars', self._build_vars)\n type = b.get('buildType', self._build_type)\n build_goal = b.get('buildGoal', self._build_goal)\n description = b.get('description', '')\n arch = b['arch']\n script = b.get('script', None)\n artifacts = b.get('artifacts', self._artifacts)\n self._builds[arch] = BuildSpec(goal=build_goal,\n type=type,\n vars=vars,\n description=description,\n arch=arch,\n script=script,\n artifacts=artifacts)", "def __init__(self, model):\n self.model = model\n self.nproperties = 0\n\n #: stores PSHELL, PCOMP, PCOMPG\n self.properties_shell = model.properties_shell\n\n # shear\n #: stores PSHEAR\n self.pshear = model.pshear\n\n # spring\n self.pelas = model.pelas\n\n # bush\n self.pbush = model.pbush\n\n # rods\n #self.conrod = model.conrod\n #self.crod = model.crod\n self.prod = model.prod\n\n # mass\n #: stores CONM1, CONM2, CMASS1, CMASS2, CMASS3, CMASS4, CMASS5, PMASS\n self.mass = model.mass\n\n # bars\n #: stores PBAR, PBARL\n self.properties_bar = model.properties_bar\n\n # beams\n #: stores PBEAM, PBEAML\n self.properties_beam = 
model.properties_beam\n\n # solids\n #: stores PSOLID, PLSOLID\n self.properties_solid = model.properties_solid\n\n # created by this class\n self.property_ids = None\n self.n = None\n self.property_groups = None", "def _build_study_spec(study_spec: study_pb2.StudySpec, state: int,\n creation_time: datetime.datetime) -> study_pb2.StudySpec:\n study_spec.state = state\n study_spec.creation_time.FromDatetime(creation_time)\n return study_spec", "def test_properties_distribution_get(self):\n pass", "def build(self):\n pass", "def build(self):\n pass", "def test_type_builder_handles_nested_properties():\n schema = [\n SchemaObject(\n name=\"ClassWithNestedClass\",\n properties=[\n SchemaObject(\n name=\"nestedValue\",\n properties=[\n SchemaValue(name=\"string_value\", value_type=\"string\"),\n SchemaEnum(\n name=\"enum_value\",\n value_type=\"string\",\n values=[\"hey\", \"new\", \"value\"],\n ),\n ],\n ),\n ],\n )\n ]\n\n build_result = build_types(schema)\n\n assert len(build_result) == 3\n assert build_result[0] == ClassDefinition(\n name=\"ClassWithNestedClass\",\n properties=[\n PropertyDefinition(\n name=\"nested_value\",\n key=\"nestedValue\",\n value_type=\"ClassWithNestedClassNestedValue\",\n known_type=False,\n ),\n ],\n depends_on={\"ClassWithNestedClassNestedValue\"},\n )\n assert build_result[1] == ClassDefinition(\n name=\"ClassWithNestedClassNestedValue\",\n properties=[\n PropertyDefinition(\n name=\"string_value\",\n key=\"string_value\",\n value_type=\"str\",\n known_type=True,\n ),\n PropertyDefinition(\n name=\"enum_value\",\n key=\"enum_value\",\n value_type=\"ClassWithNestedClassNestedValueEnumValue\",\n known_type=False,\n ),\n ],\n depends_on={\"ClassWithNestedClassNestedValueEnumValue\"},\n )\n assert build_result[2] == EnumDefinition(\n name=\"ClassWithNestedClassNestedValueEnumValue\",\n values=[(\"HEY\", \"hey\"), (\"NEW\", \"new\"), (\"VALUE\", \"value\")],\n depends_on=set(),\n )", "def properties(self):\n raise NotImplementedError", "def build(self):", "def build(self):", "def build(self):", "def custom_props():\r\n section = document.add_section()\r\n new_width, new_height = section.page_height, section.page_width\r\n section.orientation = WD_ORIENT.LANDSCAPE\r\n section.page_width = 7772400\r\n section.page_height = 10058400\r\n document.add_heading('Custom Properties', level=1)\r\n\r\n customproperties = get_qlik_sense.get_customprop()\r\n num_of_customproperties = len(customproperties)\r\n table = document.add_table(rows=num_of_customproperties+1, cols=3)\r\n table.style = 'Grid Table 1 Light Accent 1'\r\n row = table.rows[0]\r\n row.cells[0].text = 'name'\r\n row.cells[1].text = 'choice values'\r\n row.cells[2].text = 'object types'\r\n\r\n for customproperty in range(num_of_customproperties):\r\n row = table.rows[customproperty+1]\r\n row.cells[0].text = str(customproperties[customproperty][0])\r\n row.cells[1].text = ', '.join(customproperties[customproperty][1])\r\n row.cells[2].text = ', '.join(customproperties[customproperty][2])\r\n document.add_page_break()", "def build(self):\n raise NotImplementedError", "def generate(self):\n fleet_config = self._build_base_object()\n fleet_config['LaunchSpecifications'] = list(self._build_launch_specs_object())\n return fleet_config", "def __init__(self, *specs: Specification) -> None:\n self.specs = specs", "def _get_object_properties(self):\n # Parse element tree to get all relevant bodies, joints, actuators, and geom groups\n _elements = sort_elements(root=self.get_obj())\n # print(ET.tostring(self.get_obj(), 
encoding='unicode', method='xml'))\n assert len(_elements[\"root_body\"]) == 1, \"Invalid number of root bodies found for robot model. Expected 1,\" \\\n \"got {}\".format(len(_elements[\"root_body\"]))\n _elements[\"root_body\"] = _elements[\"root_body\"][0]\n _elements[\"bodies\"] = [_elements[\"root_body\"]] + _elements[\"bodies\"] if \"bodies\" in _elements else \\\n [_elements[\"root_body\"]]\n self._root_body = _elements[\"root_body\"].get(\"name\")\n self._bodies = [e.get(\"name\") for e in _elements.get(\"bodies\", [])]\n self._joints = [e.get(\"name\") for e in _elements.get(\"joints\", [])]\n self._actuators = [e.get(\"name\") for e in _elements.get(\"actuators\", [])]\n self._sites = [e.get(\"name\") for e in _elements.get(\"sites\", [])]\n self._sensors = [e.get(\"name\") for e in _elements.get(\"sensors\", [])]\n composite_obj = _elements[\"root_body\"].find(\"./body/composite\")\n if composite_obj is not None:\n self._count = np.fromstring(composite_obj.get(\"count\"), dtype=int, sep=' ')\n self._composite_type = composite_obj.get(\"type\")\n self._spacing = float(composite_obj.get(\"spacing\"))\n assert len(self._count) == 3, \"the length of count must be 3, got: {} instead.\".format(len(self._count))\n dim = 3 - np.sum(self._count==1)\n self._composite_shape = [self._spacing * (self._count[i] - 1) for i in range(dim)]\n if dim == 1:\n self._contact_geoms = [f'G{i}' for i in range(self._count[0])] \n elif dim == 2: \n self._contact_geoms = [f'G{i}_{j}' for j in range(self._count[1])\n for i in range(self._count[0])]\n elif dim == 3:\n self._contact_geoms = [f'G{i}_{j}_{k}' for k in range(self._count[2])\n for j in range(self._count[1])\n for i in range(self._count[0])]\n else:\n self._contact_geoms = [e.get(\"name\") for e in _elements.get(\"contact_geoms\", [])]\n self._visual_geoms = [e.get(\"name\") for e in _elements.get(\"visual_geoms\", [])]\n\n # Add default materials if we're using domain randomization\n if macros.USING_INSTANCE_RANDOMIZATION:\n tex_element, mat_element, _, used = add_material(root=self.get_obj(), naming_prefix=self.naming_prefix)\n # Only add the material / texture if they were actually used\n if used:\n self.asset.append(tex_element)\n self.asset.append(mat_element)\n\n # Add prefix to all elements\n add_prefix(root=self.get_obj(), prefix=self.naming_prefix, exclude=self.exclude_from_prefixing)", "def get_spec(self):\n from schematics.types import ModelType\n spec = {\n 'id': self.name,\n 'description': self.description,\n 'addressable': self.array,\n 'required': self.required,\n }\n if self.type.has_schema:\n spec['schema'] = self.type.get_spec()\n else:\n spec.update(self.type.get_spec())\n\n return spec", "def _build_accessor(bufferview, ele_type, comptype_id, count, max_vals, min_vals, byte_offset, normalized):\n normalized = None if not normalized else normalized\n\n new_accessor = {\n \"bufferView\": bufferview,\n \"componentType\": comptype_id,\n \"type\": ele_type,\n \"count\": count,\n }\n\n properties_keys = [\"byteOffset\", \"normalized\", \"max\", \"min\"]\n properties_values = [byte_offset, normalized, max_vals, min_vals]\n\n for key, val in zip(properties_keys, properties_values):\n if val is not None:\n new_accessor[key] = val\n\n return new_accessor", "def test_should_return_correct_gremlin_for_property(self):\r\n expected = 'updated_at = g.makeType().name(\"updated_at\").dataType(Integer.class).functional(true).makePropertyKey()'\r\n prop = self.spec_parser.parse_property(self.property_spec)\r\n assert prop.gremlin == 
expected\r\n\r\n expected = 'updated_at = g.makeType().name(\"updated_at\").dataType(Integer.class).functional(false).makePropertyKey()'\r\n self.property_spec['locking'] = False\r\n self.spec_parser._properties = {} # Reset saved properties\r\n self.spec_parser._names = []\r\n prop = self.spec_parser.parse_property(self.property_spec)\r\n assert prop.gremlin == expected\r\n\r\n expected = 'updated_at = g.makeType().name(\"updated_at\").dataType(Integer.class).functional(false).indexed().makePropertyKey()'\r\n self.property_spec['locking'] = False\r\n self.property_spec['indexed'] = True\r\n self.spec_parser._properties = {} # Reset saved properties\r\n self.spec_parser._names = []\r\n prop = self.spec_parser.parse_property(self.property_spec)\r\n assert prop.gremlin == expected\r\n\r\n expected = 'updated_at = g.makeType().name(\"updated_at\").dataType(Integer.class).makePropertyKey()'\r\n self.property_spec['functional'] = False\r\n self.property_spec['indexed'] = False\r\n self.spec_parser._properties = {} # Reset saved properties\r\n self.spec_parser._names = []\r\n prop = self.spec_parser.parse_property(self.property_spec)\r\n assert prop.gremlin == expected\r\n\r\n expected = 'updated_at = g.makeType().name(\"updated_at\").dataType(Integer.class).unique().makePropertyKey()'\r\n self.property_spec['functional'] = False\r\n self.property_spec['indexed'] = False\r\n self.property_spec['unique'] = True\r\n self.spec_parser._properties = {} # Reset saved properties\r\n self.spec_parser._names = []\r\n prop = self.spec_parser.parse_property(self.property_spec)\r\n assert prop.gremlin == expected, prop.gremlin", "def get_prop_filter_spec(client_factory, obj_spec, prop_spec):\r\n prop_filter_spec = \\\r\n client_factory.create('ns0:PropertyFilterSpec')\r\n prop_filter_spec.propSet = prop_spec\r\n prop_filter_spec.objectSet = obj_spec\r\n return prop_filter_spec", "def build_rspec(cls, slice_urn, resources, leases, flowspace, vms, rspec_version='GENI 3 request'):\n rspec = RSpec(version=rspec_version)\n\n nodes = []\n channels = []\n links = []\n\n # XXX Here it is only about mappings and hooks between ontologies\n\n for urn in resources:\n # XXX TO BE CORRECTED, this handles None values\n if not urn:\n continue\n resource = dict()\n # TODO: take into account the case where we send a dict of URNs without keys\n #resource['component_id'] = resource.pop('urn')\n resource['component_id'] = urn\n resource_hrn, resource_type = urn_to_hrn(urn) # resource['component_id'])\n # build component_manager_id\n top_auth = resource_hrn.split('.')[0]\n cm = urn.split(\"+\")\n resource['component_manager_id'] = \"%s+%s+authority+cm\" % (cm[0],top_auth)\n\n #print \"resource_type\", resource_type\n if resource_type == 'node':\n #print \"NODE\", resource, cls\n resource = cls.on_build_resource_hook(resource)\n nodes.append(resource)\n elif resource_type == 'link':\n links.append(resource)\n elif resource_type == 'channel':\n channels.append(resource)\n else:\n raise Exception, \"Not supported type of resource\" \n\n #for node in nodes:\n # print \"NODE:\", node\n\n rspec.version.add_nodes(nodes, rspec_content_type=\"request\")\n\n #rspec.version.add_links(links)\n #rspec.version.add_channels(channels)\n\n sfa_leases = cls.manifold_to_sfa_leases(leases, slice_urn)\n #print \"sfa_leases\", sfa_leases\n if sfa_leases:\n # SFAWRAP BUG ???\n # rspec.version.add_leases bugs with an empty set of leases\n # slice_id = leases[0]['slice_id']\n # TypeError: list indices must be integers, not str\n 
rspec.version.add_leases(sfa_leases, []) # XXX Empty channels for now\n \n return rspec.toxml()", "def build (self):\n raise NotImplementedError", "def build_parameters(pobj):\n ViscosityWilke.build_parameters(pobj)", "def test_dev_props(name, properties):\n assert properties['x']\n assert properties['y']", "def properties(self) -> google.protobuf.internal.containers.MessageMap[builtins.str, global___Expression]:", "def get_prop_filter_spec(client_factory, obj_spec, prop_spec):\n prop_filter_spec = client_factory.create('ns0:PropertyFilterSpec')\n prop_filter_spec.propSet = prop_spec\n prop_filter_spec.objectSet = obj_spec\n return prop_filter_spec", "def _make_specs(cls, doc_id=None, specs=None):\n final_specs = {}\n if doc_id:\n final_specs['_id'] = cls._id_type(doc_id)\n if specs:\n final_specs.update(specs)\n cls._add_shard(final_specs)\n return final_specs", "def test_properties_evolution_get(self):\n pass", "def build(_):", "def write_properties(props):\n root = Element('{%s}coreProperties' % COREPROPS_NS)\n for attr in (\"creator\", \"title\", \"description\", \"subject\", \"identifier\",\n \"language\"):\n SubElement(root, '{%s}%s' % (DCORE_NS, attr)).text = getattr(props, attr)\n\n for attr in (\"created\", \"modified\"):\n value = datetime_to_W3CDTF(getattr(props, attr))\n SubElement(root, '{%s}%s' % (DCTERMS_NS, attr),\n {'{%s}type' % XSI_NS:'%s:W3CDTF' % DCTERMS_PREFIX}).text = value\n\n for attr in (\"lastModifiedBy\", \"category\", \"contentStatus\", \"version\",\n \"revision\", \"keywords\"):\n SubElement(root, '{%s}%s' % (COREPROPS_NS, attr)).text = getattr(props, attr)\n\n if props.lastPrinted is not None:\n SubElement(root, \"{%s}lastPrinted\" % COREPROPS_NS).text = datetime_to_W3CDTF(props.lastPrinted\n )\n return tostring(root)" ]
[ "0.7005923", "0.6116688", "0.5938139", "0.58759177", "0.58554643", "0.57825583", "0.5649714", "0.5636901", "0.5603624", "0.559764", "0.5581322", "0.5505116", "0.54937416", "0.54541737", "0.5407613", "0.5344718", "0.532845", "0.5320788", "0.52512217", "0.5245343", "0.5237961", "0.5233512", "0.5223699", "0.52128834", "0.51999176", "0.5194127", "0.5187995", "0.5186603", "0.5179023", "0.51659966", "0.5161699", "0.514408", "0.5137324", "0.5130391", "0.5129957", "0.51294297", "0.5123039", "0.51222354", "0.51079863", "0.5100313", "0.5099741", "0.5095497", "0.50851053", "0.5069021", "0.5064936", "0.5062833", "0.5051044", "0.5041073", "0.5029328", "0.5029328", "0.5029328", "0.5027412", "0.50191593", "0.50186986", "0.50186986", "0.5006436", "0.5002727", "0.49960622", "0.49875087", "0.4980173", "0.497877", "0.49754873", "0.49744502", "0.49717888", "0.49686354", "0.49624923", "0.4960792", "0.49607036", "0.49606106", "0.49572065", "0.49545452", "0.4947004", "0.49441564", "0.493368", "0.49237418", "0.49237418", "0.4923306", "0.49224463", "0.49200797", "0.49200797", "0.49200797", "0.49151868", "0.49105722", "0.4910486", "0.49083358", "0.4899404", "0.4898481", "0.48889568", "0.4888941", "0.4881559", "0.48809817", "0.48707977", "0.4870236", "0.4862061", "0.48604214", "0.4853045", "0.48516753", "0.4850755", "0.48443273", "0.4842912" ]
0.59059453
3
Builds the Object Spec object.
def get_obj_spec(client_factory, obj, select_set=None): obj_spec = client_factory.create('ns0:ObjectSpec') obj_spec.obj = obj obj_spec.skip = False if select_set is not None: obj_spec.selectSet = select_set return obj_spec
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_object_spec(client_factory, root_folder, traversal_specs):\r\n object_spec = client_factory.create('ns0:ObjectSpec')\r\n object_spec.obj = root_folder\r\n object_spec.skip = False\r\n object_spec.selectSet = traversal_specs\r\n return object_spec", "def build(self, spec, prefix):\n make()", "def generate_specs_build(self):\n from django_swagger_utils.drf_server.generators.swagger_generator import SwaggerGenerator\n\n swagger_gen = SwaggerGenerator(self.parser, self.paths, self.app_name)\n # generating request_response files\n swagger_gen.generate_request_response()\n # testing properties\n swagger_gen.generate_definitions()\n # generating global parameters\n swagger_gen.generate_parameters()\n # generating global response\n swagger_gen.generate_responses()\n # generating urls\n swagger_gen.generate_urls()", "def _build_pod_spec(self):\n logger.debug(\"Building Pod Spec\")\n crds = []\n try:\n crds = [\n yaml.load(Path(f).read_text())\n for f in [\n \"files/configs.config.gatekeeper.sh.yaml\",\n \"files/constrainttemplates.templates.gatekeeper.sh.yaml\",\n \"files/constraintpodstatuses.status.gatekeeper.sh.yaml\",\n \"files/constrainttemplatepodstatuses.status.gatekeeper.sh.yaml\",\n ]\n ]\n except yaml.YAMLError as exc:\n logger.error(\"Error in configuration file:\", exc)\n\n crd_objects = [\n CustomResourceDefintion(crd[\"metadata\"][\"name\"], crd[\"spec\"])\n for crd in crds\n ]\n\n config = self.model.config\n spec_template = {}\n with open(\"files/pod-spec.yaml.jinja2\") as fh:\n spec_template = Template(fh.read())\n\n try:\n image_details = self.image.fetch()\n except OCIImageResourceError as e:\n self.model.unit.status = e.status\n return\n\n template_args = {\n \"crds\": crd_objects,\n \"image_details\": image_details,\n \"imagePullPolicy\": config[\"imagePullPolicy\"],\n \"app_name\": self.app.name,\n \"audit_cli_args\": self._audit_cli_args(),\n \"namespace\": os.environ[\"JUJU_MODEL_NAME\"],\n }\n\n spec = yaml.load(spec_template.render(**template_args))\n\n print(f\"Pod spec: {spec}\")\n return spec", "def build():", "def test_01_BuildObjects(self):\n # print(PrettyFormatAny.form(self.m_pyhouse_obj, 'A1-01-A - Main', 190))\n # print(PrettyFormatAny.form(self.m_pyhouse_obj.House, 'A1-02-B - House', 190))\n # print(PrettyFormatAny.form(self.m_pyhouse_obj.House.Location, 'A1-01-C - Location', 190))\n self.assertIsInstance(self.m_pyhouse_obj, PyHouseInformation)\n self.assertIsInstance(self.m_pyhouse_obj.House, HouseInformation)\n self.assertIsInstance(self.m_pyhouse_obj.House.Location, LocationInformationPrivate)", "def __init__(self, spec):\n self.spec = spec", "def generate_object_specs(self):\n return [[] for _ in xrange(self.batch_size)]", "def build_obj_spec(\n obj_key,\n parameter_dict,\n experiment_name=None,\n obj_type=\"policy\",\n output=\"./policy_yamls/\",\n):\n # Set name via timestamp if not specified\n if obj_type not in [\"policy\", \"estimator\", \"dataset\"]:\n print(\"Invalid type: {}\".format(obj_type))\n return None\n\n if experiment_name == None:\n now = datetime.now()\n current_time = now.strftime(\"%H%M%S\")\n experiment_name = \"experiment_{}\".format(current_time)\n\n # Build dict structure\n obj_dict = {\n \"name\": experiment_name,\n \"type\": obj_type,\n \"key\": obj_key,\n \"parameters\": parameter_dict,\n }\n\n # Set output folder\n output_folder = os.path.join(output, experiment_name)\n if not os.path.exists(output_folder):\n os.makedirs(output_folder)\n\n with open(\n os.path.join(output_folder, \"{}_spec.yaml\".format(obj_type)), 
\"w\"\n ) as file:\n yaml.dump(obj_dict, file)\n\n return obj_dict", "def build(self) -> None:", "def _build(self):", "def _build(self):", "def build(self):\n pass", "def build(self):\n pass", "def build(self):\n raise NotImplementedError(\"This should have been implemented.\")", "def make_cake_spec():\n ###############################################################################################\n # Templates\n tmpl = make_cake_templates()\n\n ###############################################################################################\n # Objects\n cake = MaterialSpec(\n name=\"Abstract Cake\",\n template=tmpl[\"Dessert\"],\n process=ProcessSpec(\n name='Icing, in General',\n template=tmpl[\"Icing\"],\n tags=[\n 'spreading'\n ],\n notes='The act of covering a baked output with frosting'\n ),\n file_links=FileLink(\n filename=\"Becky's Butter Cake\",\n url='https://www.landolakes.com/recipe/16730/becky-s-butter-cake/'\n ),\n tags=[\n 'cake::butter cake',\n 'dessert::baked::cake',\n 'iced::chocolate'\n ],\n notes='Butter cake recipe reminiscent of the 1-2-3-4 cake that Grandma may have baked.'\n )\n\n ########################\n frosting = MaterialSpec(\n name=\"Abstract Frosting\",\n template=tmpl[\"Dessert\"],\n process=ProcessSpec(\n name='Mixing Frosting, in General',\n template=tmpl[\"Mixing\"],\n tags=[\n 'mixing'\n ],\n notes='Combining ingredients to make a sweet frosting'\n ),\n tags=[\n 'frosting::chocolate',\n 'topping::chocolate'\n ],\n notes='Chocolate frosting'\n )\n IngredientSpec(\n name=\"{} input\".format(frosting.name),\n tags=list(frosting.tags),\n notes='Seems like a lot of frosting',\n labels=['coating'],\n process=cake.process,\n material=frosting,\n absolute_quantity=NominalReal(nominal=0.751, units='kg')\n )\n\n baked_cake = MaterialSpec(\n name=\"Abstract Baked Cake\",\n template=tmpl[\"Generic Material\"],\n process=ProcessSpec(\n name='Baking, in General',\n template=tmpl[\"Baking in an oven\"],\n tags=[\n 'oven::baking'\n ],\n notes='Using heat to convert batter into a solid matrix'\n ),\n tags=[\n ],\n notes='The cakey part of the cake'\n )\n IngredientSpec(\n name=\"{} input\".format(baked_cake.name),\n tags=list(baked_cake.tags),\n labels=['substrate'],\n process=cake.process,\n material=baked_cake\n )\n\n ########################\n batter = MaterialSpec(\n name=\"Abstract Batter\",\n template=tmpl[\"Generic Material\"],\n process=ProcessSpec(\n name='Mixing Batter, in General',\n template=tmpl[\"Mixing\"],\n tags=[\n 'mixing'\n ],\n notes='Combining ingredients to make a baking feedstock'\n ),\n tags=[\n ],\n notes='The fluid that converts to cake with heat'\n )\n IngredientSpec(\n name=\"{} input\".format(batter.name),\n tags=list(batter.tags),\n labels=['precursor'],\n process=baked_cake.process,\n material=batter\n )\n\n ########################\n wetmix = MaterialSpec(\n name=\"Abstract Wet Mix\",\n template=tmpl[\"Generic Material\"],\n process=ProcessSpec(\n name='Mixing Wet, in General',\n template=tmpl[\"Mixing\"],\n tags=[\n 'mixing'\n ],\n notes='Combining wet ingredients to make a baking feedstock'\n ),\n tags=[\n ],\n notes='The wet fraction of a batter'\n )\n IngredientSpec(\n name=\"{} input\".format(wetmix.name),\n tags=list(wetmix.tags),\n labels=['wet'],\n process=batter.process,\n material=wetmix\n )\n\n drymix = MaterialSpec(\n name=\"Abstract Dry Mix\",\n template=tmpl[\"Generic Material\"],\n process=ProcessSpec(\n name='Mixing Dry, in General',\n template=tmpl[\"Mixing\"],\n tags=[\n 'mixing'\n ],\n notes='Combining dry ingredients 
to make a baking feedstock'\n ),\n tags=[\n ],\n notes='The dry fraction of a batter'\n )\n IngredientSpec(\n name=\"{} input\".format(drymix.name),\n tags=list(drymix.tags),\n labels=['dry'],\n process=batter.process,\n material=drymix,\n absolute_quantity=NominalReal(nominal=3.052, units='cups')\n )\n\n ########################\n flour = MaterialSpec(\n name=\"Abstract Flour\",\n template=tmpl[\"Generic Material\"],\n process=ProcessSpec(\n name='Buying Flour, in General',\n template=tmpl[\"Procurement\"],\n tags=[\n 'purchase::dry-goods'\n ],\n notes='Purchasing all purpose flour'\n ),\n tags=[\n ],\n notes='All-purpose flour'\n )\n IngredientSpec(\n name=\"{} input\".format(flour.name),\n tags=list(flour.tags),\n labels=['dry'],\n process=drymix.process,\n material=flour,\n volume_fraction=NominalReal(nominal=0.9829, units='') # 3 cups\n )\n\n baking_powder = MaterialSpec(\n name=\"Abstract Baking Powder\",\n template=tmpl[\"Generic Material\"],\n process=ProcessSpec(\n name='Buying Baking Powder, in General',\n template=tmpl[\"Procurement\"],\n tags=[\n 'purchase::dry-goods'\n ],\n notes='Purchasing baking powder'\n ),\n tags=[\n ],\n notes='Leavening agent for cake'\n )\n IngredientSpec(\n name=\"{} input\".format(baking_powder.name),\n tags=list(baking_powder.tags),\n labels=['leavening', 'dry'],\n process=drymix.process,\n material=baking_powder,\n volume_fraction=NominalReal(nominal=0.0137, units='') # 2 teaspoons\n )\n\n salt = MaterialSpec(\n name=\"Abstract Salt\",\n template=tmpl[\"Generic Material\"],\n process=ProcessSpec(\n name='Buying Salt, in General',\n template=tmpl[\"Procurement\"],\n tags=[\n 'purchase::dry-goods'\n ],\n notes='Purchasing salt'\n ),\n tags=[\n ],\n notes='Plain old NaCl'\n )\n IngredientSpec(\n name=\"{} input\".format(salt.name),\n tags=list(salt.tags),\n labels=['dry', 'seasoning'],\n process=drymix.process,\n material=salt,\n volume_fraction=NominalReal(nominal=0.0034, units='') # 1/2 teaspoon\n )\n\n sugar = MaterialSpec(\n name=\"Abstract Sugar\",\n template=tmpl[\"Generic Material\"],\n process=ProcessSpec(\n name='Buying Sugar, in General',\n template=tmpl[\"Procurement\"],\n tags=[\n 'purchase::dry-goods'\n ],\n notes='Purchasing all purpose flour'\n ),\n tags=[\n ],\n notes='Sugar'\n )\n IngredientSpec(\n name=\"{} input\".format(sugar.name),\n tags=list(sugar.tags),\n labels=['wet', 'sweetener'],\n process=wetmix.process,\n material=sugar,\n absolute_quantity=NominalReal(nominal=2, units='cups')\n )\n\n butter = MaterialSpec(\n name=\"Abstract Butter\",\n template=tmpl[\"Generic Material\"],\n process=ProcessSpec(\n name='Buying Butter, in General',\n template=tmpl[\"Procurement\"],\n tags=[\n 'purchase::produce'\n ],\n notes='Purchasing butter'\n ),\n tags=[\n ],\n notes='Shortening for making rich, buttery baked goods'\n )\n IngredientSpec(\n name=\"{} input\".format(butter.name),\n tags=list(butter.tags),\n labels=['wet', 'shortening'],\n process=wetmix.process,\n material=butter,\n absolute_quantity=NominalReal(nominal=1, units='cups')\n )\n IngredientSpec(\n name=\"{} input\".format(butter.name),\n tags=list(butter.tags),\n labels=['shortening'],\n process=frosting.process,\n material=butter,\n mass_fraction=NominalReal(nominal=0.1434, units='') # 1/2 c @ 0.911 g/cc\n )\n\n eggs = MaterialSpec(\n name=\"Abstract Eggs\",\n template=tmpl[\"Generic Material\"],\n process=ProcessSpec(\n name='Buying Eggs, in General',\n template=tmpl[\"Procurement\"],\n tags=[\n 'purchase::produce'\n ],\n notes='Purchasing eggs'\n ),\n tags=[\n ],\n 
notes=''\n )\n IngredientSpec(\n name=\"{} input\".format(eggs.name),\n tags=list(eggs.tags),\n labels=['wet'],\n process=wetmix.process,\n material=eggs,\n absolute_quantity=NominalReal(nominal=4, units='')\n )\n\n vanilla = MaterialSpec(\n name=\"Abstract Vanilla\",\n template=tmpl[\"Generic Material\"],\n process=ProcessSpec(\n name='Buying Vanilla, in General',\n template=tmpl[\"Procurement\"],\n tags=[\n 'purchase::dry-goods'\n ],\n notes='Purchasing vanilla'\n ),\n tags=[\n ],\n notes=''\n )\n IngredientSpec(\n name=\"{} input\".format(vanilla.name),\n tags=list(vanilla.tags),\n labels=['wet', 'flavoring'],\n process=wetmix.process,\n material=vanilla,\n absolute_quantity=NominalReal(nominal=2, units='teaspoons')\n )\n IngredientSpec(\n name=\"{} input\".format(vanilla.name),\n tags=list(vanilla.tags),\n labels=['flavoring'],\n process=frosting.process,\n material=vanilla,\n mass_fraction=NominalReal(nominal=0.0231, units='') # 2 tsp @ 0.879 g/cc\n )\n\n milk = MaterialSpec(\n name=\"Abstract Milk\",\n template=tmpl[\"Generic Material\"],\n process=ProcessSpec(\n name='Buying Milk, in General',\n template=tmpl[\"Procurement\"],\n tags=[\n 'purchase::produce'\n ],\n notes='Purchasing milk'\n ),\n tags=[\n ],\n notes=''\n )\n IngredientSpec(\n name=\"{} input\".format(milk.name),\n tags=list(milk.tags),\n labels=['wet'],\n process=batter.process,\n material=milk,\n absolute_quantity=NominalReal(nominal=1, units='cup')\n )\n IngredientSpec(\n name=\"{} input\".format(milk.name),\n tags=list(milk.tags),\n labels=[],\n process=frosting.process,\n material=milk,\n mass_fraction=NominalReal(nominal=0.0816, units='') # 1/4 c @ 1.037 g/cc\n )\n\n chocolate = MaterialSpec(\n name=\"Abstract Chocolate\",\n template=tmpl[\"Generic Material\"],\n process=ProcessSpec(\n name='Buying Chocolate, in General',\n template=tmpl[\"Procurement\"],\n tags=[\n 'purchase::dry-goods'\n ],\n notes='Purchasing chocolate'\n ),\n tags=[\n ],\n notes=''\n )\n IngredientSpec(\n name=\"{} input\".format(chocolate.name),\n tags=list(chocolate.tags),\n labels=['flavoring'],\n process=frosting.process,\n material=chocolate,\n mass_fraction=NominalReal(nominal=0.1132, units='') # 3 oz.\n )\n\n powder_sugar = MaterialSpec(\n name=\"Abstract Powdered Sugar\",\n template=tmpl[\"Generic Material\"],\n process=ProcessSpec(\n name='Buying Powdered Sugar, in General',\n template=tmpl[\"Procurement\"],\n tags=[\n 'purchase::dry-goods'\n ],\n notes='Purchasing powdered sugar'\n ),\n tags=[\n ],\n notes='Granulated sugar mixed with corn starch'\n )\n IngredientSpec(\n name=\"{} input\".format(powder_sugar.name),\n tags=list(powder_sugar.tags),\n labels=['flavoring'],\n process=frosting.process,\n material=powder_sugar,\n mass_fraction=NominalReal(nominal=0.6387, units='') # 4 c @ 30 g/ 0.25 cups\n )\n return cake", "def build(_):", "def build_rspec(cls, slice_urn, resources, leases, flowspace, vms, rspec_version='GENI 3 request'):\n rspec = RSpec(version=rspec_version)\n\n nodes = []\n channels = []\n links = []\n\n # XXX Here it is only about mappings and hooks between ontologies\n\n for urn in resources:\n # XXX TO BE CORRECTED, this handles None values\n if not urn:\n continue\n resource = dict()\n # TODO: take into account the case where we send a dict of URNs without keys\n #resource['component_id'] = resource.pop('urn')\n resource['component_id'] = urn\n resource_hrn, resource_type = urn_to_hrn(urn) # resource['component_id'])\n # build component_manager_id\n top_auth = resource_hrn.split('.')[0]\n cm = urn.split(\"+\")\n 
resource['component_manager_id'] = \"%s+%s+authority+cm\" % (cm[0],top_auth)\n\n #print \"resource_type\", resource_type\n if resource_type == 'node':\n #print \"NODE\", resource, cls\n resource = cls.on_build_resource_hook(resource)\n nodes.append(resource)\n elif resource_type == 'link':\n links.append(resource)\n elif resource_type == 'channel':\n channels.append(resource)\n else:\n raise Exception, \"Not supported type of resource\" \n\n #for node in nodes:\n # print \"NODE:\", node\n\n rspec.version.add_nodes(nodes, rspec_content_type=\"request\")\n\n #rspec.version.add_links(links)\n #rspec.version.add_channels(channels)\n\n sfa_leases = cls.manifold_to_sfa_leases(leases, slice_urn)\n #print \"sfa_leases\", sfa_leases\n if sfa_leases:\n # SFAWRAP BUG ???\n # rspec.version.add_leases bugs with an empty set of leases\n # slice_id = leases[0]['slice_id']\n # TypeError: list indices must be integers, not str\n rspec.version.add_leases(sfa_leases, []) # XXX Empty channels for now\n \n return rspec.toxml()", "def build(self):\n raise NotImplementedError", "def object_specs(self):\n if self._object_specs is None:\n self.object_specs = self.generate_object_specs()\n \n return self._object_specs", "def _build_impl(self):", "def build (self):\n raise NotImplementedError", "def __init__(self, name=None, debug_mode=False, features=None, ui=None, is_default=False, created=None, modified=None, id=None, team_id=None, team=None, portals=None, product_groups=None, product_types=None, product_sizes=None, product_size_materials=None, product_size_materials_rel=None):\n self.swagger_types = {\n 'name': 'str',\n 'debug_mode': 'bool',\n 'features': 'object',\n 'ui': 'object',\n 'is_default': 'bool',\n 'created': 'datetime',\n 'modified': 'datetime',\n 'id': 'str',\n 'team_id': 'str',\n 'team': 'Team',\n 'portals': 'list[Portal]',\n 'product_groups': 'list[ProductGroup]',\n 'product_types': 'list[ProductType]',\n 'product_sizes': 'list[ProductSize]',\n 'product_size_materials': 'list[ProductSizeMaterial]',\n 'product_size_materials_rel': 'list[TeamBuilderConfigProductSizeMaterial]'\n }\n\n self.attribute_map = {\n 'name': 'name',\n 'debug_mode': 'debugMode',\n 'features': 'features',\n 'ui': 'ui',\n 'is_default': 'isDefault',\n 'created': 'created',\n 'modified': 'modified',\n 'id': 'id',\n 'team_id': 'teamId',\n 'team': 'team',\n 'portals': 'portals',\n 'product_groups': 'productGroups',\n 'product_types': 'productTypes',\n 'product_sizes': 'productSizes',\n 'product_size_materials': 'productSizeMaterials',\n 'product_size_materials_rel': 'productSizeMaterialsRel'\n }\n\n self._name = name\n self._debug_mode = debug_mode\n self._features = features\n self._ui = ui\n self._is_default = is_default\n self._created = created\n self._modified = modified\n self._id = id\n self._team_id = team_id\n self._team = team\n self._portals = portals\n self._product_groups = product_groups\n self._product_types = product_types\n self._product_sizes = product_sizes\n self._product_size_materials = product_size_materials\n self._product_size_materials_rel = product_size_materials_rel", "def build(self):", "def build(self):", "def build(self):", "def build(cls, serial, thisUpdate, nextUpdate, names_and_objs, keypair, certs, version = 0):\n\n filelist = []\n for name, obj in names_and_objs:\n filelist.append((name.rpartition(\"/\")[2], sha256(obj.get_DER())))\n filelist.sort(key = lambda x: x[0])\n\n obj = cls.POW_class()\n obj.setVersion(version)\n obj.setManifestNumber(serial)\n obj.setThisUpdate(thisUpdate)\n 
obj.setNextUpdate(nextUpdate)\n obj.setAlgorithm(rpki.oids.id_sha256)\n obj.addFiles(filelist)\n\n self = cls(POW = obj)\n self.sign(keypair, certs)\n return self", "def test_01_BuildObjects(self):\n # print(PrettyFormatAny.form(self.m_pyhouse_obj._Config, 'A3-01-A - Config', 190))\n # print(__file__)\n # print(PrettyFormatAny.form(self.m_pyhouse_obj._Config.YamlTree, 'Location', 190))\n # self.assertEqual(self.m_pyhouse_obj._Config.YamlConfigDir, '/etc/pyhouse/')", "def __init__(self, *specs: Specification) -> None:\n self.specs = specs", "def _build(self, **kwargs):", "def build_specfile_header(spec):\n str = \"\"\n\n # first the mandatory sections\n mandatory_header_fields = {\n 'NAME' : '%%define name %s\\nName: %%{name}\\n',\n 'VERSION' : '%%define version %s\\nVersion: %%{version}\\n',\n 'PACKAGEVERSION' : '%%define release %s\\nRelease: %%{release}\\n',\n 'X_RPM_GROUP' : 'Group: %s\\n',\n 'SUMMARY' : 'Summary: %s\\n',\n 'LICENSE' : 'License: %s\\n',\n }\n\n str = str + SimpleTagCompiler(mandatory_header_fields).compile( spec )\n\n # now the optional tags\n optional_header_fields = {\n 'VENDOR' : 'Vendor: %s\\n',\n 'X_RPM_URL' : 'Url: %s\\n',\n 'SOURCE_URL' : 'Source: %s\\n',\n 'SUMMARY_' : 'Summary(%s): %s\\n',\n 'ARCHITECTURE' : 'BuildArch: %s\\n',\n 'X_RPM_DISTRIBUTION' : 'Distribution: %s\\n',\n 'X_RPM_ICON' : 'Icon: %s\\n',\n 'X_RPM_PACKAGER' : 'Packager: %s\\n',\n 'X_RPM_GROUP_' : 'Group(%s): %s\\n',\n\n 'X_RPM_REQUIRES' : 'Requires: %s\\n',\n 'X_RPM_PROVIDES' : 'Provides: %s\\n',\n 'X_RPM_CONFLICTS' : 'Conflicts: %s\\n',\n 'X_RPM_BUILDREQUIRES' : 'BuildRequires: %s\\n',\n\n 'X_RPM_SERIAL' : 'Serial: %s\\n',\n 'X_RPM_EPOCH' : 'Epoch: %s\\n',\n 'X_RPM_AUTOREQPROV' : 'AutoReqProv: %s\\n',\n 'X_RPM_EXCLUDEARCH' : 'ExcludeArch: %s\\n',\n 'X_RPM_EXCLUSIVEARCH' : 'ExclusiveArch: %s\\n',\n 'X_RPM_PREFIX' : 'Prefix: %s\\n',\n\n # internal use\n 'X_RPM_BUILDROOT' : 'BuildRoot: %s\\n',\n }\n\n # fill in default values:\n # Adding a BuildRequires renders the .rpm unbuildable under systems which\n # are not managed by rpm, since the database to resolve this dependency is\n # missing (take Gentoo as an example)\n #if 'X_RPM_BUILDREQUIRES' not in spec:\n # spec['X_RPM_BUILDREQUIRES'] = 'scons'\n\n if 'X_RPM_BUILDROOT' not in spec:\n spec['X_RPM_BUILDROOT'] = '%{_tmppath}/%{name}-%{version}-%{release}'\n\n str = str + SimpleTagCompiler(optional_header_fields, mandatory=0).compile( spec )\n\n # Add any extra specfile definitions the user may have supplied.\n # These flags get no processing, they are just added.\n # github #3164: if we don't turn off debug package generation\n # the tests which build packages all fail. If there are no\n # extra flags, default to adding this one. 
If the user wants\n # to turn this back on, supply the flag set to None.\n\n if 'X_RPM_EXTRADEFS' not in spec:\n spec['X_RPM_EXTRADEFS'] = ['%global debug_package %{nil}']\n for extra in spec['X_RPM_EXTRADEFS']:\n str += extra + '\\n'\n\n return str", "def build(self, args: Args) -> OpenSCADObject:\n raise NotImplementedError(\"This must be overwritten\")", "def build(self, obj):\n if isinstance(obj, self.art_type):\n return obj\n elif isinstance(obj, (tuple, list, dict, set)):\n if obj.__class__ is tuple:\n return self.build_tuple(obj)\n elif obj.__class__ is dict:\n return self.build_dict(obj)\n elif obj.__class__ is list:\n return self.build_list(obj)\n else:\n return self.build_set(obj)\n elif isinstance(obj, SageObject):\n return self.build_from_magic_method(obj)\n else:\n return self.build_from_string(obj)", "def get_specification(self) -> Dict:\n specification = {\n 'version': VERSION,\n 'metadata': {\n 'twoColumn': True,\n 'layout': self.autolayout\n },\n 'nodes': []\n }\n\n def strip_io(io_list: list, direction) -> list:\n \"\"\"\n Strips every input/output from metadata and leaves only\n `name` and `type` keys.\n \"\"\"\n return [\n {\n 'name': io['name'],\n 'type': io['type'],\n 'direction': direction\n }\n for io in io_list\n ]\n\n toremove = set()\n for key, node in self.nodes.items():\n try:\n node_cls = load_class(node.cls_name)\n except (ModuleNotFoundError, ImportError, Exception) as err:\n msg = f'Could not add {node_cls}. Reason:'\n _LOGGER.warn('-' * len(msg))\n _LOGGER.warn(msg)\n _LOGGER.warn(err)\n _LOGGER.warn('-' * len(msg))\n toremove.add(key)\n continue\n parameterschema = node_cls.form_parameterschema()\n\n properties = []\n for name, props in parameterschema['properties'].items():\n new_property = {'name': name}\n\n if 'default' in props:\n new_property['default'] = props['default']\n\n if 'description' in props:\n new_property['description'] = props['description']\n\n def add_default(default_val):\n if new_property.get('default') is None:\n new_property['default'] = default_val\n\n # Case for an input with range defined\n if 'enum' in props:\n new_property['type'] = 'select'\n new_property['values'] = list(map(str, props['enum']))\n add_default(new_property['values'][0])\n # Case for a single value input\n elif 'type' in props:\n if 'array' in props['type']:\n new_property['type'] = 'list'\n if 'items' in props and 'type' in props['items']:\n dtype = props['items']['type']\n new_property['dtype'] = dtype\n add_default([])\n elif 'boolean' in props['type']:\n new_property['type'] = 'checkbox'\n add_default(False)\n elif 'string' in props['type']:\n new_property['type'] = 'text'\n add_default('')\n elif 'integer' in props['type']:\n new_property['type'] = 'integer'\n add_default(0)\n elif 'number' in props['type']:\n new_property['type'] = 'number'\n add_default(0)\n elif 'object' in props['type']:\n # Object arguments should be defined in specification\n # as node inputs, rather than properties\n new_property = None\n else:\n new_property['type'] = 'text'\n add_default('')\n # If no type is specified then text is used\n else:\n new_property['type'] = 'text'\n add_default('')\n\n if new_property is not None:\n properties.append(new_property)\n\n specification['nodes'].append({\n 'name': node.name,\n 'type': node.type,\n 'category': node.category,\n 'properties': properties,\n 'interfaces': strip_io(\n self.io_mapping[node.type]['inputs'],\n 'input'\n ) + strip_io(\n self.io_mapping[node.type]['outputs'],\n 'output'\n )\n })\n\n for key in toremove:\n del 
self.nodes[key]\n return specification", "def build_model():", "def setUp(self):\n self.nC4H10O = Species(\n label='n-C4H10O',\n conformer=Conformer(\n E0=(-317.807, 'kJ/mol'),\n modes=[\n IdealGasTranslation(mass=(74.07, \"g/mol\")),\n NonlinearRotor(inertia=([41.5091, 215.751, 233.258], \"amu*angstrom^2\"), symmetry=1),\n HarmonicOscillator(frequencies=(\n [240.915, 341.933, 500.066, 728.41, 809.987, 833.93, 926.308, 948.571, 1009.3, 1031.46, 1076,\n 1118.4, 1184.66, 1251.36, 1314.36, 1321.42, 1381.17, 1396.5, 1400.54, 1448.08, 1480.18, 1485.34,\n 1492.24, 1494.99, 1586.16, 2949.01, 2963.03, 2986.19, 2988.1, 2995.27, 3026.03, 3049.05, 3053.47,\n 3054.83, 3778.88], \"cm^-1\")),\n HinderedRotor(inertia=(0.854054, \"amu*angstrom^2\"), symmetry=1, fourier=(\n [[0.25183, -1.37378, -2.8379, 0.0305112, 0.0028088],\n [0.458307, 0.542121, -0.599366, -0.00283925, 0.0398529]], \"kJ/mol\")),\n HinderedRotor(inertia=(8.79408, \"amu*angstrom^2\"), symmetry=1, fourier=(\n [[0.26871, -0.59533, -8.15002, -0.294325, -0.145357],\n [1.1884, 0.99479, -0.940416, -0.186538, 0.0309834]], \"kJ/mol\")),\n HinderedRotor(inertia=(7.88153, \"amu*angstrom^2\"), symmetry=1, fourier=(\n [[-4.67373, 2.03735, -6.25993, -0.27325, -0.048748],\n [-0.982845, 1.76637, -1.57619, 0.474364, -0.000681718]], \"kJ/mol\")),\n HinderedRotor(inertia=(2.81525, \"amu*angstrom^2\"), symmetry=3, barrier=(2.96807, \"kcal/mol\")),\n ],\n spin_multiplicity=1,\n optical_isomers=1,\n ),\n molecular_weight=(74.07, \"g/mol\"),\n transport_data=TransportData(sigma=(5.94, 'angstrom'), epsilon=(559, 'K')),\n energy_transfer_model=SingleExponentialDown(alpha0=(447.5 * 0.011962, \"kJ/mol\"), T0=(300, \"K\"), n=0.85),\n )\n\n self.nC4H10O.from_smiles('CCCCO')\n\n self.nC4H8 = Species(\n label='n-C4H8',\n conformer=Conformer(\n E0=(-17.8832, 'kJ/mol'),\n modes=[\n IdealGasTranslation(mass=(56.06, \"g/mol\")),\n NonlinearRotor(inertia=([22.2748, 122.4, 125.198], \"amu*angstrom^2\"), symmetry=1),\n HarmonicOscillator(frequencies=(\n [308.537, 418.67, 636.246, 788.665, 848.906, 936.762, 979.97, 1009.48, 1024.22, 1082.96, 1186.38,\n 1277.55, 1307.65, 1332.87, 1396.67, 1439.09, 1469.71, 1484.45, 1493.19, 1691.49, 2972.12, 2994.31,\n 3018.48, 3056.87, 3062.76, 3079.38, 3093.54, 3174.52], \"cm^-1\")),\n HinderedRotor(inertia=(5.28338, \"amu*angstrom^2\"), symmetry=1, fourier=(\n [[-0.579364, -0.28241, -4.46469, 0.143368, 0.126756],\n [1.01804, -0.494628, -0.00318651, -0.245289, 0.193728]], \"kJ/mol\")),\n HinderedRotor(inertia=(2.60818, \"amu*angstrom^2\"), symmetry=3, fourier=(\n [[0.0400372, 0.0301986, -6.4787, -0.0248675, -0.0324753],\n [0.0312541, 0.0538, -0.493785, 0.0965968, 0.125292]], \"kJ/mol\")),\n ],\n spin_multiplicity=1,\n optical_isomers=1,\n ),\n )\n\n self.nC4H8.from_smiles('CCC=C')\n\n self.H2O = Species(\n label='H2O',\n conformer=Conformer(\n E0=(-269.598, 'kJ/mol'),\n modes=[\n IdealGasTranslation(mass=(18.01, \"g/mol\")),\n NonlinearRotor(inertia=([0.630578, 1.15529, 1.78586], \"amu*angstrom^2\"), symmetry=2),\n HarmonicOscillator(frequencies=([1622.09, 3771.85, 3867.85], \"cm^-1\")),\n ],\n spin_multiplicity=1,\n optical_isomers=1,\n ),\n )\n\n self.H2O.from_smiles('O')\n\n self.N2 = Species(\n label='N2',\n molecular_weight=(28.04, \"g/mol\"),\n transport_data=TransportData(sigma=(3.41, \"angstrom\"), epsilon=(124, \"K\")),\n energy_transfer_model=None,\n )\n\n self.N2.from_smiles('N#N')\n\n logging.error('to TS')\n\n self.TS = TransitionState(\n label='TS',\n conformer=Conformer(\n E0=(-42.4373, \"kJ/mol\"),\n modes=[\n 
IdealGasTranslation(mass=(74.07, \"g/mol\")),\n NonlinearRotor(inertia=([40.518, 232.666, 246.092], \"u*angstrom**2\"), symmetry=1, quantum=False),\n HarmonicOscillator(frequencies=(\n [134.289, 302.326, 351.792, 407.986, 443.419, 583.988, 699.001, 766.1, 777.969, 829.671, 949.753,\n 994.731, 1013.59, 1073.98, 1103.79, 1171.89, 1225.91, 1280.67, 1335.08, 1373.9, 1392.32, 1417.43,\n 1469.51, 1481.61, 1490.16, 1503.73, 1573.16, 2972.85, 2984.3, 3003.67, 3045.78, 3051.77, 3082.37,\n 3090.44, 3190.73, 3708.52], \"kayser\")),\n HinderedRotor(inertia=(2.68206, \"amu*angstrom^2\"), symmetry=3, barrier=(3.35244, \"kcal/mol\")),\n HinderedRotor(inertia=(9.77669, \"amu*angstrom^2\"), symmetry=1, fourier=(\n [[0.208938, -1.55291, -4.05398, -0.105798, -0.104752],\n [2.00518, -0.020767, -0.333595, 0.137791, -0.274578]], \"kJ/mol\")),\n ],\n spin_multiplicity=1,\n optical_isomers=1,\n ),\n frequency=(-2038.34, 'cm^-1'),\n )\n\n self.reaction = Reaction(\n label='dehydration',\n reactants=[self.nC4H10O],\n products=[self.nC4H8, self.H2O],\n transition_state=self.TS,\n kinetics=Arrhenius(A=(0.0387, 'm^3/(mol*s)'), n=2.7, Ea=(2.6192e4, 'J/mol'), T0=(1, 'K'))\n )\n\n self.network = Network(\n label='n-butanol',\n isomers=[Configuration(self.nC4H10O)],\n reactants=[],\n products=[Configuration(self.nC4H8, self.H2O)],\n path_reactions=[self.reaction],\n bath_gas={self.N2: 1.0},\n )\n\n self.pdepnetwork = deepcopy(self.network)\n self.pdepnetwork.__class__ = PDepNetwork\n self.pdepnetwork.source = [self.pdepnetwork.isomers[0].species[0]]\n self.pdepnetwork.index = 1\n self.pdepnetwork.explored = []", "def _build(self):\n raise NotImplementedError()", "def __init__(self, **kwargs):\n self.swagger_types = {\n 'key': 'str',\n 'display_name': 'str',\n 'description': 'str',\n 'glossary_key': 'str',\n 'parent_term_key': 'str',\n 'is_allowed_to_have_child_terms': 'bool',\n 'path': 'str',\n 'lifecycle_state': 'str',\n 'time_created': 'datetime',\n 'time_updated': 'datetime',\n 'created_by_id': 'str',\n 'updated_by_id': 'str',\n 'owner': 'str',\n 'workflow_status': 'str',\n 'uri': 'str',\n 'associated_object_count': 'int',\n 'associated_objects': 'list[TermAssociatedObject]'\n }\n\n self.attribute_map = {\n 'key': 'key',\n 'display_name': 'displayName',\n 'description': 'description',\n 'glossary_key': 'glossaryKey',\n 'parent_term_key': 'parentTermKey',\n 'is_allowed_to_have_child_terms': 'isAllowedToHaveChildTerms',\n 'path': 'path',\n 'lifecycle_state': 'lifecycleState',\n 'time_created': 'timeCreated',\n 'time_updated': 'timeUpdated',\n 'created_by_id': 'createdById',\n 'updated_by_id': 'updatedById',\n 'owner': 'owner',\n 'workflow_status': 'workflowStatus',\n 'uri': 'uri',\n 'associated_object_count': 'associatedObjectCount',\n 'associated_objects': 'associatedObjects'\n }\n\n self._key = None\n self._display_name = None\n self._description = None\n self._glossary_key = None\n self._parent_term_key = None\n self._is_allowed_to_have_child_terms = None\n self._path = None\n self._lifecycle_state = None\n self._time_created = None\n self._time_updated = None\n self._created_by_id = None\n self._updated_by_id = None\n self._owner = None\n self._workflow_status = None\n self._uri = None\n self._associated_object_count = None\n self._associated_objects = None", "def __init__(self, json=None, verbose=True):\n\n self.verbose = verbose\n if json:\n self.composite_ = self.build_engine_from_json(json=json, verbose=verbose)\n self.prebuilt_ = True\n else:\n self.prebuilt_ = False", "def __init__(self, build_specification):\n 
_reader_interface = self.reader_factory(mode=\"json\")\n _reader = _reader_interface(build_specification)\n\n _objects = [] # empty roster of objects, fill is a value Object is created\n for item in _reader.data:\n class_kwargs = _reader.data[item]\n module_name = class_kwargs.get(\"module\", None)\n if module_name is not None:\n module_dict = dict(name=f\".{module_name}\", package=\"pubsub\")\n try:\n the_module = import_module(**module_dict)\n try:\n the_class = getattr(the_module, module_name.capitalize())\n if the_class:\n _objects.append(the_class(**class_kwargs))\n except AttributeError:\n print(\n f\"Skipping module {the_module.__name__}; class {module_name.capitalize()} not found.\"\n )\n except ModuleNotFoundError:\n print(\n f'Skipping module {module_dict[\"package\"] + module_dict[\"name\"]}; module not found.'\n )\n\n # connect the publish-subscribe mechanism\n subscribers = [item for item in _objects if isinstance(item, ISubscriber)]\n for item in subscribers:\n print(f\"{item.name} has an ISubscriber interface\")\n\n publishers = [item for item in _objects if isinstance(item, IPublisher)]\n for item in publishers:\n print(f\"{item.name} has an IPublisher interface\")\n for who in subscribers:\n item.connect(who)\n\n for item in publishers:\n print(f\"{item.name} responds to a command and pubishes:\")\n item.publish()", "def get_obj_spec(client_factory, obj, select_set=None):\n obj_spec = client_factory.create('ns0:ObjectSpec')\n obj_spec.obj = obj\n obj_spec.skip = False\n if select_set is not None:\n obj_spec.selectSet = select_set\n return obj_spec", "def _make_specs(cls, doc_id=None, specs=None):\n final_specs = {}\n if doc_id:\n final_specs['_id'] = cls._id_type(doc_id)\n if specs:\n final_specs.update(specs)\n cls._add_shard(final_specs)\n return final_specs", "def _component_specs(self):\n specs = dict(pretransformed_input=self._input_spec)\n if self._transform_is_composite:\n specs['bijector'] = self.transform_or_spec\n return specs", "def get_spec(self):\n from schematics.types import ModelType\n spec = {\n 'id': self.name,\n 'description': self.description,\n 'addressable': self.array,\n 'required': self.required,\n }\n if self.type.has_schema:\n spec['schema'] = self.type.get_spec()\n else:\n spec.update(self.type.get_spec())\n\n return spec", "def build_property_spec(client_factory, type=\"VirtualMachine\",\r\n properties_to_collect=[\"name\"],\r\n all_properties=False):\r\n property_spec = client_factory.create('ns0:PropertySpec')\r\n property_spec.all = all_properties\r\n property_spec.pathSet = properties_to_collect\r\n property_spec.type = type\r\n return property_spec", "def __init__(self):\n self.swagger_types = {\n 'detail_type': 'str',\n 'name': 'str',\n 'store_data': 'object',\n 'discovered': 'datetime',\n 'extraction_failure': 'bool',\n 'in_trash': 'bool',\n 'is_extracted': 'bool',\n 'meta_available': 'bool',\n 'size': 'int',\n 'start_time': 'datetime',\n 'end_time': 'datetime',\n 'duration': 'float',\n 'messages': 'int',\n 'tags': 'list[Tag]'\n }\n\n self.attribute_map = {\n 'detail_type': 'detail_type',\n 'name': 'name',\n 'store_data': 'store_data',\n 'discovered': 'discovered',\n 'extraction_failure': 'extraction_failure',\n 'in_trash': 'in_trash',\n 'is_extracted': 'is_extracted',\n 'meta_available': 'meta_available',\n 'size': 'size',\n 'start_time': 'start_time',\n 'end_time': 'end_time',\n 'duration': 'duration',\n 'messages': 'messages',\n 'tags': 'tags'\n }\n\n self._detail_type = None\n self._name = None\n self._store_data = None\n 
self._discovered = None\n self._extraction_failure = None\n self._in_trash = None\n self._is_extracted = None\n self._meta_available = None\n self._size = None\n self._start_time = None\n self._end_time = None\n self._duration = None\n self._messages = None\n self._tags = None", "def build_rspec(cls, slice_urn, resources, leases, flowspace, vms, rspec_version='GENI 3 request'):\n import time\n start_time = None\n end_time = None\n\n # Default duration for WiLab is 2 hours\n duration_default = 120\n for lease in leases:\n if 'end_time' in lease:\n end_time = lease['end_time']\n start_time = lease['start_time']\n break\n\n if start_time is None:\n # start_time = Now\n start_time = time.time()\n\n if end_time is None:\n end_time = int(start_time + duration_default*60)\n #raise Exception, \"end_time is mandatory in leases\"\n\n # duration in seconds from now till end_time\n duration = end_time - start_time\n # duration in minutes\n duration = duration / 60\n duration = int(duration)\n if duration < duration_default:\n duration = duration_default\n Log.tmp(\"start_time = \",start_time)\n Log.tmp(\"end_time = \",end_time)\n Log.tmp(\"duration = \",duration)\n # RSpec will have expires date = now + duration\n rspec = RSpec(version=rspec_version, ttl=duration, expires=end_time)\n\n nodes = []\n channels = []\n links = []\n\n # XXX Here it is only about mappings and hooks between ontologies\n i = 0\n for urn in resources:\n # XXX TO BE CORRECTED, this handles None values\n if not urn:\n continue\n resource = dict()\n # TODO: take into account the case where we send a dict of URNs without keys\n #resource['component_id'] = resource.pop('urn')\n resource['component_id'] = urn\n resource_hrn, resource_type = urn_to_hrn(urn) # resource['component_id'])\n # build component_manager_id\n\n # The only change for WiLab compared to Generic SFAWrapParser\n cm = urn.split(\"+\")\n resource['component_manager_id'] = \"%s+%s+authority+cm\" % (cm[0],cm[1])\n\n #print \"resource_type\", resource_type\n if resource_type == 'node':\n #print \"NODE\", resource, cls\n resource['client_id'] = \"PC\" + str(i)\n resource = cls.on_build_resource_hook(resource)\n nodes.append(resource)\n elif resource_type == 'link':\n links.append(resource)\n elif resource_type == 'channel':\n channels.append(resource)\n else:\n raise Exception(\"Not supported type of resource\") \n\n i = i + 1\n #for node in nodes:\n # print \"NODE:\", node\n\n rspec.version.add_nodes(nodes, rspec_content_type=\"request\")\n #rspec.version.add_links(links)\n #rspec.version.add_channels(channels)\n\n #sfa_leases = cls.manifold_to_sfa_leases(leases, slice_urn)\n ##print \"sfa_leases\", sfa_leases\n #if sfa_leases:\n # # SFAWRAP BUG ???\n # # rspec.version.add_leases bugs with an empty set of leases\n # # slice_id = leases[0]['slice_id']\n # # TypeError: list indices must be integers, not str\n # rspec.version.add_leases(sfa_leases, []) # XXX Empty channels for now\n return rspec.toxml()", "def generate(self):\n fleet_config = self._build_base_object()\n fleet_config['LaunchSpecifications'] = list(self._build_launch_specs_object())\n return fleet_config", "def build(self):\n raise NotImplementedError(\"This is an interface method. 
Implement it in subclass.\")", "def build(self):\n\n raise NotImplementedError(\"Implement build() method\")", "def build_specfile_sections(spec):\n str = \"\"\n\n mandatory_sections = {\n 'DESCRIPTION' : '\\n%%description\\n%s\\n\\n', }\n\n str = str + SimpleTagCompiler(mandatory_sections).compile( spec )\n\n optional_sections = {\n 'DESCRIPTION_' : '%%description -l %s\\n%s\\n\\n',\n 'CHANGELOG' : '%%changelog\\n%s\\n\\n',\n 'X_RPM_PREINSTALL' : '%%pre\\n%s\\n\\n',\n 'X_RPM_POSTINSTALL' : '%%post\\n%s\\n\\n',\n 'X_RPM_PREUNINSTALL' : '%%preun\\n%s\\n\\n',\n 'X_RPM_POSTUNINSTALL' : '%%postun\\n%s\\n\\n',\n 'X_RPM_VERIFY' : '%%verify\\n%s\\n\\n',\n\n # These are for internal use but could possibly be overridden\n 'X_RPM_PREP' : '%%prep\\n%s\\n\\n',\n 'X_RPM_BUILD' : '%%build\\n%s\\n\\n',\n 'X_RPM_INSTALL' : '%%install\\n%s\\n\\n',\n 'X_RPM_CLEAN' : '%%clean\\n%s\\n\\n',\n }\n\n # Default prep, build, install and clean rules\n # TODO: optimize those build steps, to not compile the project a second time\n if 'X_RPM_PREP' not in spec:\n spec['X_RPM_PREP'] = '[ -n \"$RPM_BUILD_ROOT\" -a \"$RPM_BUILD_ROOT\" != / ] && rm -rf \"$RPM_BUILD_ROOT\"' + '\\n%setup -q'\n\n if 'X_RPM_BUILD' not in spec:\n spec['X_RPM_BUILD'] = '[ ! -e \"$RPM_BUILD_ROOT\" -a \"$RPM_BUILD_ROOT\" != / ] && mkdir \"$RPM_BUILD_ROOT\"'\n\n if 'X_RPM_INSTALL' not in spec:\n spec['X_RPM_INSTALL'] = 'scons --install-sandbox=\"$RPM_BUILD_ROOT\" \"$RPM_BUILD_ROOT\"'\n\n if 'X_RPM_CLEAN' not in spec:\n spec['X_RPM_CLEAN'] = '[ -n \"$RPM_BUILD_ROOT\" -a \"$RPM_BUILD_ROOT\" != / ] && rm -rf \"$RPM_BUILD_ROOT\"'\n\n str = str + SimpleTagCompiler(optional_sections, mandatory=0).compile( spec )\n\n return str", "def getBuilder():", "def __init__(self, engine: Engine, spec_id: int):\n\n self.engine = engine\n self.data = engine.get_spec(spec_id)\n assert self.data is not None\n\n # build the talent rows\n self.talent_rows = {}\n self.levels = {}\n for tier_data in self.data[\"talent_tiers\"]:\n # store talents as one-indexed\n talent_row = {}\n for talent_data in tier_data[\"talents\"]:\n ttip = talent_data[\"spell_tooltip\"]\n f_str = \"({}, {}) {}\".format(\n talent_data[\"talent\"][\"name\"],\n ttip[\"cast_time\"],\n ttip[\"description\"],\n )\n talent_row[talent_data[\"column_index\"] + 1] = {\n \"text\": f_str,\n \"raw\": talent_data,\n }\n\n # save tiers as one-indexed\n index = level_to_index(tier_data[\"level\"])\n self.talent_rows[index] = talent_row\n self.levels[index] = tier_data[\"level\"]\n\n # store this spec's talent macros\n self.macros = {}\n for row_idx, row_data in self.talent_rows.items():\n macro = build_row_macro(row_idx, row_data)\n if macro is not None:\n self.macros[row_idx] = macro\n\n self.name = self.data[\"name\"]\n\n # build a data structure for serialization\n media = engine.get_spec_media(spec_id)\n assert media is not None\n self.to_serialize = {\n \"icon\": media[\"assets\"][0][\"value\"],\n \"name\": self.name,\n \"slug\": self.name.lower().replace(\" \", \"_\"),\n \"role\": self.data[\"role\"][\"name\"],\n \"has_macros\": bool(self.macros),\n }\n self.to_serialize[\"talent_rows\"] = []\n for row, data in self.talent_rows.items():\n rdata: dict = {\n \"index\": row,\n \"level\": self.levels[row],\n \"macro\": None,\n }\n if row in self.macros:\n rdata[\"macro\"] = self.macros[row][0]\n rdata[\"macro_lines\"] = self.macros[row][1]\n rdata[\"talents\"] = {}\n for talent_idx, talent_data in data.items():\n tdata = Talent(\n self.engine, talent_data[\"raw\"][\"talent\"][\"id\"]\n ).to_serialize\n 
tdata[\"active\"] = is_talent_active(talent_data[\"raw\"])\n rdata[\"talents\"][talent_idx] = tdata\n self.to_serialize[\"talent_rows\"].append(rdata)", "def test_constructor(self):\n # Record the model types of all the models to be created\n all_model_types = model_type_to_display_name.keys()\n\n # Record the attribute / value pairs that are common to all models.\n common_attr_value_dict = {\"data\": self.fake_df,\n \"name_spec\": self.fake_names,\n \"design\": self.fake_design,\n \"ind_var_names\": self.fake_names[\"x\"],\n \"alt_id_col\": self.alt_id_col,\n \"obs_id_col\": self.obs_id_col,\n \"choice_col\": self.choice_col,\n \"specification\": self.fake_specification,\n \"alt_IDs\": self.fake_df[\"alt_id\"].values,\n \"choices\": self.fake_df[\"choice\"].values}\n\n # Create a shape name dictionary to relate the various models to the\n # names of their shape parameters.\n shape_name_dict = {\"MNL\": None,\n \"Asym\": self.fake_shape_names[:2],\n \"Cloglog\": None,\n \"Scobit\": self.fake_shape_names,\n \"Uneven\": self.fake_shape_names,\n \"Nested Logit\": None,\n \"Mixed Logit\": None}\n\n # Create a shape reference position dictionary to relate the various\n # models to their shape reference positions.\n shape_ref_dict = {}\n for key in shape_name_dict:\n shape_ref_dict[key] = (None if key != \"Asym\" else\n self.fake_shape_ref_pos)\n\n # Create an intercept_names and intercept_ref_position dictionary to\n # relate the various models to their respective kwargs.\n intercept_names_dict = {}\n intercept_ref_dict = {}\n for key in shape_name_dict:\n if key in [\"MNL\", \"Nested Logit\", \"Mixed Logit\"]:\n intercept_names_dict[key] = None\n intercept_ref_dict[key] = None\n else:\n intercept_names_dict[key] = self.fake_intercept_names\n intercept_ref_dict[key] = self.fake_intercept_ref_pos\n\n # Create a nest_names dictionary to relate the various models to their\n # nest_name attributes\n nest_name_dict = {}\n nest_spec_dict = {}\n for key in shape_name_dict:\n if key != \"Nested Logit\":\n nest_name_dict[key] = None\n nest_spec_dict[key] = None\n else:\n nest_name_dict[key] = list(self.fake_nest_spec.keys())\n nest_spec_dict[key] = self.fake_nest_spec\n\n # Create dictionaries for the mixing_id_col, mixing_vars, and\n # mixing_pos attributes\n mixing_id_col_dict = {}\n mixing_vars_dict = {}\n mixing_pos_dict = {}\n\n for key in shape_name_dict:\n if key != \"Mixed Logit\":\n mixing_id_col_dict[key] = None\n mixing_vars_dict[key] = None\n mixing_pos_dict[key] = None\n else:\n mixing_id_col_dict[key] = self.obs_id_col\n mixing_vars_dict[key] = self.fake_names[\"x\"]\n mixing_pos_dict[key] = [0]\n\n # Record the attribute / value pairs that vary across models\n varying_attr_value_dict = {\"model_type\": model_type_to_display_name,\n \"intercept_names\": intercept_names_dict,\n \"intercept_ref_position\":\n intercept_ref_dict,\n \"shape_names\": shape_name_dict,\n \"shape_ref_position\": shape_ref_dict,\n \"nest_names\": nest_name_dict,\n \"nest_spec\": nest_spec_dict,\n \"mixing_id_col\": mixing_id_col_dict,\n \"mixing_vars\": mixing_vars_dict,\n \"mixing_pos\": mixing_pos_dict}\n\n # Set up the keyword arguments that are needed for each of the model\n # types\n variable_kwargs = {}\n for model_name in all_model_types:\n variable_kwargs[model_name] = {}\n variable_kwargs[model_name][\"intercept_names\"] =\\\n intercept_names_dict[model_name]\n variable_kwargs[model_name][\"intercept_ref_pos\"] =\\\n intercept_ref_dict[model_name]\n variable_kwargs[model_name][\"shape_ref_pos\"] =\\\n 
shape_ref_dict[model_name]\n variable_kwargs[model_name][\"shape_names\"] =\\\n shape_name_dict[model_name]\n variable_kwargs[model_name][\"nest_spec\"] =\\\n nest_spec_dict[model_name]\n variable_kwargs[model_name][\"mixing_id_col\"] =\\\n mixing_id_col_dict[model_name]\n variable_kwargs[model_name][\"mixing_vars\"] =\\\n mixing_vars_dict[model_name]\n\n # Execute the test for each model type\n for model_name in all_model_types:\n # Update the model type in the list of constructor args\n self.constructor_args[-1] = model_name\n\n # Use this specific model's keyword arguments\n self.constructor_kwargs.update(variable_kwargs[model_name])\n\n # Construct the model object\n model_obj = pylogit.create_choice_model(*self.constructor_args,\n **self.constructor_kwargs)\n\n # Make sure that the constructor has all of the required attributes\n for attr in common_attr_value_dict:\n value = common_attr_value_dict[attr]\n if isinstance(value, pd.DataFrame):\n self.assertTrue(value.equals(model_obj.data))\n elif isinstance(value, np.ndarray):\n npt.assert_allclose(value,\n model_obj.__getattribute__(attr))\n else:\n self.assertEqual(value,\n model_obj.__getattribute__(attr))\n\n for attr in varying_attr_value_dict:\n value = varying_attr_value_dict[attr][model_name]\n\n self.assertEqual(value,\n model_obj.__getattribute__(attr))\n\n return None", "def build(self):\n # Clean all fields.\n self._clean_fields()\n\n # Build", "def _component_specs(self):\n specs = dict(pretransformed_input=self._input_spec)\n if self._transform_is_composite:\n specs['transform_fn'] = self.transform_or_spec\n if self._also_track_spec is not None:\n specs['also_track'] = self._also_track_spec\n return specs", "def build(self) -> Optional[Bundle]:\n # Prepare STIX2 bundle objects with author.\n bundle_objects = [self.author]\n\n # Add object marking definitions to bundle.\n bundle_objects.extend(self.object_markings)\n\n # Create intrusion sets and add to bundle.\n intrusion_sets = self._create_intrusion_sets()\n bundle_objects.extend(intrusion_sets)\n\n # Create sectors and add to bundle.\n sectors = self._create_sectors()\n bundle_objects.extend(sectors)\n\n # Intrusion sets target sectors and add to bundle.\n intrusion_sets_target_sectors = self._create_targets_relationships(\n intrusion_sets, sectors\n )\n bundle_objects.extend(intrusion_sets_target_sectors)\n\n # Create locations and add to bundle.\n locations = self._create_locations()\n bundle_objects.extend(locations)\n\n # Intrusion sets target locations and add to bundle.\n intrusion_sets_target_locations = self._create_targets_relationships(\n intrusion_sets, locations\n )\n bundle_objects.extend(intrusion_sets_target_locations)\n\n # Create observations.\n observations = self._create_ioc_observations()\n\n # Get observables and add to bundle.\n observables = [o.observable for o in observations if o.observable is not None]\n bundle_objects.extend(observables)\n\n # Get indicators, create YARA indicators and to bundle.\n indicators = [o.indicator for o in observations if o.indicator is not None]\n indicators.extend(self._create_yara_indicators())\n bundle_objects.extend(indicators)\n\n # Get observation relationships and add to bundle.\n indicators_based_on_observables = [\n o.relationship for o in observations if o.relationship is not None\n ]\n bundle_objects.extend(indicators_based_on_observables)\n\n # Indicator indicates entities, add to bundle.\n indicator_indicates = intrusion_sets\n\n indicator_indicates_entities = self._create_indicates_relationships(\n 
indicators, indicator_indicates\n )\n bundle_objects.extend(indicator_indicates_entities)\n\n # Create object references for the report.\n object_refs = create_object_refs(\n intrusion_sets,\n sectors,\n intrusion_sets_target_sectors,\n locations,\n intrusion_sets_target_locations,\n observables,\n indicators,\n indicators_based_on_observables,\n indicator_indicates_entities,\n )\n\n # TODO: Ignore reports without any references or not?\n # Hack, the report must have at least on object reference.\n if not object_refs:\n dummy_object = self._create_dummy_object()\n\n bundle_objects.append(dummy_object)\n object_refs.append(dummy_object)\n\n # Create report and add to bundle.\n report = self._create_report(object_refs)\n bundle_objects.append(report)\n\n # XXX: Without allow_custom=True the observable with the custom property\n # will cause an unexpected property (x_opencti_score) error.\n return Bundle(objects=bundle_objects, allow_custom=True)", "def __init__(self):\n self.swagger_types = {\n 'owner_id': 'str',\n 'created_at': 'datetime',\n 'identifier': 'str',\n 'identifier_type': 'str',\n 'default_language': 'str',\n 'optional_identifier': 'str',\n 'id': 'str',\n 'v': 'float',\n 'id': 'str',\n 'case_records': 'list[str]'\n }\n\n self.attribute_map = {\n 'owner_id': '_ownerId',\n 'created_at': '_createdAt',\n 'identifier': 'identifier',\n 'identifier_type': 'identifierType',\n 'default_language': 'defaultLanguage',\n 'optional_identifier': 'optionalIdentifier',\n 'id': '_id',\n 'v': '__v',\n 'case_records': 'caseRecords'\n }\n\n self._owner_id = None\n self._created_at = None\n self._identifier = None\n self._identifier_type = None\n self._default_language = None\n self._optional_identifier = None\n self._id = None\n self._v = None\n self._id = None\n self._case_records = None", "def test_factory_methods(self):\n\n po = ProjectObject.gen_bounding_box_object(id=\"1\", bounds=self.bounds)\n self.assertEqual(po.project_type, \"bounding_box\")\n self.assertAlmostEqual(po.bounds, self.bounds)\n self.assertEqual(po.id, \"1\")\n\n po = ProjectObject.gen_voxels_object(id=\"2\", voxels=self.voxels)\n self.assertEqual(po.project_type, \"voxels\")\n self.assertAlmostEqual(po.voxels.bounds(), self.voxels.bounds())\n self.assertEqual(po.id, \"2\")\n\n po = ProjectObject.gen_meshes_object(id=\"3\", meshes=self.meshes)\n self.assertEqual(po.project_type, \"meshes\")\n self.assertEqual(\n po.meshes.num_primitive_meshes(), self.meshes.num_primitive_meshes()\n )\n self.assertEqual(po.id, \"3\")", "def _get_object_properties(self):\n # Parse element tree to get all relevant bodies, joints, actuators, and geom groups\n _elements = sort_elements(root=self.get_obj())\n # print(ET.tostring(self.get_obj(), encoding='unicode', method='xml'))\n assert len(_elements[\"root_body\"]) == 1, \"Invalid number of root bodies found for robot model. 
Expected 1,\" \\\n \"got {}\".format(len(_elements[\"root_body\"]))\n _elements[\"root_body\"] = _elements[\"root_body\"][0]\n _elements[\"bodies\"] = [_elements[\"root_body\"]] + _elements[\"bodies\"] if \"bodies\" in _elements else \\\n [_elements[\"root_body\"]]\n self._root_body = _elements[\"root_body\"].get(\"name\")\n self._bodies = [e.get(\"name\") for e in _elements.get(\"bodies\", [])]\n self._joints = [e.get(\"name\") for e in _elements.get(\"joints\", [])]\n self._actuators = [e.get(\"name\") for e in _elements.get(\"actuators\", [])]\n self._sites = [e.get(\"name\") for e in _elements.get(\"sites\", [])]\n self._sensors = [e.get(\"name\") for e in _elements.get(\"sensors\", [])]\n composite_obj = _elements[\"root_body\"].find(\"./body/composite\")\n if composite_obj is not None:\n self._count = np.fromstring(composite_obj.get(\"count\"), dtype=int, sep=' ')\n self._composite_type = composite_obj.get(\"type\")\n self._spacing = float(composite_obj.get(\"spacing\"))\n assert len(self._count) == 3, \"the length of count must be 3, got: {} instead.\".format(len(self._count))\n dim = 3 - np.sum(self._count==1)\n self._composite_shape = [self._spacing * (self._count[i] - 1) for i in range(dim)]\n if dim == 1:\n self._contact_geoms = [f'G{i}' for i in range(self._count[0])] \n elif dim == 2: \n self._contact_geoms = [f'G{i}_{j}' for j in range(self._count[1])\n for i in range(self._count[0])]\n elif dim == 3:\n self._contact_geoms = [f'G{i}_{j}_{k}' for k in range(self._count[2])\n for j in range(self._count[1])\n for i in range(self._count[0])]\n else:\n self._contact_geoms = [e.get(\"name\") for e in _elements.get(\"contact_geoms\", [])]\n self._visual_geoms = [e.get(\"name\") for e in _elements.get(\"visual_geoms\", [])]\n\n # Add default materials if we're using domain randomization\n if macros.USING_INSTANCE_RANDOMIZATION:\n tex_element, mat_element, _, used = add_material(root=self.get_obj(), naming_prefix=self.naming_prefix)\n # Only add the material / texture if they were actually used\n if used:\n self.asset.append(tex_element)\n self.asset.append(mat_element)\n\n # Add prefix to all elements\n add_prefix(root=self.get_obj(), prefix=self.naming_prefix, exclude=self.exclude_from_prefixing)", "def generate_object_spec(shape_choices, *, textures=None, colors=None,\n fill_is=None, class_is=None, random_state=None):\n assert textures is None or colors is None\n assert not (textures is None and colors is None)\n assert fill_is in ['shape', 'random']\n assert class_is in ['shape', 'fill']\n assert random_state is not None\n\n shape_generator = random_state.choice(shape_choices)\n shape_name = get_shape_name(shape_generator)\n shape_class = SHAPE_CLASSES[shape_name]\n\n if textures is not None:\n assert len(textures) > 0\n fills = textures\n elif colors is not None:\n assert len(colors) > 0\n fills = colors\n\n if fill_is == 'shape':\n # The color of a shape is intrinsic to the shape, so the color\n # should be chosen from the provided colors using the class\n # index of the shape. 
(Decrement shape class by 1 to adjust\n # for the background class being class 0.)\n fill = fills[shape_class-1]\n\n # Because the mapping between shape and color is fixed, saying\n # that class of the object is the class of the shape is the\n # same as saying that the class of the object is the class of\n # the color.\n object_class = shape_class\n elif fill_is == 'random':\n # The color of a shape is accidental, so choose it randomly.\n fill_class = random_state.choice(len(fills))\n fill = fills[fill_class]\n\n # When the mapping between shape and color is random, we need\n # to know which attribute of the object determines the class.\n if class_is == 'shape':\n object_class = shape_class\n elif class_is == 'fill':\n # Add 1 to the color class because 0 is the background class.\n object_class = fill_class + 1\n\n spec = ObjectSpec(\n generator=shape_generator, shape=shape_name,\n fill=fill, class_idx=object_class)\n\n return spec", "def __init__(self):\n self.swagger_types = {\n 'id': 'str',\n 'name': 'str',\n 'uses_git': 'bool',\n 'git_remote_url': 'str',\n 'git_username': 'str',\n 'git_password': 'str',\n 'git_username_user_attribute': 'str',\n 'git_password_user_attribute': 'str',\n 'git_service_name': 'str',\n 'deploy_secret': 'str',\n 'unset_deploy_secret': 'bool',\n 'pull_request_mode': 'str',\n 'validation_required': 'bool',\n 'allow_warnings': 'bool',\n 'is_example': 'bool',\n 'can': 'dict(str, bool)'\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'name': 'name',\n 'uses_git': 'uses_git',\n 'git_remote_url': 'git_remote_url',\n 'git_username': 'git_username',\n 'git_password': 'git_password',\n 'git_username_user_attribute': 'git_username_user_attribute',\n 'git_password_user_attribute': 'git_password_user_attribute',\n 'git_service_name': 'git_service_name',\n 'deploy_secret': 'deploy_secret',\n 'unset_deploy_secret': 'unset_deploy_secret',\n 'pull_request_mode': 'pull_request_mode',\n 'validation_required': 'validation_required',\n 'allow_warnings': 'allow_warnings',\n 'is_example': 'is_example',\n 'can': 'can'\n }\n\n self._id = None\n self._name = None\n self._uses_git = None\n self._git_remote_url = None\n self._git_username = None\n self._git_password = None\n self._git_username_user_attribute = None\n self._git_password_user_attribute = None\n self._git_service_name = None\n self._deploy_secret = None\n self._unset_deploy_secret = None\n self._pull_request_mode = None\n self._validation_required = None\n self._allow_warnings = None\n self._is_example = None\n self._can = None", "def to_spec(self) -> dict[str, typing.Any]:\n spec = {\n \"name\": self.name,\n \"title\": self.title,\n \"comment\": self.comment,\n \"references\": self.references,\n \"institution\": self.institution,\n \"hierarchical\": self.hierarchical,\n \"last_update\": self.last_update.isoformat(),\n }\n if self.version is not None:\n spec[\"version\"] = self.version\n categories = {}\n for cat in self.values():\n code, cat_spec = cat.to_spec()\n categories[code] = cat_spec\n spec[\"categories\"] = categories\n\n return spec", "def __init__(self, goal):\n self._name = goal.get('name', '')\n self._description = goal.get('description', '')\n self._build_type = goal.get('buildType', 'minSizeRel')\n self._build_vars = goal.get('buildVars', {})\n self._build_goal = goal.get('buildGoal', self._name)\n self._artifacts = goal.get('artifacts', [])\n self._builds = {}\n for b in goal['builds']:\n vars = b.get('buildVars', self._build_vars)\n type = b.get('buildType', self._build_type)\n build_goal = 
b.get('buildGoal', self._build_goal)\n description = b.get('description', '')\n arch = b['arch']\n script = b.get('script', None)\n artifacts = b.get('artifacts', self._artifacts)\n self._builds[arch] = BuildSpec(goal=build_goal,\n type=type,\n vars=vars,\n description=description,\n arch=arch,\n script=script,\n artifacts=artifacts)", "def build_model(self):\n pass", "def build_model(self):\n pass", "def make_objects(self):\n pass", "def _build_study_spec(study_spec: study_pb2.StudySpec, state: int,\n creation_time: datetime.datetime) -> study_pb2.StudySpec:\n study_spec.state = state\n study_spec.creation_time.FromDatetime(creation_time)\n return study_spec", "def build(c):", "def __init__(self, jsondict=None, strict=True):\n \n self.systemType = None\n \"\"\" The standard that is used to operate and communicate.\n Type `str`. \"\"\"\n \n self.version = None\n \"\"\" The version of the standard that is used to operate and communicate.\n Type `str`. \"\"\"\n \n super(DeviceDefinitionSpecialization, self).__init__(jsondict=jsondict, strict=strict)", "def build_model(self):\n raise NotImplementedError", "def __init__(self, **kwargs):\n self.swagger_types = {\n 'id': 'str',\n 'name': 'str',\n 'version': 'str',\n 'tagline': 'str',\n 'keywords': 'str',\n 'short_description': 'str',\n 'usage_information': 'str',\n 'long_description': 'str',\n 'license_model_description': 'str',\n 'system_requirements': 'str',\n 'time_released': 'datetime',\n 'release_notes': 'str',\n 'categories': 'list[str]',\n 'publisher': 'Publisher',\n 'languages': 'list[Item]',\n 'screenshots': 'list[Screenshot]',\n 'videos': 'list[NamedLink]',\n 'support_contacts': 'list[SupportContact]',\n 'support_links': 'list[NamedLink]',\n 'documentation_links': 'list[DocumentationLink]',\n 'icon': 'UploadData',\n 'banner': 'UploadData',\n 'regions': 'list[Region]',\n 'package_type': 'str',\n 'default_package_version': 'str',\n 'links': 'list[Link]',\n 'is_featured': 'bool'\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'name': 'name',\n 'version': 'version',\n 'tagline': 'tagline',\n 'keywords': 'keywords',\n 'short_description': 'shortDescription',\n 'usage_information': 'usageInformation',\n 'long_description': 'longDescription',\n 'license_model_description': 'licenseModelDescription',\n 'system_requirements': 'systemRequirements',\n 'time_released': 'timeReleased',\n 'release_notes': 'releaseNotes',\n 'categories': 'categories',\n 'publisher': 'publisher',\n 'languages': 'languages',\n 'screenshots': 'screenshots',\n 'videos': 'videos',\n 'support_contacts': 'supportContacts',\n 'support_links': 'supportLinks',\n 'documentation_links': 'documentationLinks',\n 'icon': 'icon',\n 'banner': 'banner',\n 'regions': 'regions',\n 'package_type': 'packageType',\n 'default_package_version': 'defaultPackageVersion',\n 'links': 'links',\n 'is_featured': 'isFeatured'\n }\n\n self._id = None\n self._name = None\n self._version = None\n self._tagline = None\n self._keywords = None\n self._short_description = None\n self._usage_information = None\n self._long_description = None\n self._license_model_description = None\n self._system_requirements = None\n self._time_released = None\n self._release_notes = None\n self._categories = None\n self._publisher = None\n self._languages = None\n self._screenshots = None\n self._videos = None\n self._support_contacts = None\n self._support_links = None\n self._documentation_links = None\n self._icon = None\n self._banner = None\n self._regions = None\n self._package_type = None\n 
self._default_package_version = None\n self._links = None\n self._is_featured = None", "def get_openapi_spec(self):\n\n spec = {\"operationId\": snake_to_camel(self._wrapped_function.__name__), \"responses\": {}}\n\n if self._doc.short_description:\n spec[\"summary\"] = self._doc.short_description\n\n if self._doc.long_description:\n spec[\"description\"] = self._doc.long_description\n\n if self._tags:\n spec[\"tags\"] = self._tags\n\n if self._path_parameters or self._query_parameters:\n spec[\"parameters\"] = []\n\n for name, param_type in self._path_parameters.items():\n if self._is_param_ignored(name):\n continue\n\n param_spec = {\n \"name\": name,\n \"in\": \"path\",\n \"required\": True,\n \"schema\": {\"type\": self._extension.PARAMETER_TYPE_MAP.get(param_type, \"string\")},\n }\n\n param_doc = self._get_param_doc(name)\n if param_doc is not None:\n param_spec[\"description\"] = param_doc.description\n\n spec[\"parameters\"].append(param_spec)\n\n for name, param_type in self._query_parameters.items():\n param_refl: inspect.Parameter = self._signature.parameters[name]\n param_spec = {\n \"name\": name,\n \"in\": \"query\",\n \"required\": param_refl.default == inspect.Parameter.empty,\n \"schema\": {\"type\": self._extension.PARAMETER_TYPE_MAP.get(param_type, \"string\")},\n }\n\n param_doc = self._get_param_doc(name)\n if param_doc is not None:\n param_spec[\"description\"] = param_doc.description\n\n spec[\"parameters\"].append(param_spec)\n\n if self._request_body_parameter:\n mimetypes = self._request_body_content_types\n\n spec[\"requestBody\"] = {\n \"content\": {\n mimetype: {\"schema\": self._process_model_schema(self._request_body_class)} for mimetype in mimetypes\n },\n \"required\": True,\n }\n\n if issubclass(self._request_body_class, ExamplesMixin):\n for mimetype in mimetypes:\n spec[\"requestBody\"][\"content\"][mimetype][\"examples\"] = model_examples_to_openapi_dict(\n self._request_body_class\n )\n\n param_doc = self._get_param_doc(self._request_body_parameter)\n if param_doc is not None and param_doc.description:\n spec[\"requestBody\"][\"description\"] = param_doc.description\n\n spec[\"x-codegen-request-body-name\"] = \"body\"\n elif self._request_body_file_type:\n spec[\"requestBody\"] = {\n \"content\": {self._request_body_file_type: {\"schema\": {\"type\": \"string\", \"format\": \"binary\"}}}\n }\n\n if self._security:\n spec[\"security\"] = self._security\n\n for response_class, codes in self._responses.items():\n for code, response_data in codes.items():\n if issubclass(response_class, FileResponse):\n mime = response_data.mimetype or \"application/octet-stream\"\n spec[\"responses\"][str(code)] = {\n \"description\": response_data.description or response_class.__name__,\n \"content\": {mime: {\"schema\": {\"type\": \"string\", \"format\": \"binary\"}}},\n }\n else:\n spec[\"responses\"][str(code)] = {\n \"description\": response_data.description or response_class.__name__,\n \"content\": {\"application/json\": {\"schema\": self._process_model_schema(response_class)}},\n }\n\n if issubclass(response_class, ExamplesMixin):\n # fmt: off\n spec[\"responses\"][str(code)][\"content\"][\"application/json\"][\"examples\"] = \\\n model_examples_to_openapi_dict(response_class)\n # fmt: on\n\n return spec", "def __init__(self, jsondict=None, strict=True):\n \n self.capability = None\n \"\"\" Device capabilities.\n List of `DeviceDefinitionCapability` items (represented as `dict` in JSON). 
\"\"\"\n \n self.contact = None\n \"\"\" Details for human/organization for support.\n List of `ContactPoint` items (represented as `dict` in JSON). \"\"\"\n \n self.deviceName = None\n \"\"\" A name given to the device to identify it.\n List of `DeviceDefinitionDeviceName` items (represented as `dict` in JSON). \"\"\"\n \n self.identifier = None\n \"\"\" Instance identifier.\n List of `Identifier` items (represented as `dict` in JSON). \"\"\"\n \n self.languageCode = None\n \"\"\" Language code for the human-readable text strings produced by the\n device (all supported).\n List of `CodeableConcept` items (represented as `dict` in JSON). \"\"\"\n \n self.manufacturerReference = None\n \"\"\" Name of device manufacturer.\n Type `FHIRReference` (represented as `dict` in JSON). \"\"\"\n \n self.manufacturerString = None\n \"\"\" Name of device manufacturer.\n Type `str`. \"\"\"\n \n self.material = None\n \"\"\" A substance used to create the material(s) of which the device is\n made.\n List of `DeviceDefinitionMaterial` items (represented as `dict` in JSON). \"\"\"\n \n self.modelNumber = None\n \"\"\" The model number for the device.\n Type `str`. \"\"\"\n \n self.note = None\n \"\"\" Device notes and comments.\n List of `Annotation` items (represented as `dict` in JSON). \"\"\"\n \n self.onlineInformation = None\n \"\"\" Access to on-line information.\n Type `str`. \"\"\"\n \n self.owner = None\n \"\"\" Organization responsible for device.\n Type `FHIRReference` (represented as `dict` in JSON). \"\"\"\n \n self.parentDevice = None\n \"\"\" The parent device it can be part of.\n Type `FHIRReference` (represented as `dict` in JSON). \"\"\"\n \n self.physicalCharacteristics = None\n \"\"\" Dimensions, color etc..\n Type `ProdCharacteristic` (represented as `dict` in JSON). \"\"\"\n \n self.property = None\n \"\"\" The actual configuration settings of a device as it actually\n operates, e.g., regulation status, time properties.\n List of `DeviceDefinitionProperty` items (represented as `dict` in JSON). \"\"\"\n \n self.quantity = None\n \"\"\" The quantity of the device present in the packaging (e.g. the\n number of devices present in a pack, or the number of devices in\n the same package of the medicinal product).\n Type `Quantity` (represented as `dict` in JSON). \"\"\"\n \n self.safety = None\n \"\"\" Safety characteristics of the device.\n List of `CodeableConcept` items (represented as `dict` in JSON). \"\"\"\n \n self.shelfLifeStorage = None\n \"\"\" Shelf Life and storage information.\n List of `ProductShelfLife` items (represented as `dict` in JSON). \"\"\"\n \n self.specialization = None\n \"\"\" The capabilities supported on a device, the standards to which the\n device conforms for a particular purpose, and used for the\n communication.\n List of `DeviceDefinitionSpecialization` items (represented as `dict` in JSON). \"\"\"\n \n self.type = None\n \"\"\" What kind of device or device system this is.\n Type `CodeableConcept` (represented as `dict` in JSON). \"\"\"\n \n self.udiDeviceIdentifier = None\n \"\"\" Unique Device Identifier (UDI) Barcode string.\n List of `DeviceDefinitionUdiDeviceIdentifier` items (represented as `dict` in JSON). \"\"\"\n \n self.url = None\n \"\"\" Network address to contact device.\n Type `str`. \"\"\"\n \n self.version = None\n \"\"\" Available versions.\n List of `str` items. 
\"\"\"\n \n super(DeviceDefinition, self).__init__(jsondict=jsondict, strict=strict)", "def _build(specs_dict: dict, **kwargs: bool):\n return [\n Card(face, suit, value=specs_dict.get(face).get(suit), **kwargs)\n for face in specs_dict.keys()\n for suit in specs_dict.get(face).keys()\n ]", "def build(self):\n self.build_inputs()\n self.build_image_embeddings()\n self.build_seq_embeddings()\n self.build_encoder()\n self.build_prediction_model()\n self.setup_encoder_initializer()\n self.setup_global_step()\n self.list_trainable_variables()", "def make_pod_spec(self):\n spec = {\n 'containers': [{\n 'name': self.framework.model.app.name,\n 'imageDetails': {\n },\n 'ports': [{\n 'containerPort':\n self.framework.model.config['advertised-port'],\n 'protocol': 'TCP',\n }],\n }],\n }\n return spec", "def __init__(self):\n self.swagger_types = {\n 'detail_type': 'str',\n 'identifier': 'int',\n 'success': 'bool',\n 'description': 'str',\n 'duration': 'float',\n 'bag_name': 'str',\n 'bag_store_name': 'str',\n 'results': 'object',\n 'bag': 'BagSummary'\n }\n\n self.attribute_map = {\n 'detail_type': 'detail_type',\n 'identifier': 'identifier',\n 'success': 'success',\n 'description': 'description',\n 'duration': 'duration',\n 'bag_name': 'bag_name',\n 'bag_store_name': 'bag_store_name',\n 'results': 'results',\n 'bag': 'bag'\n }\n\n self._detail_type = None\n self._identifier = None\n self._success = None\n self._description = None\n self._duration = None\n self._bag_name = None\n self._bag_store_name = None\n self._results = None\n self._bag = None", "def _instantiate_image_obj(name_template, obj_template, fuzzer, benchmark):\n name = _substitute(name_template, fuzzer, benchmark)\n obj = obj_template.copy()\n for key in obj:\n if key in ('build_arg', 'depends_on'):\n obj[key] = [\n _substitute(item, fuzzer, benchmark) for item in obj[key]\n ]\n else:\n obj[key] = _substitute(obj[key], fuzzer, benchmark)\n return name, obj", "def __init__(self):\n self.swagger_types = {\n 'annotations': 'dict(str, str)',\n 'end_time': 'int',\n 'hosts': 'list[str]',\n 'is_ephemeral': 'bool',\n 'is_user_event': 'bool',\n 'name': 'str',\n 'start_time': 'int',\n 'summarized_events': 'int',\n 'table': 'str',\n 'tags': 'list[str]'\n }\n\n self.attribute_map = {\n 'annotations': 'annotations',\n 'end_time': 'endTime',\n 'hosts': 'hosts',\n 'is_ephemeral': 'isEphemeral',\n 'is_user_event': 'isUserEvent',\n 'name': 'name',\n 'start_time': 'startTime',\n 'summarized_events': 'summarizedEvents',\n 'table': 'table',\n 'tags': 'tags'\n }\n\n self._annotations = None\n self._end_time = None\n self._hosts = None\n self._is_ephemeral = False\n self._is_user_event = False\n self._name = None\n self._start_time = None\n self._summarized_events = None\n self._table = None\n self._tags = None", "def create(self, spec, force_cache=False, image_dir=\"~/.hyperkit\"):", "def build_all(self):\n self.android_build()\n self.generate_patch_build('')\n self.generate_specs_build()\n self.generate_interfaces()", "def build(cls, so_tree, raw):\n return cls(\n model=raw[\"model\"],\n model_rna_type=RnaType.from_so_id(so_tree, raw[\"model_rna_type\"]),\n model_domain=raw[\"model_domain\"],\n model_name=raw[\"model_name\"],\n model_long_name=raw[\"model_long_name\"],\n sequence_info=HitComponent(\n completeness=raw[\"sequence_completeness\"],\n start=raw[\"sequence_start\"],\n stop=raw[\"sequence_stop\"],\n ),\n model_info=HitComponent(\n completeness=raw[\"model_completeness\"],\n start=raw[\"model_start\"],\n stop=raw[\"model_stop\"],\n ),\n )", 
"def create(self, validated_data):\n new_spec = Specification(key = validated_data.get('key'),\n value = validated_data.get('value'),\n category = validated_data.get('category'),\n car = validated_data.get('car'),)\n new_spec.save()\n\n return new_spec", "def __init__(self, mean_radius=None, cluster=None, orbital_period=None, surface_area=None, orbital_eccentricity=None, mass=None, description=None, type=None, max_absolute_magnitude=None, mean_temperature=None, constellation=None, von_klitzing_constant=None, maximum_temperature=None, temperature=None, definition=None, id=None, periapsis=None, absolute_magnitude=None, density=None, notable_features=None, average_speed=None, label=None, apoapsis=None, volume=None, messier_name=None, max_apparent_magnitude=None, explorer=None, minimum_temperature=None, ngc_name=None): # noqa: E501\n\n\n self.openapi_types = {\n 'mean_radius': List[object],\n 'cluster': List[object],\n 'orbital_period': List[object],\n 'surface_area': List[object],\n 'orbital_eccentricity': List[float],\n 'mass': List[object],\n 'description': List[str],\n 'type': List[str],\n 'max_absolute_magnitude': List[float],\n 'mean_temperature': List[object],\n 'constellation': List[object],\n 'von_klitzing_constant': List[float],\n 'maximum_temperature': List[object],\n 'temperature': List[object],\n 'definition': List[str],\n 'id': str,\n 'periapsis': List[object],\n 'absolute_magnitude': List[float],\n 'density': List[object],\n 'notable_features': List[str],\n 'average_speed': List[object],\n 'label': List[str],\n 'apoapsis': List[object],\n 'volume': List[object],\n 'messier_name': List[str],\n 'max_apparent_magnitude': List[float],\n 'explorer': List[object],\n 'minimum_temperature': List[object],\n 'ngc_name': List[str]\n }\n\n self.attribute_map = {\n 'mean_radius': 'meanRadius',\n 'cluster': 'cluster',\n 'orbital_period': 'orbitalPeriod',\n 'surface_area': 'surfaceArea',\n 'orbital_eccentricity': 'orbitalEccentricity',\n 'mass': 'mass',\n 'description': 'description',\n 'type': 'type',\n 'max_absolute_magnitude': 'maxAbsoluteMagnitude',\n 'mean_temperature': 'meanTemperature',\n 'constellation': 'constellation',\n 'von_klitzing_constant': 'vonKlitzingConstant',\n 'maximum_temperature': 'maximumTemperature',\n 'temperature': 'temperature',\n 'definition': 'definition',\n 'id': 'id',\n 'periapsis': 'periapsis',\n 'absolute_magnitude': 'absoluteMagnitude',\n 'density': 'density',\n 'notable_features': 'notableFeatures',\n 'average_speed': 'averageSpeed',\n 'label': 'label',\n 'apoapsis': 'apoapsis',\n 'volume': 'volume',\n 'messier_name': 'messierName',\n 'max_apparent_magnitude': 'maxApparentMagnitude',\n 'explorer': 'explorer',\n 'minimum_temperature': 'minimumTemperature',\n 'ngc_name': 'ngcName'\n }\n\n self._mean_radius = mean_radius\n self._cluster = cluster\n self._orbital_period = orbital_period\n self._surface_area = surface_area\n self._orbital_eccentricity = orbital_eccentricity\n self._mass = mass\n self._description = description\n self._type = type\n self._max_absolute_magnitude = max_absolute_magnitude\n self._mean_temperature = mean_temperature\n self._constellation = constellation\n self._von_klitzing_constant = von_klitzing_constant\n self._maximum_temperature = maximum_temperature\n self._temperature = temperature\n self._definition = definition\n self._id = id\n self._periapsis = periapsis\n self._absolute_magnitude = absolute_magnitude\n self._density = density\n self._notable_features = notable_features\n self._average_speed = average_speed\n self._label = 
label\n self._apoapsis = apoapsis\n self._volume = volume\n self._messier_name = messier_name\n self._max_apparent_magnitude = max_apparent_magnitude\n self._explorer = explorer\n self._minimum_temperature = minimum_temperature\n self._ngc_name = ngc_name", "def __init__(self):\n self.swagger_types = {\n 'is_waiting': 'bool',\n 'is_active': 'bool',\n 'is_acd': 'bool',\n 'is_preferred': 'bool',\n 'is_screenshare': 'bool',\n 'is_cobrowse': 'bool',\n 'is_voicemail': 'bool',\n 'is_flagged': 'bool',\n 'is_monitored': 'bool',\n 'filter_wrap_up_notes': 'bool',\n 'match_all': 'bool'\n }\n\n self.attribute_map = {\n 'is_waiting': 'isWaiting',\n 'is_active': 'isActive',\n 'is_acd': 'isAcd',\n 'is_preferred': 'isPreferred',\n 'is_screenshare': 'isScreenshare',\n 'is_cobrowse': 'isCobrowse',\n 'is_voicemail': 'isVoicemail',\n 'is_flagged': 'isFlagged',\n 'is_monitored': 'isMonitored',\n 'filter_wrap_up_notes': 'filterWrapUpNotes',\n 'match_all': 'matchAll'\n }\n\n self._is_waiting = None\n self._is_active = None\n self._is_acd = None\n self._is_preferred = None\n self._is_screenshare = None\n self._is_cobrowse = None\n self._is_voicemail = None\n self._is_flagged = None\n self._is_monitored = None\n self._filter_wrap_up_notes = None\n self._match_all = None", "def _build_model(self):\n raise NotImplementedError()", "def __init__(self):\n self.drones = ZergUnit(UnitTypeId.DRONE, to_count=0)\n self.lings = ZergUnit(UnitTypeId.ZERGLING, to_count=999)\n self.queens = ZergUnit(UnitTypeId.QUEEN, to_count=3)\n self.roaches = ZergUnit(UnitTypeId.ROACH, to_count=100, priority=True)\n self.ravagers = ZergUnit(UnitTypeId.RAVAGER, to_count=0)\n self.defense_spines = DefensiveBuilding(\n unit_type=UnitTypeId.SPINECRAWLER, position_type=DefensePosition.Entrance, to_base_index=1, to_count=3\n )\n self.gas = StepBuildGas(to_count=3)\n\n unit_building = BuildOrder(\n [\n Step(None, self.drones, skip_until=self.should_build_drones),\n Step(UnitExists(UnitTypeId.SPAWNINGPOOL), self.defense_spines),\n Step(\n RequiredAll([UnitExists(UnitTypeId.ROACHWARREN), UnitExists(UnitTypeId.ROACH)]),\n self.ravagers,\n skip_until=self.should_build_ravagers,\n ),\n Step(UnitExists(UnitTypeId.ROACHWARREN), self.roaches),\n Step(\n RequiredAll(\n [\n UnitExists(UnitTypeId.SPAWNINGPOOL),\n UnitExists(\n UnitTypeId.ROACHWARREN,\n include_pending=True,\n include_not_ready=True,\n include_killed=True,\n ),\n ]\n ),\n self.lings,\n ),\n Step(UnitExists(UnitTypeId.SPAWNINGPOOL), self.queens),\n Step(UnitExists(UnitTypeId.SPAWNINGPOOL), self.lings),\n ]\n )\n\n buildings: BuildOrder = BuildOrder(\n [\n Step(None, ActBuilding(UnitTypeId.SPAWNINGPOOL, to_count=1)),\n Step(UnitExists(UnitTypeId.SPAWNINGPOOL), ActBuilding(UnitTypeId.ROACHWARREN, to_count=1)),\n Step(None, self.gas, skip_until=self.should_build_gas),\n ]\n )\n\n super().__init__(buildings, unit_building)", "def build(self):\n self._remove_swarm_keys()\n self._remove_pod_keys()\n self._set_image()\n self._translate_docker_properties()", "def __init__(self, pathspec, properties={}):\n import numpy\n self.pathspec = pathspec\n super(ArraySpec,self).__init__(numpy.ndarray)\n self.properties = OrderedDict(properties)", "def build_model(self) -> DM:\n model = DM()\n model[self.modelroot] = content = DM()\n\n content['key'] = self.key\n content['id'] = self.id\n content['system-family'] = self.family\n for cp in self.parameters:\n content.append('calculation-parameter', DM(cp))\n\n self._set_model(model)\n return model", "def __init__(self, options, build_revision):\n\n self.options = 
options\n self._src_dir = os.path.abspath(options.src_dir)\n self._chrome_dir = os.path.join(self._src_dir, 'chrome')\n # TODO: This scode should not be grabbing so deeply into WebKit.\n # Worse, this code ends up looking at top-of-tree WebKit\n # instead of the revision in DEPS.\n self._webkit_dir = os.path.join(self._src_dir, 'third_party', 'WebKit',\n 'Source', 'WebCore')\n self._v8_dir = os.path.join(self._src_dir, 'v8')\n # TODO: need to get the build *output* directory passed in instead so Linux\n # and Mac don't have to walk up a directory to get to the right directory.\n if chromium_utils.IsWindows():\n self._build_dir = os.path.join(options.build_dir, options.target)\n self._tool_dir = os.path.join(self._chrome_dir, 'tools', 'build', 'win')\n elif chromium_utils.IsLinux():\n self._build_dir = os.path.join(os.path.dirname(options.build_dir),\n 'out', options.target)\n self._tool_dir = os.path.join(self._chrome_dir, 'tools', 'build', 'linux')\n elif chromium_utils.IsMac():\n self._build_dir = os.path.join(os.path.dirname(options.build_dir),\n 'xcodebuild', options.target)\n self._tool_dir = os.path.join(self._chrome_dir, 'tools', 'build', 'mac')\n else:\n raise NotImplementedError(\n 'Platform \"%s\" is not currently supported.' % sys.platform)\n self._staging_dir = slave_utils.GetStagingDir(self._src_dir)\n\n self._symbol_dir_base = options.dirs['symbol_dir_base']\n self._www_dir_base = options.dirs['www_dir_base']\n self._build_name = slave_utils.SlaveBuildName(self._src_dir)\n self._symbol_dir_base = os.path.join(self._symbol_dir_base,\n self._build_name)\n self._www_dir_base = os.path.join(self._www_dir_base, self._build_name)\n\n self._version_file = os.path.join(self._chrome_dir, 'VERSION')\n\n if options.default_chromium_revision:\n self._chromium_revision = options.default_chromium_revision\n else:\n self._chromium_revision = slave_utils.SubversionRevision(self._chrome_dir)\n if options.default_webkit_revision:\n self._webkit_revision = options.default_webkit_revision\n else:\n self._webkit_revision = slave_utils.SubversionRevision(self._webkit_dir)\n if options.default_v8_revision:\n self._v8_revision = options.default_v8_revision\n else:\n self._v8_revision = slave_utils.SubversionRevision(self._v8_dir)\n self.last_change_file = os.path.join(self._staging_dir, 'LAST_CHANGE')\n # The REVISIONS file will record the revisions information of the main\n # components Chromium/WebKit/V8.\n self.revisions_path = os.path.join(self._staging_dir, 'REVISIONS')\n self._build_revision = build_revision\n # Will be initialized in GetLastBuildRevision.\n self.last_chromium_revision = None\n self.last_webkit_revision = None\n self.last_v8_revision = None\n\n self._files_file = os.path.join(self._tool_dir,\n archive_utils.FILES_FILENAME)\n self._test_files = self.BuildOldFilesList(TEST_FILE_NAME)\n\n self._dual_upload = options.factory_properties.get('dual_upload', False)\n self._archive_files = None", "def __init__(self, name: str=None, categories: List[str]=None, nutrients: List[IngredientObjectNutrients]=None, calorie_conversion_factor: IngredientObjectCalorieConversionFactor=None, protein_conversion_factor: float=None, components: List[IngredientObjectComponents]=None, portions: List[IngredientObjectPortions]=None, common_name: str=None, footnote: str=None, search_term: str=None, score: str=None): # noqa: E501\n self.swagger_types = {\n 'name': str,\n 'categories': List[str],\n 'nutrients': List[IngredientObjectNutrients],\n 'calorie_conversion_factor': 
IngredientObjectCalorieConversionFactor,\n 'protein_conversion_factor': float,\n 'components': List[IngredientObjectComponents],\n 'portions': List[IngredientObjectPortions],\n 'common_name': str,\n 'footnote': str,\n 'search_term': str,\n 'score': str\n }\n\n self.attribute_map = {\n 'name': 'name',\n 'categories': 'categories',\n 'nutrients': 'nutrients',\n 'calorie_conversion_factor': 'calorie_conversion_factor',\n 'protein_conversion_factor': 'protein_conversion_factor',\n 'components': 'components',\n 'portions': 'portions',\n 'common_name': 'common_name',\n 'footnote': 'footnote',\n 'search_term': 'search_term',\n 'score': 'score'\n }\n self._name = name\n self._categories = categories\n self._nutrients = nutrients\n self._calorie_conversion_factor = calorie_conversion_factor\n self._protein_conversion_factor = protein_conversion_factor\n self._components = components\n self._portions = portions\n self._common_name = common_name\n self._footnote = footnote\n self._search_term = search_term\n self._score = score", "def __init__(self, specs, resources, properties=None):\n if not properties:\n properties = {}\n self.init_collections()\n self.properties = properties\n self.set_paths(specs, resources)\n self.parse_paths()\n self.find_resources()", "def build_model(self) -> DM:\n model = DM()\n model[self.modelroot] = content = DM()\n\n content['potential'] = DM()\n content['potential']['key'] = self.potential_key\n content['potential']['id'] = self.potential_id\n content['implementation'] = DM()\n content['implementation']['key'] = self.potential_LAMMPS_key\n content['implementation']['id'] = self.potential_LAMMPS_id\n\n for subset in self.subsets:\n subset.build_model(content)\n\n self._set_model(model)\n return model", "def build_document(self):\n pass", "def build(self, *args, **kwargs):\n return", "def __init__(self):\n self.swagger_types = {\n 'id': 'str',\n 'name': 'str',\n 'date_created': 'datetime',\n 'date_modified': 'datetime',\n 'version': 'int',\n 'division': 'DomainEntityRef',\n 'campaign_status': 'str',\n 'callable_time_set': 'DomainEntityRef',\n 'contact_list': 'DomainEntityRef',\n 'dnc_lists': 'list[DomainEntityRef]',\n 'always_running': 'bool',\n 'contact_sorts': 'list[ContactSort]',\n 'messages_per_minute': 'int',\n 'errors': 'list[RestErrorDetail]',\n 'sms_config': 'SmsConfig',\n 'self_uri': 'str'\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'name': 'name',\n 'date_created': 'dateCreated',\n 'date_modified': 'dateModified',\n 'version': 'version',\n 'division': 'division',\n 'campaign_status': 'campaignStatus',\n 'callable_time_set': 'callableTimeSet',\n 'contact_list': 'contactList',\n 'dnc_lists': 'dncLists',\n 'always_running': 'alwaysRunning',\n 'contact_sorts': 'contactSorts',\n 'messages_per_minute': 'messagesPerMinute',\n 'errors': 'errors',\n 'sms_config': 'smsConfig',\n 'self_uri': 'selfUri'\n }\n\n self._id = None\n self._name = None\n self._date_created = None\n self._date_modified = None\n self._version = None\n self._division = None\n self._campaign_status = None\n self._callable_time_set = None\n self._contact_list = None\n self._dnc_lists = None\n self._always_running = None\n self._contact_sorts = None\n self._messages_per_minute = None\n self._errors = None\n self._sms_config = None\n self._self_uri = None", "def __init__(self, commit_hash=None, distro_hash=None, extended_hash=None,\n aggregate_hash=None, promote_name=None, timestamp=None,\n user=None, repo_hash=None, repo_url=None, component=None):\n self.swagger_types = {\n 'commit_hash': 'str',\n 
'distro_hash': 'str',\n 'extended_hash': 'str',\n 'aggregate_hash': 'str',\n 'promote_name': 'str',\n 'timestamp': 'int',\n 'user': 'str',\n 'repo_hash': 'str',\n 'repo_url': 'str',\n 'component': 'str',\n }\n\n self.attribute_map = {\n 'commit_hash': 'commit_hash',\n 'distro_hash': 'distro_hash',\n 'extended_hash': 'extended_hash',\n 'aggregate_hash': 'aggregate_hash',\n 'promote_name': 'promote_name',\n 'timestamp': 'timestamp',\n 'user': 'user',\n 'repo_hash': 'repo_hash',\n 'repo_url': 'repo_url',\n 'component': 'component',\n }\n\n self._commit_hash = commit_hash\n self._distro_hash = distro_hash\n self._extended_hash = extended_hash\n self._aggregate_hash = aggregate_hash\n self._promote_name = promote_name\n self._timestamp = timestamp\n self._user = user\n self._repo_hash = repo_hash\n self._repo_url = repo_url\n self._component = component", "def _builder(o_name,_nodes,_tagged_reals):\n obj = _tagged_reals[o_name]\n \n if isinstance(obj,ElementaryReal):\n un = UncertainReal._archived_elementary(\n uid = obj.uid,\n x = obj.x\n )\n _tagged_reals[o_name] = un \n \n elif isinstance(obj,IntermediateReal): \n \n _node = _nodes[obj.uid] \n \n un = UncertainReal(\n obj.value,\n _vector_index_to_node( obj.u_components ),\n _vector_index_to_node( obj.d_components ),\n _ivector_index_to_node( obj.i_components, _nodes ),\n _node,\n )\n \n _tagged_reals[o_name] = un\n\n else:\n assert False, \"unexpected: {!r}\".format(obj)\n\n return un" ]
[ "0.66367", "0.6583926", "0.64170843", "0.62385815", "0.6159886", "0.6103416", "0.60937", "0.6093361", "0.5987617", "0.5961307", "0.59383756", "0.59383756", "0.58615595", "0.58615595", "0.58420074", "0.5829586", "0.5824573", "0.58184236", "0.58095497", "0.5804861", "0.5796754", "0.57773733", "0.57283515", "0.5716216", "0.5716216", "0.5716216", "0.56121534", "0.5607942", "0.5576009", "0.55731595", "0.5547864", "0.5542984", "0.5524512", "0.5517914", "0.54996896", "0.5473807", "0.54720426", "0.54691136", "0.5453865", "0.54530054", "0.54370767", "0.54350626", "0.54219544", "0.54194415", "0.5405691", "0.5400701", "0.5390119", "0.538898", "0.53773415", "0.5374843", "0.5367559", "0.5367379", "0.53507686", "0.53434974", "0.5323574", "0.53170073", "0.5295098", "0.5292586", "0.5290145", "0.5286395", "0.527607", "0.5275783", "0.5274599", "0.52745074", "0.52600014", "0.52600014", "0.525165", "0.52407634", "0.52379847", "0.5236001", "0.52357924", "0.5234562", "0.52286536", "0.52247334", "0.51994264", "0.51945555", "0.51901394", "0.5187187", "0.5186912", "0.5173924", "0.516457", "0.5163234", "0.5153915", "0.5151875", "0.51518613", "0.5145113", "0.51403946", "0.5134957", "0.51341194", "0.5128951", "0.5128181", "0.5126246", "0.5120277", "0.5115682", "0.5114809", "0.5107239", "0.5106801", "0.50985813", "0.509553", "0.50940937" ]
0.54430425
40
Builds the Property Filter Spec Object.
def get_prop_filter_spec(client_factory, obj_spec, prop_spec):
    prop_filter_spec = \
        client_factory.create('ns0:PropertyFilterSpec')
    prop_filter_spec.propSet = prop_spec
    prop_filter_spec.objectSet = obj_spec
    return prop_filter_spec
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_property_filter_spec(client_factory, property_specs, object_specs):\r\n property_filter_spec = client_factory.create('ns0:PropertyFilterSpec')\r\n property_filter_spec.propSet = property_specs\r\n property_filter_spec.objectSet = object_specs\r\n return property_filter_spec", "def get_prop_filter_spec(client_factory, obj_spec, prop_spec):\n prop_filter_spec = client_factory.create('ns0:PropertyFilterSpec')\n prop_filter_spec.propSet = prop_spec\n prop_filter_spec.objectSet = obj_spec\n return prop_filter_spec", "def _createSpecificProperty(self, filter_name):\n import uno\n from com.sun.star.beans import PropertyValue\n if filter_name == \"impress_html_Export\":\n property = PropertyValue('FilterData', 0,\n uno.Any('[]com.sun.star.beans.PropertyValue',\n (PropertyValue('IsExportNotes', 0, True, 0),\n PropertyValue('PublishMode', 0, 0, 0),\n PropertyValue('Width', 0, 640, 0),\n PropertyValue('Format', 0, 2, 0),),), 0)\n elif filter_name == \"impress_pdf_Export\":\n property = PropertyValue('FilterData', 0,\n uno.Any('[]com.sun.star.beans.PropertyValue',\n (PropertyValue('ExportNotesPages', 0, True, 0),\n PropertyValue('SelectPdfVersion', 0, 1, 0),),), 0)\n elif \"pdf_Export\" in filter_name :\n property = PropertyValue('FilterData', 0,\n uno.Any('[]com.sun.star.beans.PropertyValue',\n (PropertyValue('SelectPdfVersion', 0, 1, 0),),), 0)\n elif filter_name in (\"draw_html_Export\", \"HTML (StarCalc)\"):\n property = PropertyValue('FilterData', 0,\n uno.Any('[]com.sun.star.beans.PropertyValue',\n (PropertyValue('Format', 0, 2, 0),),), 0)\n elif filter_name == \"Text (encoded)\":\n property = PropertyValue('FilterFlags', 0, 'UTF8,LF', 0)\n else:\n return []\n\n return [property, ]", "def _get_filters(self, req):\n filters = {}\n properties = {}\n\n for param in req.str_params:\n if param in SUPPORTED_FILTERS:\n filters[param] = req.str_params.get(param)\n if param.startswith('property-'):\n _param = param[9:]\n properties[_param] = req.str_params.get(param)\n\n if len(properties) > 0:\n filters['properties'] = properties\n\n return filters", "def __init__(self, *args, **kwargs):\n\n # Construct the base instance.\n super(FilterPropList, self).__init__(*args, **kwargs)\n\n # Construct the regular expression tag evaluators.\n nameregextag = self.thistag.find('PropNameRegex')\n if nameregextag != None:\n self.nameregex = RegexTag(nameregextag)\n else:\n self.nameregex = None\n\n valueregextag = self.thistag.find('PropValueRegex')\n if valueregextag != None:\n self.valueregex = RegexTag(valueregextag)\n else:\n self.valueregex = None\n\n # Make sure that at least one regular expression is specified.\n if self.nameregex == None and self.valueregex == None:\n raise ValueError('Required tag missing: '\\\n 'PropNameRegex or PropValueRegex')\n\n # Get the \"look for the first match\" flag.\n self.matchfirst = self.get_boolean('matchFirst')\n logger.debug('matchfirst = {0}'.format(self.matchfirst))\n\n # Get the path name.\n self.path = self.context.tokens['Path']\n logger.debug('path = {0}'.format(self.path))", "def build_property_spec(client_factory, type=\"VirtualMachine\",\r\n properties_to_collect=[\"name\"],\r\n all_properties=False):\r\n property_spec = client_factory.create('ns0:PropertySpec')\r\n property_spec.all = all_properties\r\n property_spec.pathSet = properties_to_collect\r\n property_spec.type = type\r\n return property_spec", "def _get_filters(self, req):\n filters = {}\n for param in req.str_params:\n if param in SUPPORTED_FILTERS or param.startswith('property-'):\n # map 
filter name or carry through if property-*\n filter_name = SUPPORTED_FILTERS.get(param, param)\n filters[filter_name] = req.str_params.get(param)\n return filters", "def __init__(self, *args, **kwargs):\n\n # Construct the base instance.\n super(FilterRevProp, self).__init__(*args, **kwargs)\n\n # Construct regular expression tag evaluators.\n nameregextag = self.thistag.find('PropNameRegex')\n if nameregextag != None:\n self.nameregex = RegexTag(nameregextag)\n else:\n self.nameregex = None\n\n valueregextag = self.thistag.find('PropValueRegex')\n if valueregextag != None:\n self.valueregex = RegexTag(valueregextag)\n else:\n self.valueregex = None\n\n # Make sure that at least one regular expression is specified.\n if self.nameregex == None and self.valueregex == None:\n raise ValueError('Required tag missing: '\\\n 'PropNameRegex or PropValueRegex')\n\n # Save the revision property details.\n self.propname = self.context.tokens['RevPropName']\n logger.debug('propname = {0}'.format(self.propname))\n self.propvalue = self.context.tokens['RevPropValue']\n logger.debug('propvalue = \"{0}\"'.format(self.propvalue))", "def _build(self, prefilt=None):\n self.make_filiation()\n if prefilt is not None:\n self.prefilter(filt=prefilt)\n self.make_trees()\n return", "def __init__(self) -> None:\r\n self.filters: list[Filter] = []", "def _build_filter_part(self, cls, filters, order_by=None, select=None):\r\n import types\r\n query_parts = []\r\n\r\n order_by_filtered = False\r\n\r\n if order_by:\r\n if order_by[0] == \"-\":\r\n order_by_method = \"DESC\";\r\n order_by = order_by[1:]\r\n else:\r\n order_by_method = \"ASC\";\r\n\r\n if select:\r\n if order_by and order_by in select:\r\n order_by_filtered = True\r\n query_parts.append(\"(%s)\" % select)\r\n\r\n if isinstance(filters, str) or isinstance(filters, unicode):\r\n query = \"WHERE %s AND `__type__` = '%s'\" % (filters, cls.__name__)\r\n if order_by in [\"__id__\", \"itemName()\"]:\r\n query += \" ORDER BY itemName() %s\" % order_by_method\r\n elif order_by != None:\r\n query += \" ORDER BY `%s` %s\" % (order_by, order_by_method)\r\n return query\r\n\r\n for filter in filters:\r\n filter_parts = []\r\n filter_props = filter[0]\r\n if type(filter_props) != list:\r\n filter_props = [filter_props]\r\n for filter_prop in filter_props:\r\n (name, op) = filter_prop.strip().split(\" \", 1)\r\n value = filter[1]\r\n property = cls.find_property(name)\r\n if name == order_by:\r\n order_by_filtered = True\r\n if types.TypeType(value) == types.ListType:\r\n filter_parts_sub = []\r\n for val in value:\r\n val = self.encode_value(property, val)\r\n if isinstance(val, list):\r\n for v in val:\r\n filter_parts_sub.append(self._build_filter(property, name, op, v))\r\n else:\r\n filter_parts_sub.append(self._build_filter(property, name, op, val))\r\n filter_parts.append(\"(%s)\" % (\" OR \".join(filter_parts_sub)))\r\n else:\r\n val = self.encode_value(property, value)\r\n if isinstance(val, list):\r\n for v in val:\r\n filter_parts.append(self._build_filter(property, name, op, v))\r\n else:\r\n filter_parts.append(self._build_filter(property, name, op, val))\r\n query_parts.append(\"(%s)\" % (\" or \".join(filter_parts)))\r\n\r\n\r\n type_query = \"(`__type__` = '%s'\" % cls.__name__\r\n for subclass in self._get_all_decendents(cls).keys():\r\n type_query += \" or `__type__` = '%s'\" % subclass\r\n type_query +=\")\"\r\n query_parts.append(type_query)\r\n\r\n order_by_query = \"\"\r\n\r\n if order_by:\r\n if not order_by_filtered:\r\n 
query_parts.append(\"`%s` LIKE '%%'\" % order_by)\r\n if order_by in [\"__id__\", \"itemName()\"]:\r\n order_by_query = \" ORDER BY itemName() %s\" % order_by_method\r\n else:\r\n order_by_query = \" ORDER BY `%s` %s\" % (order_by, order_by_method)\r\n\r\n if len(query_parts) > 0:\r\n return \"WHERE %s %s\" % (\" AND \".join(query_parts), order_by_query)\r\n else:\r\n return \"\"", "def __init__(self, filter_methods: ConfigNodePropertyArray=None, filter_enable_safe_user_agents: ConfigNodePropertyBoolean=None, filter_safe_user_agents: ConfigNodePropertyArray=None, filter_excluded_paths: ConfigNodePropertyArray=None): # noqa: E501\n self.openapi_types = {\n 'filter_methods': ConfigNodePropertyArray,\n 'filter_enable_safe_user_agents': ConfigNodePropertyBoolean,\n 'filter_safe_user_agents': ConfigNodePropertyArray,\n 'filter_excluded_paths': ConfigNodePropertyArray\n }\n\n self.attribute_map = {\n 'filter_methods': 'filter.methods',\n 'filter_enable_safe_user_agents': 'filter.enable.safe.user.agents',\n 'filter_safe_user_agents': 'filter.safe.user.agents',\n 'filter_excluded_paths': 'filter.excluded.paths'\n }\n\n self._filter_methods = filter_methods\n self._filter_enable_safe_user_agents = filter_enable_safe_user_agents\n self._filter_safe_user_agents = filter_safe_user_agents\n self._filter_excluded_paths = filter_excluded_paths", "def _get_filters(self, req):\n filters = {}\n properties = {}\n\n for param in req.params:\n if param in SUPPORTED_FILTERS:\n filters[param] = req.params.get(param)\n if param.startswith('property-'):\n _param = param[9:]\n properties[_param] = req.params.get(param)\n\n if 'changes-since' in filters:\n isotime = filters['changes-since']\n try:\n filters['changes-since'] = timeutils.parse_isotime(isotime)\n except ValueError:\n raise exc.HTTPBadRequest(_(\"Unrecognized changes-since value\"))\n\n if 'protected' in filters:\n value = self._get_bool(filters['protected'])\n if value is None:\n raise exc.HTTPBadRequest(_(\"protected must be True, or \"\n \"False\"))\n\n filters['protected'] = value\n\n # only allow admins to filter on 'deleted'\n if req.context.is_admin:\n deleted_filter = self._parse_deleted_filter(req)\n if deleted_filter is not None:\n filters['deleted'] = deleted_filter\n elif 'changes-since' not in filters:\n filters['deleted'] = False\n elif 'changes-since' not in filters:\n filters['deleted'] = False\n\n if properties:\n filters['properties'] = properties\n\n return filters", "def _build_filter(self, **kwargs):\n\n def object_filter(obj):\n for key, value in kwargs.items():\n # we replace dango-like lookup by dots, so attrgetter can do his job\n\n getter = utils.attrgetter(key)\n if hasattr(value, '__call__'):\n # User passed a callable for a custom comparison\n if not value(getter(obj)):\n return False\n else:\n if not getter(obj) == value:\n return False\n return True\n\n return object_filter", "def build_filters(self, filters=None):\n if filters is None:\n filters = {}\n\n grouped = get_grouped_filters(filters)\n branch_filters = get_branch_filter(filters)\n orm_filters = super(StoryResource, self).build_filters(filters)\n orm_filters['grouped'] = grouped\n orm_filters['br_filter'] = branch_filters\n\n if 'content_type__in' in filters:\n orm_filters['content_type__in'] = [CONTENT_HYDRATE[f] for f in filters['content_type__in'].split(',')]\n\n return orm_filters", "def _generate_stats(self, host_state, filter_properties):\n\n filter_function = None\n\n if ('filter_function' in host_state.capabilities and\n 
host_state.capabilities['filter_function'] is not None):\n filter_function = str(\n host_state.capabilities['filter_function'])\n\n stats = utils.generate_stats(host_state, filter_properties)\n\n stats['filter_function'] = filter_function\n\n return stats", "def get_filters(self):", "def properties(self, filters={}):\n return self.__get_list_client(Property)(filters=filters)", "def test_fields_from_property():\n prop_template = PropertyTemplate(name=\"cookie eating template\", bounds=IntegerBounds(0, 1000))\n cond_template = ConditionTemplate(name=\"Hunger template\",\n bounds=CategoricalBounds([\"hungry\", \"full\", \"peckish\"]))\n prop = Property(name=\"number of cookies eaten\",\n template=prop_template,\n origin='measured',\n value=NominalInteger(27))\n cond = Condition(name=\"hunger level\",\n template=cond_template,\n origin='specified',\n value=NominalCategorical(\"hungry\"))\n\n prop_and_conds = PropertyAndConditions(property=prop, conditions=[cond])\n assert prop_and_conds.name == prop.name\n assert prop_and_conds.template == prop.template\n assert prop_and_conds.origin == prop.origin\n assert prop_and_conds.value == prop.value", "def build_query_filters(**kwargs):\n queryTarget = kwargs.get(\"queryTarget\", None)\n targetSubtreeClass = kwargs.get(\"targetSubtreeClass\", None)\n queryTargetFilter = kwargs.get(\"queryTargetFilter\", None)\n rspSubtree = kwargs.get(\"rspSubtree\", None)\n rspSubtreeInclude = kwargs.get(\"rspSubtreeInclude\", None)\n rspPropInclude = kwargs.get(\"rspPropInclude\", None)\n opts = \"\"\n if queryTarget is not None:\n opts+= \"&query-target=%s\" % queryTarget\n if targetSubtreeClass is not None:\n opts+= \"&target-subtree-class=%s\" % targetSubtreeClass\n if queryTargetFilter is not None:\n opts+= \"&query-target-filter=%s\" % queryTargetFilter\n if rspSubtree is not None:\n opts+= \"&rsp-subtree=%s\" % rspSubtree\n if rspSubtreeInclude is not None:\n opts+= \"&rsp-subtree-include=%s\" % rspSubtreeInclude\n if rspPropInclude is not None:\n opts+= \"&rsp-prop-include=%s\" % rspPropInclude\n\n if len(opts)>0: opts = \"?%s\" % opts.strip(\"&\")\n return opts", "def __getSelectionFilter(self):\n \n selectionPairs = []\n selectionPairs.append(('field','field'))\n selectionPairs.append(('spw','spw'))\n selectionPairs.append(('polarization','correlation'))\n selectionPairs.append(('baseline','antenna'))\n selectionPairs.append(('time','timerange'))\n selectionPairs.append(('scan','scan'))\n selectionPairs.append(('uvdist','uvrange'))\n selectionPairs.append(('scanintent','intent'))\n selectionPairs.append(('observation','observation'))\n return self.__generateFilter(selectionPairs)", "def _propertyFilter(self, entity, params):\n\n if 'property_conditions' not in params:\n raise ProtocolError()\n\n conditions = params['property_conditions']\n\n for field, allowed_values in conditions.iteritems():\n if entity.__getattribute__(field) not in allowed_values:\n return False\n\n return True", "def build_feature_filter(self):\n if self.features == [\"*\"]:\n random_iso = list(self.data.keys())[0]\n self.features = set()\n for lang_features in self.data.values():\n self.features |= set(lang_features.keys())\n self.features = list(self.features)\n if self.exclusions:\n self.features = [f for f in self.features if f not in self.exclusions]\n self.feature_filter = set(self.features)", "def __init__(self, filter_spec = [ [{},False] ]):\n\n Qt.QObject.__init__(self)\n\n\n # key = property name of Element object\n # value = displayed column name for tables showing choices 
and matches\n self.elem_property_vs_col_name = \\\n {'name':'Name', 'devname':'Dev. Name', 'cell':'Cell',\n 'family':'Family', 'girder':'Girder', 'group':'Group',\n 'index':'Lat. Index', 'length':'Eff.Len', 'phylen':'Phys. Len.',\n 'pv':'PV', 'sb':'sb', 'se':'se', 'symmetry':'Symmetry',\n 'virtual':'Virtual', 'sequence':'Sequence'}\n\n # key = property name of Element object & exclusion flag\n # value = displayed column name for table showing filters\n self.filter_property_vs_col_name = \\\n self.elem_property_vs_col_name.copy()\n self.filter_property_vs_col_name.update({'exclude':'Excl.'}) # adding extra column\n\n # Specify the default column order you want for tables showing\n # choices and matches.\n self.elem_property_list = ['family', 'name', 'devname', 'cell',\n 'girder', 'symmetry', 'group', 'virtual',\n 'sb', 'se', 'pv', 'length', 'phylen',\n 'index', 'sequence']\n self.col_name_list = [self.elem_property_vs_col_name[prop]\n for prop in self.elem_property_list]\n self.choice_dict = dict.fromkeys(self.elem_property_list)\n\n # Specify the default column order you want for table showing\n # filters.\n self.filter_property_list = self.elem_property_list[:]\n self.filter_property_list.insert(0, 'exclude')\n self.filter_col_name_list = [self.filter_property_vs_col_name[prop]\n for prop in self.filter_property_list]\n self.filter_dict = dict.fromkeys(self.filter_property_list)\n\n self.numeric_filter_list = ['index', 'phylen', 'length', 'sb', 'se']\n self.not_implemented_filter_list = ['sequence']\n\n self.filter_spec = filter_spec\n\n self.allElements = ap.getElements('*')\n\n # Initialization of matching data information\n self.matched = [ [True]*len(self.allElements) ]\n self.combine_matched_list()\n self.update_choice_dict()\n\n # Apply initial filters provided by a user, if any.\n if self.filter_spec:\n isCaseSensitive = False\n self.filterData(range(len(self.filter_spec)), isCaseSensitive)\n\n self.selectedElements = []", "def build_filters(self, filters = None):\n if filters is None:\n filters = {}\n \n orm_filters = super(AreaResource, self).build_filters(filters)\n \n if \"level\" in filters:\n orm_filters[\"layout__level\"] = int(filters[\"level\"])\n \n return orm_filters", "def _write_filter_params(self, spec):\n spec.switch_write_focus(self.REGIONS.FILTER_PARAMS.value)\n for param in self._filter_params:\n spec.write_value(param, data_type=DataType.FLOAT_64)", "def create_filter_from_args(self, args: dict) -> Filter:\n keys = set(args.keys())\n filter_args = {}\n\n if \"name\" in keys:\n value = args.get('name')\n if value != \"\":\n filter_args.update({\"text_filter\": args.get('name')})\n if \"product_in\" in keys:\n value = args.get('product_in')\n if value != \"\":\n filter_args.update({\"product_in\": 'true' if value == \"yes\" else 'false'})\n if \"human_in\" in keys:\n value = args.get('human_in')\n if value != \"\":\n filter_args.update({\"human_in\": 'true' if value == \"yes\" else 'false'})\n if \"institutional\" in keys:\n value = args.get('institutional')\n if value != \"\":\n filter_args.update({\"institutional\": 'true' if value == \"yes\" else 'false'})\n if \"format\" in keys:\n value = args.get('format')\n if value != \"\":\n filter_args.update({\"picture_format\": 'true' if value == \"vertical\" else 'false'})\n if \"credit\" in keys:\n value = args.get('credit')\n if value != \"\":\n filter_args.update({\"author_credits\": value})\n if \"limited_use\" in keys:\n value = args.get('limited_use')\n if value != \"\":\n filter_args.update({\"limited_usage\": 
'true' if value == \"yes\" else 'false'})\n if \"tags\" in keys:\n value = args.get('tags')\n if value != \"\":\n filter_args.update({\"limited_usage\": value.split(';')})\n\n f = Filter(**filter_args)\n return f", "def _set_filters(self, options):\n if options.keywords:\n self.filters[\"keywords\"] = string_to_list(options.keywords)\n if options.features:\n self.filters[\"features\"] = string_to_list(options.features)\n if options.authors:\n self.filters[\"authors\"] = string_to_list(options.authors)\n if options.version:\n self.filters[\"version\"] = options.version", "def _component_specs(self):\n specs = dict(pretransformed_input=self._input_spec)\n if self._transform_is_composite:\n specs['transform_fn'] = self.transform_or_spec\n if self._also_track_spec is not None:\n specs['also_track'] = self._also_track_spec\n return specs", "def _init_optimizer_params(self):\n order = [\n [Peaking.__name__, True, True], # Peaking\n [LowShelf.__name__, True, True], # Low shelfs\n [HighShelf.__name__, True, True], # High shelfs\n [Peaking.__name__, True, False], # Peaking with fixed q\n [LowShelf.__name__, True, False], # Low shelfs with fixed q\n [HighShelf.__name__, True, False], # High shelfs with fixed q\n [Peaking.__name__, False, True], # Peaking with fixed fc\n [LowShelf.__name__, False, True], # Low shelfs with fixed fc\n [HighShelf.__name__, False, True], # High shelfs with fixed fc\n [Peaking.__name__, False, False], # Peaking with fixed fc and q\n [LowShelf.__name__, False, False], # Low shelfs with fixed fc and q\n [HighShelf.__name__, False, False], # High shelfs with fixed fc and q\n ]\n\n def init_order(filter_ix):\n filt = self.filters[filter_ix]\n ix = order.index([filt.__class__.__name__, filt.optimize_fc, filt.optimize_q])\n val = ix * 100\n if filt.optimize_fc:\n val += 1 / np.log2(filt.max_fc / filt.min_fc)\n return val\n\n # Initialize filter params as list of empty lists, one per filter\n filter_params = [[]] * len(self.filters)\n # Indexes to self.filters sorted by filter init order\n filter_argsort = sorted(list(range(len(self.filters))), key=init_order, reverse=True)\n remaining_target = self.target.copy()\n for ix in filter_argsort: # Iterate sorted filter indexes\n filt = self.filters[ix] # Get filter\n filter_params[ix] = filt.init(remaining_target) # Init filter and place params to list of lists\n remaining_target -= filt.fr # Adjust target\n filter_params = np.concatenate(filter_params).flatten() # Flatten params list\n return filter_params", "def init_filters(model: Model, settings: Model) -> None:\n filters = [\n {\"name\": \"Project\", \"filtration_type\": \"drop-down\"},\n {\"name\": \"Attachments\", \"filtration_type\": \"numeric\"},\n {\"name\": \"Priority\", \"filtration_type\": \"drop-down\"},\n {\"name\": \"Resolved\", \"filtration_type\": \"date\"},\n {\"name\": \"Labels\", \"filtration_type\": \"string\"},\n {\"name\": \"Created\", \"filtration_type\": \"date\"},\n {\"name\": \"Comments\", \"filtration_type\": \"numeric\"},\n {\"name\": \"Status\", \"filtration_type\": \"drop-down\"},\n {\"name\": \"Key\", \"filtration_type\": \"drop-down\"},\n {\"name\": \"Summary\", \"filtration_type\": \"string\"},\n {\"name\": \"Resolution\", \"filtration_type\": \"drop-down\"},\n {\"name\": \"Description\", \"filtration_type\": \"string\"},\n {\"name\": \"Components\", \"filtration_type\": \"string\"},\n ]\n for filter_ in filters:\n model.objects.create(\n name=filter_[\"name\"],\n filtration_type=filter_[\"filtration_type\"],\n settings=settings,\n )", "def 
__properties__(self) -> dict:\r\n parameters = [\r\n d for d in dir(self) if (d[0] != \"_\") and (d.count(\"set\") == 0)\r\n and (d.count(\"_c\") == 0) and (d.count(\"_f\") == 0)\r\n ]\r\n\r\n return self.__as_json__(parameters)", "def filtered_by(self, property):\n C = copy.deepcopy(self)\n C.filter_by(property)\n return C", "def build_query_filters(**kwargs):\n queryTarget = kwargs.get(\"queryTarget\", None)\n targetSubtreeClass = kwargs.get(\"targetSubtreeClass\", None)\n queryTargetFilter = kwargs.get(\"queryTargetFilter\", None)\n rspSubtree = kwargs.get(\"rspSubtree\", None)\n rspSubtreeClass = kwargs.get(\"rspSubtreeClass\", None)\n rspSubtreeInclude = kwargs.get(\"rspSubtreeInclude\", None)\n rspPropInclude = kwargs.get(\"rspPropInclude\", None)\n orderBy = kwargs.get(\"orderBy\", None)\n opts = \"\"\n if queryTarget is not None:\n opts+= \"&query-target=%s\" % queryTarget\n if targetSubtreeClass is not None:\n opts+= \"&target-subtree-class=%s\" % targetSubtreeClass\n if queryTargetFilter is not None:\n opts+= \"&query-target-filter=%s\" % queryTargetFilter\n if rspSubtree is not None:\n opts+= \"&rsp-subtree=%s\" % rspSubtree\n if rspSubtreeClass is not None:\n opts+= \"&rsp-subtree-class=%s\" % rspSubtreeClass\n if rspSubtreeInclude is not None:\n opts+= \"&rsp-subtree-include=%s\" % rspSubtreeInclude\n if rspPropInclude is not None:\n opts+= \"&rsp-prop-include=%s\" % rspPropInclude\n if orderBy is not None:\n opts+= \"&order-by=%s\" % orderBy\n\n if len(opts)>0: opts = \"?%s\" % opts.strip(\"&\")\n return opts", "def __init__(self, filters, use_include_order):\n self.filters = filters\n self.use_include_order = use_include_order", "def __init__(self, type: int, filter: int):\n ...", "def build_filters(self, filters = None):\n if filters is None:\n filters = {}\n \n orm_filters = super(UserResource, self).build_filters(filters)\n \n if \"area\" in filters:\n area_id = filters['area']\n area = Area.objects.get(id = area_id)\n \n #checked_in_user_profiles = [user_ctx.user for user_ctx in UserContext.objects.filter(currentArea = area)]\n orm_filters[\"pk__in\"] = [user_ctx.user.pk \n for user_ctx in UserContext.objects.filter(currentArea = area)]\n \n elif \"environment\" in filters:\n environment_id = filters['environment']\n environment = Environment.objects.get(id = environment_id)\n \n #checked_in_user_profiles = [user_ctx.user for user_ctx in UserContext.objects.filter(currentArea = area)]\n orm_filters[\"pk__in\"] = [user_ctx.user.pk \n for user_ctx in UserContext.objects.filter(currentEnvironment = environment)]\n \n return orm_filters", "def build(self, spec, prefix):\n make()", "def property_setup(self, properties):\n return properties", "def build_filters(self, view, filters=None):\n query_builder = self.get_query_builder(backend=self, view=view)\n return query_builder.build_query(**(filters if filters else {}))", "def buildReport(cls, queryList):\n boxList = list()\n for dslString,filterList in queryList:\n data = cls.__dataRequest(dslString[0])\n if data != '{}':\n for filter in filterList:\n try:\n if filter:\n filterObj = filter()\n filterObj.loadData(data)\n boxList.extend(filterObj.createBoxList())\n except Exception as e:\n devLogger.error(\"Could not create Filter object: \" + str(e))\n return boxList", "def __init__( self, filters=None, prx=None ):\n\n if filters is None:\n if prx is None:\n\n self._filter_list = rts2_wwwapi.rts2comm().get_filters()\n\n elif type(filters) == list:\n self._filter_list = filters\n\n elif type(filters) == dict:\n raise 
TypeError(\"Filters are should not be a dict, it probably should be None\")\n # this assumes that the keywords of the dictionary are \n # the fitler names and the value is the filter number. \n\n\n #sort by filter number and reverse look up. \n # this doesn't work in python3\n #for key, value in sorted(filters.iteritems(), key=lambda (k,v): (v,k)):\n #self._filter_list.append( key )\n\n elif type(filters) == str or type(filters) == unicode:\n self._filter_list = str(filters).split()\n\n else:\n raise TypeError(\"Unexpected filter type {}, type must be string, unicode, list or dict\".format(type(filters)))", "def build_from_mapping(mapping, module_params):\n filters = module_params.get('filters', {})\n for param in module_params:\n if param != 'filters' and module_params[param] and param in mapping:\n filters[mapping[param]] = module_params[param]\n return from_dict(filters)", "def _get_policy_object(\n platform,\n filters=None,\n pillar_key=\"acl\",\n pillarenv=None,\n saltenv=None,\n merge_pillar=True,\n):\n policy = _Policy()\n policy_filters = []\n if not filters:\n filters = []\n for filter_ in filters:\n if not filter_ or not isinstance(filter_, dict):\n continue # go to the next filter\n filter_name, filter_config = next(iter(filter_.items()))\n header = capirca.lib.policy.Header() # same header everywhere\n target_opts = [platform, filter_name]\n filter_options = filter_config.pop(\"options\", None)\n if filter_options:\n filter_options = _make_it_list({}, filter_name, filter_options)\n # make sure the filter options are sent as list\n target_opts.extend(filter_options)\n target = capirca.lib.policy.Target(target_opts)\n header.AddObject(target)\n filter_terms = []\n for term_ in filter_config.get(\"terms\", []):\n if term_ and isinstance(term_, dict):\n term_name, term_fields = next(iter(term_.items()))\n term = _get_term_object(\n filter_name,\n term_name,\n pillar_key=pillar_key,\n pillarenv=pillarenv,\n saltenv=saltenv,\n merge_pillar=merge_pillar,\n **term_fields\n )\n filter_terms.append(term)\n policy_filters.append((header, filter_terms))\n policy.filters = policy_filters\n log.debug(\"Policy config:\")\n log.debug(str(policy))\n platform_generator = _import_platform_generator(platform)\n policy_config = platform_generator(policy, 2)\n log.debug(\"Generating policy config for %s:\", platform)\n log.debug(str(policy_config))\n return policy_config", "def get_properties(self) -> List[ObserverPropertiesItem]:\n return [\n self._prop_builder.auto('Seed', type(self).seed),\n self._prop_builder.auto('Class filter', type(self).class_filter),\n self._prop_builder.auto('Random order', type(self).random_order),\n self._prop_builder.auto('Save gpu memory', type(self).save_gpu_memory),\n self._prop_builder.auto('Location filter ration', type(self).location_filter_ratio),\n self._prop_builder.auto('Dataset size', type(self).dataset_size),\n self._prop_builder.auto('Dataset config', type(self).dataset_config),\n self._prop_builder.auto('Switch training resets train pos ', type(self).switch_train_resets_train_pos),\n self._prop_builder.auto('Hide labels', type(self).is_hide_labels)\n ]", "def __init__(self, properties):\n self.attributes = {}\n self.output_info = {}\n for key, node in properties.walk():\n self.attributes[key[1]] = node.get_value().strip(\" '\")", "def get_prop_spec(client_factory, spec_type, properties):\r\n prop_spec = client_factory.create('ns0:PropertySpec')\r\n prop_spec.type = spec_type\r\n prop_spec.pathSet = properties\r\n return prop_spec", "def 
_build_query_filters(self, query: dict, filters: list) -> dict:\n\n for filter_tuple in filters:\n if not isinstance(filter_tuple, tuple) or len(filter_tuple) != 3:\n LOG.error(\"polling_filters tuple %s : invalid format or does not contain 3 elements - skipping this filter\", filter_tuple)\n continue\n if isinstance(filter_tuple[2], list) :\n # If \"value\" is a list of values then create a rule (json object) for each \n # value and use \"OR\" condition.\n condition = {'condition': \"OR\",\n 'rules': []}\n for value in filter_tuple[2]:\n rule = {}\n # Prepend fieldname with \"table.\" string\n rule['field'] = f\"table.{filter_tuple[0]}\"\n rule['operator'] = filter_tuple[1]\n rule['value'] = value\n condition['rules'].append(rule)\n query['rules'].append(condition)\n else:\n # Create a single rule for this tuple\n rule = {}\n field_name = f\"table.{filter_tuple[0]}\"\n rule['field'] = field_name\n rule['operator'] = filter_tuple[1]\n rule['value'] = filter_tuple[2]\n query['rules'].append(rule)\n return query", "def __generateFilter(self, selectionPairs):\n filter = None\n for (selSyntax, argSyntax) in selectionPairs:\n if self._arg.has_key(argSyntax) and self._arg[argSyntax] != '':\n if filter is None:\n filter = {}\n filter[selSyntax] = self._arg[argSyntax]\n \n return filter", "def __init__(self, parent, file_manager):\n self.parent = parent.GetParent()\n\n # List of selected data\n self.data = parent.GetSelectedData()\n\n # Output tier name\n self.tier_name = parent.GetFiltererdTierName()\n\n # Output format\n self.annot_format = parent.GetAnnotationFormat()\n\n # List of files/tiers to filter\n self.file_manager = file_manager\n\n # for \"rel\" filter only\n try:\n self.y_tier_name = parent.GetRelationTierName()\n except AttributeError:\n self.y_tier_name = None\n\n # for \"tag\", \"loc\" and \"dur\" filters\n try:\n # Match all or match any of the filters\n self.match_all = parent.GetMatchAll()\n except AttributeError:\n self.match_all = None", "def _build_filters(self, criteria: Q):\n composed_query = query.Q()\n\n if criteria.connector == criteria.AND:\n for child in criteria.children:\n if isinstance(child, Q):\n composed_query = composed_query & self._build_filters(child)\n else:\n stripped_key, lookup_class = self.provider._extract_lookup(child[0])\n lookup = lookup_class(stripped_key, child[1])\n if criteria.negated:\n composed_query = composed_query & ~lookup.as_expression()\n else:\n composed_query = composed_query & lookup.as_expression()\n else:\n for child in criteria.children:\n if isinstance(child, Q):\n composed_query = composed_query | self._build_filters(child)\n else:\n stripped_key, lookup_class = self.provider._extract_lookup(child[0])\n lookup = lookup_class(stripped_key, child[1])\n if criteria.negated:\n composed_query = composed_query | ~lookup.as_expression()\n else:\n composed_query = composed_query | lookup.as_expression()\n\n return composed_query", "def __init__(self, spec):\n self.spec = spec", "def build_filters(self, filters=None):\n filters.pop('username')\n return super(UserResource, self).build_filters(filters)", "def __init__(self, *args, **kwargs):\n\n # Construct the base instance.\n super(FilterCommitList, self).__init__(*args, **kwargs)\n\n # Save the \"stop on first match\" flag.\n self.matchfirst = self.get_boolean('matchFirst')\n logger.debug('matchFirst = {0}'.format(self.matchfirst))\n\n # Construct a regular expression tag evaluator for the path\n # names.\n pathregextag = self.thistag.find('PathRegex')\n if pathregextag != None:\n 
self.pathregex = RegexTag(pathregextag)\n else:\n self.pathregex = None\n\n # Construct a regular expression tag evaluator for the change\n # types.\n typeregextag = self.thistag.find('ChgTypeRegex')\n if typeregextag != None:\n self.typeregex = RegexTag(typeregextag)\n else:\n self.typeregex = None\n\n # Require at least one regex tag.\n if self.typeregex == None and self.pathregex == None:\n raise ValueError(\n 'Required tag missing: PathRegex or ChgTypeRegex')", "def get_prop_spec(client_factory, spec_type, properties):\n prop_spec = client_factory.create('ns0:PropertySpec')\n prop_spec.type = spec_type\n prop_spec.pathSet = properties\n return prop_spec", "def _create_filter_object(form_data: Dict) -> Q:\n filter_object = Q(title__icontains=form_data[\"title\"])\n filter_object &= Q(author__icontains=form_data[\"author\"])\n filter_object &= Q(\n publication_language__icontains=form_data[\"publication_language\"]\n )\n if form_data[\"publication_date_start\"]:\n filter_object &= Q(\n publication_date__gte=form_data[\"publication_date_start\"]\n )\n if form_data[\"publication_date_end\"]:\n filter_object &= Q(publication_date__lte=form_data[\"publication_date_end\"])\n return filter_object", "def testUsingFilterTool(self):\n pass", "def __init__(self, filters, event_file_path, device_name):\n super().__init__(device_name=device_name)\n self._filters_dict = {}\n self.event_file_path = event_file_path\n self.load_filters(filters)", "def _component_specs(self):\n specs = dict(pretransformed_input=self._input_spec)\n if self._transform_is_composite:\n specs['bijector'] = self.transform_or_spec\n return specs", "def condition_filters(self):\r\n return filters.Filters(self)", "def build_filters(self, filters=None):\n\n if filters is None:\n filters = {}\n\n orm_filters = super(EmployeeResource, self).build_filters(filters)\n\n if 'role' in filters:\n ids = (Employee.by_assignment_role(filters['role'])\n .values_list('id', flat=True))\n orm_filters['pk__in'] = ids\n\n return orm_filters", "def __init__(self, p_description: str, property_name: str):\n self._p_description = p_description\n self._property_name = property_name\n self._property_data = None\n self._vectorized_data = None\n self._categories = None", "def _add_matcher_specific_properties_to_json(self):\n return {\n 'unaryNumericMatcherData': {\n 'dataType': self._data_type,\n 'value': self._original_value,\n }\n }", "def _add_matcher_specific_properties_to_json(self):\n return {\n 'unaryNumericMatcherData': {\n 'dataType': self._data_type,\n 'value': self._original_value,\n }\n }", "def _add_matcher_specific_properties_to_json(self):\n return {\n 'unaryNumericMatcherData': {\n 'dataType': self._data_type,\n 'value': self._original_value,\n }\n }", "def assert_filter_builds_to(self, expect, filter, _chain_filters=None):\n final_query = {'bool': {'must_not': [RESEARCH.to_dict()]}}\n\n if expect:\n final_query['bool']['must'] = expect\n main, nested = filter.build(_chain_filters)\n assert final_query == main.to_dict()\n\n return main, nested", "def __init__(\n self,\n kind=None,\n project=None,\n namespace=None,\n ancestor=None,\n filters=(),\n projection=(),\n order=(),\n distinct_on=(),\n limit=None):\n self.kind = kind\n self.project = project\n self.namespace = namespace\n self.ancestor = ancestor\n self.filters = filters or ()\n self.projection = projection\n self.order = order\n self.distinct_on = distinct_on\n self.limit = limit", "def make_filter_specification(cls, filter_string):\n try:\n return parse_filter(filter_string)\n 
except ParseException as err:\n raise ValueError('Expression parameters have errors. %s' % err)", "def get_filter_config(\n platform,\n filter_name,\n filter_options=None,\n terms=None,\n prepend=True,\n pillar_key=\"acl\",\n pillarenv=None,\n saltenv=None,\n merge_pillar=True,\n only_lower_merge=False,\n revision_id=None,\n revision_no=None,\n revision_date=True,\n revision_date_format=\"%Y/%m/%d\",\n):\n if not filter_options:\n filter_options = []\n if not terms:\n terms = []\n if merge_pillar and not only_lower_merge:\n acl_pillar_cfg = _get_pillar_cfg(\n pillar_key, saltenv=saltenv, pillarenv=pillarenv\n )\n filter_pillar_cfg = _lookup_element(acl_pillar_cfg, filter_name)\n filter_options = filter_options or filter_pillar_cfg.pop(\"options\", None)\n if filter_pillar_cfg:\n # Only when it was able to find the filter in the ACL config\n pillar_terms = filter_pillar_cfg.get(\n \"terms\", []\n ) # No problem if empty in the pillar\n terms = _merge_list_of_dict(terms, pillar_terms, prepend=prepend)\n # merge the passed variable with the pillar data\n # any filter term not defined here, will be appended from the pillar\n # new terms won't be removed\n filters = []\n filters.append(\n {\n filter_name: {\n \"options\": _make_it_list({}, filter_name, filter_options),\n \"terms\": terms,\n }\n }\n )\n return get_policy_config(\n platform,\n filters=filters,\n pillar_key=pillar_key,\n pillarenv=pillarenv,\n saltenv=saltenv,\n merge_pillar=merge_pillar,\n only_lower_merge=True,\n revision_id=revision_id,\n revision_no=revision_no,\n revision_date=revision_date,\n revision_date_format=revision_date_format,\n )", "def buildRegFilterList(self, filename, listname='regFilterList'):", "def get_params(self):\n outputs = ['sample',\n 'ratio_params',\n 'despike_params',\n 'autorange_params',\n 'bkgcorrect_params']\n\n out = {}\n for o in outputs:\n out[o] = getattr(self, o)\n\n out['filter_params'] = self.filt.params\n out['filter_sequence'] = self.filt.sequence\n out['filter_used'] = self.filt.make_keydict()\n\n return out", "def from_facets(*args, **kwargs):\n facets = Facets(self._default_library, *args, **kwargs)\n filter = Filter(facets=facets)\n qu = MockQuery(\"query string\", filter=filter)\n built = qu.build(search)\n\n # Return the rest to be verified in a test-specific way.\n return built", "def _build_filters(self, criteria: Q):\n # Decide the function based on the connector type\n func = and_ if criteria.connector == criteria.AND else or_\n params = []\n for child in criteria.children:\n if isinstance(child, Q):\n # Call the function again with the child\n params.append(self._build_filters(child))\n else:\n # Find the lookup class and the key\n stripped_key, lookup_class = self.provider._extract_lookup(child[0])\n\n # Instantiate the lookup class and get the expression\n lookup = lookup_class(stripped_key, child[1], self.model_cls)\n if criteria.negated:\n params.append(~lookup.as_expression())\n else:\n params.append(lookup.as_expression())\n\n return func(*params)", "def filters(self):\n return {\n 'port_channels': port_channels\n }", "def _create_properties(self): # pylint: disable=no-self-use\n properties = {}\n properties[\"product\"] = \"eventhub.python\"\n properties[\"version\"] = __version__\n properties[\"framework\"] = \"Python {}.{}.{}\".format(*sys.version_info[0:3])\n properties[\"platform\"] = sys.platform\n return properties", "def _get_filters(self, request, queryset, view): # noqa\n self.opts = queryset.model._meta\n filter_fields = getattr(view, \"filter_fields\", None)\n 
self.exclude = {}\n self.filters = {}\n\n if filter_fields:\n blacklist = RexList(getattr(view, \"filter_blacklist\", []))\n mapping = self._get_mapping(view)\n\n for fieldname_arg in self.query_params:\n raw_value = self.query_params.get(fieldname_arg)\n if raw_value in [\"''\", '\"\"']:\n raw_value = \"\"\n\n negate = fieldname_arg[-1] == \"!\"\n\n if negate:\n filter_field_name = fieldname_arg[:-1]\n TARGET = self.exclude\n else:\n TARGET = self.filters\n filter_field_name = fieldname_arg\n\n if filter_field_name in self.excluded_query_params:\n continue\n if self.ignore_filter(request, filter_field_name, view):\n continue\n try:\n if filter_field_name in blacklist:\n raise InvalidQueryArgumentError(fieldname_arg)\n parts = None\n if \"__\" in filter_field_name:\n parts = filter_field_name.split(\"__\")\n filter_field_name = parts[0]\n op = parts[-1]\n else:\n op = \"\"\n processor = getattr(\n self,\n \"process_{}\".format(filter_field_name),\n getattr(view, \"drfqs_filter_{}\".format(filter_field_name), None),\n )\n\n if (filter_field_name not in filter_fields) and (not processor):\n self.unknown_arguments.append((fieldname_arg, filter_field_name))\n continue\n # raise InvalidQueryArgumentError(filter_field_name)\n if raw_value is None and not processor:\n continue\n # field is configured in Serializer\n # so we use 'source' attribute\n if filter_field_name in mapping:\n real_field_name = mapping[filter_field_name].source\n # if '.' in real_field_name:\n # real_field_name = real_field_name.split('.')[0]\n # field_name = real_field_name.replace('.', '__')\n else:\n real_field_name = filter_field_name\n\n if processor:\n payload = {\n \"field\": filter_field_name,\n \"request\": request,\n \"param\": fieldname_arg,\n \"negate\": negate,\n \"op\": op,\n \"field_name\": real_field_name,\n \"parts\": parts,\n \"value\": raw_value,\n \"real_field_name\": real_field_name,\n }\n _f, _e = processor(dict(self.filters), dict(self.exclude), **payload)\n self.filters.update(**_f)\n self.exclude.update(**_e)\n else:\n if not raw_value:\n continue\n # field_object = opts.get_field(real_field_name)\n value_type = self.field_type(real_field_name)\n if parts:\n f = \"{}__{}\".format(real_field_name, \"__\".join(parts[1:]))\n else:\n f = filter_field_name\n if op in [\"in\", \"contained_by\"]:\n value = raw_value.split(\",\")\n elif op == \"acontains\":\n value = raw_value.split(\",\")\n f = f.replace(\"__acontains\", \"__contains\")\n elif op == \"isnull\":\n value = parse_bool(raw_value)\n elif value_type == bool:\n value = parse_bool(raw_value)\n else:\n value = raw_value\n TARGET[f] = value\n except ValueError:\n raise InvalidQueryValueError(fieldname_arg, raw_value)\n except QueryFilterException:\n raise\n except Exception as e:\n logger.exception(e)\n raise\n return self.filters, self.exclude", "def build(self):\n return self.hyperparams.items()", "def filters(self):\n filters = IterDict()\n for key in self.FILTERS:\n filter = IterDict()\n filter_param = ((self.prefix or '') + '-' + key).strip('-')\n\n for value, display in self.fields[key].choices:\n choice = {}\n choice['value'] = value\n choice['display'] = display\n\n # These are raw values so they must come from data, and be\n # coerced to strings\n choice['active'] = str(value) == self.data.get(filter_param, '')\n\n params = copy.copy(self.data)\n # Filter by this current choice\n params[filter_param] = value\n choice['querystring'] = urllib.urlencode(params)\n # remove this filter\n params[filter_param] = ''\n choice['remove'] = 
urllib.urlencode(params)\n\n filter[value] = choice\n filters[key] = filter\n return filters", "def _build_filter_set(self, column_config_name, service_name=None, **filters):\n\n if not service_name:\n service_name = column_config_name\n\n if not self._column_configs.get(service_name):\n self._get_col_config(service_name, fetch_name=column_config_name)\n\n caomColConfig = self._column_configs[service_name]\n\n mashupFilters = []\n for colname, value in filters.items():\n\n # make sure value is a list-like thing\n if np.isscalar(value,):\n value = [value]\n\n # Get the column type and separator\n colInfo = caomColConfig.get(colname)\n if not colInfo:\n warnings.warn(\"Filter {} does not exist. This filter will be skipped.\".format(colname), InputWarning)\n continue\n\n colType = \"discrete\"\n if (colInfo.get(\"vot.datatype\", colInfo.get(\"type\")) in (\"double\", \"float\", \"numeric\")) \\\n or colInfo.get(\"treatNumeric\"):\n colType = \"continuous\"\n\n separator = colInfo.get(\"separator\")\n freeText = None\n\n # validate user input\n if colType == \"continuous\":\n if len(value) < 2:\n warningString = \"{} is continuous, \".format(colname) + \\\n \"and filters based on min and max values.\\n\" + \\\n \"Not enough values provided, skipping...\"\n warnings.warn(warningString, InputWarning)\n continue\n elif len(value) > 2:\n warningString = \"{} is continuous, \".format(colname) + \\\n \"and filters based on min and max values.\\n\" + \\\n \"Too many values provided, the first two will be \" + \\\n \"assumed to be the min and max values.\"\n warnings.warn(warningString, InputWarning)\n else: # coltype is discrete, all values should be represented as strings, even if numerical\n value = [str(x) for x in value]\n\n # check for wildcards\n\n for i, val in enumerate(value):\n if ('*' in val) or ('%' in val):\n if freeText: # freeText is already set cannot set again\n warningString = \"Only one wildcarded value may be used per filter, \" + \\\n \"all others must be exact.\\n\" + \\\n \"Skipping {}...\".format(val)\n warnings.warn(warningString, InputWarning)\n else:\n freeText = val.replace('*', '%')\n value.pop(i)\n\n # craft mashup filter entry\n entry = {}\n entry[\"paramName\"] = colname\n if separator:\n entry[\"separator\"] = separator\n if colType == \"continuous\":\n entry[\"values\"] = [{\"min\": value[0], \"max\":value[1]}]\n else:\n entry[\"values\"] = value\n if freeText:\n entry[\"freeText\"] = freeText\n\n mashupFilters.append(entry)\n\n return mashupFilters", "def __init__(self, specs, resources, properties=None):\n if not properties:\n properties = {}\n self.init_collections()\n self.properties = properties\n self.set_paths(specs, resources)\n self.parse_paths()\n self.find_resources()", "def create_filter(args: dict) -> dict | None:\n if 'ip' in args:\n args['networkInterfaces.ipv4'] = args.pop('ip')\n expression_list = []\n for arg in args:\n value = args.get(arg)\n if arg == 'riskScore':\n restriction = \"GREATER_THAN_OR_EQUAL_TO\"\n values_list = [arg_to_number(value)]\n else:\n restriction = \"IN\"\n values_list = argToList(value)\n\n values_res = [{\"value\": val} for val in values_list]\n expression = {\n \"propertyName\": arg,\n \"restrictionType\": restriction,\n \"propertyValues\": values_res\n }\n expression_list.append(expression)\n if expression_list:\n return {\"criteria\": {\"criteriaList\": [{\"expressionList\": expression_list}], \"predicateType\": \"AND\"}}\n else:\n return None", "def __init__(self, *properties):\n self._properties = properties", "def 
_add_matcher_specific_properties_to_json(self):\n return {\n 'betweenMatcherData': {\n 'dataType': self._data_type,\n 'start': self._original_lower,\n 'end': self._original_upper\n }\n }", "def __init__(self, *args, **kw):\n\n # Extract options from source strings.\n\n self.code_line = kw.get(\"code_line\")\n if self.code_line:\n self.code_line = self.code_line.rstrip(\"\\n\")\n else:\n return\n\n self.prop_line = kw.get(\"prop_line\")\n if self.prop_line:\n self.prop_line = self.prop_line.rstrip(\"\\n\")\n else:\n return\n\n self.ast_dict = kw.get(\"ast_dict\")\n if self.ast_dict:\n # name │ lineno │ value │ namespace\n\n # Collection will check if namespace exists,\n # and create new collection if necessary.\n pass\n else:\n pass\n\n # Set instance attribute values.\n self.prop_args, self.prop_kw = eval(self.prop_line, globals())\n # Ensure keywords are case matched.\n self.prop_kw = {CasePicker.to_snake(k):v for k, v in self.prop_kw.items()}\n\n self.field_name = self.ast_dict[\"name\"]\n self.default_val = self.ast_dict[\"value\"]\n\n # Set options from DPROPERTY specifiers.\n self.data_type = self.prop_kw.get(\"type\", type(self.default_val))\n ##self.default_val = self.data_type(self.default_val)\n self.category = self.prop_kw.get(\"category\", \"\")\n #self.category = self.category.split(\"|\") # Category|Subcategory\n self.display_name = self.prop_kw.get(\"display_name\", CasePicker.to_pascal(self.field_name))\n\n # Set input ranges.\n self.from_ = self.prop_kw.get(\"from\",\n self.prop_kw.get(\"min\", None))\n self.to = self.prop_kw.get(\"to\",\n self.prop_kw.get(\"max\", None))\n try:\n self.from_, self.to = self.prop_kw.get(\"range\")\n except:\n pass", "def __init__(self, \n cutoff_frequency, \n order, \n filter_type=\"maximally_flat\"):\n self.cutoff_freq = cutoff_frequency\n self.order = order\n self.filter_type = filter_type\n \n #TODO: Initialise filter based on maximally flat prototype", "def build_parameters(pobj):\n ViscosityWilke.build_parameters(pobj)", "def _add_filters(self, filters):\n self._env.filters['dateformat'] = dateformat\n self._env.filters.update(filters or {})", "def build_query(self):\r\n\r\n # this filter is required\r\n query = Metric.query.filter(Metric.metric_type == self.metric_type)\r\n\r\n # get query parameters (parameters which are not here are ignored)\r\n is_active = request.args.get('is_active')\r\n frequency = request.args.get('frequency')\r\n threshold_type = request.args.get('threshold_type')\r\n sort = request.args.get('sort')\r\n\r\n # process each parameter, and if valid add it as a query condition\r\n if is_active is not None:\r\n is_active = is_active.lower() == 'true'\r\n query = Metric.query.filter_by(is_active=is_active)\r\n if frequency is not None:\r\n try:\r\n frequency = Frequency.from_name(frequency)\r\n except ValueError as e:\r\n msg = f\"Invalid 'frequency': {frequency}. Use one of {Frequency.values()}\"\r\n abort(400, message=msg)\r\n query = query.filter_by(frequency=frequency)\r\n if threshold_type is not None:\r\n try:\r\n threshold_type = ThresholdType.from_name(threshold_type)\r\n except ValueError as e:\r\n msg = f\"Invalid 'threshold_type': {threshold_type}. 
Use one of \" \\\r\n f\"{ThresholdType.values()}\"\r\n abort(400, message=msg)\r\n query = query.filter_by(threshold_type=threshold_type)\r\n if sort is not None and sort.lstrip(\"-\") == 'metric_id':\r\n query = query.order_by(Metric.metric_id.desc())\r\n else:\r\n query = query.order_by(Metric.metric_id)\r\n\r\n return query", "def amh_attr_filter_query(self):\n \n attr_filter_query = \"\"\"\n WITH {final_cte_name} as (\n -- Pull list of devices that were active (has any row; don't need TVT >0) in the past 4 weeks\n SELECT DISTINCT device_id\n FROM tubidw.all_metric_hourly\n WHERE DATE_TRUNC('week',hs) >= dateadd('week',-4,DATE_TRUNC('week',GETDATE()))\n AND DATE_TRUNC('week',hs) < DATE_TRUNC('week',GETDATE())\n {attr_filter} -- attribute filters dynamically populate here\n -- TODO: currently can't get a metric/attribute combo filter, like \"devices that watched at least 50% of a specific content_id\"\n )\n \"\"\"\n return attr_filter_query", "def _initFilterTable(self):\n\n t = self.tableWidget_filter # shorthand notation\n\n ### Header population & properties\n t.setHorizontalHeaderLabels(self.data.filter_col_name_list)\n t.horizontalHeader().setMovable(True)\n\n ### Item population\n nRows = len(self.data.filter_spec)\n t.setRowCount(nRows)\n for (j, spec) in enumerate(self.data.filter_spec):\n for (i, filter_prop) in enumerate(self.data.filter_property_list):\n if filter_prop is not 'exclude':\n if filter_prop in spec[0]:\n item_string = spec[0][filter_prop]\n else:\n item_string = ''\n t.setItem(j,i,\n Qt.QTableWidgetItem(item_string))\n\n t.item(j,i).setFlags(Qt.Qt.ItemIsSelectable|\n Qt.Qt.ItemIsEditable|\n Qt.Qt.ItemIsDragEnabled|\n Qt.Qt.ItemIsEnabled) # Make it editable\n else:\n t.setItem(j,i,Qt.QTableWidgetItem(''))\n\n t.item(j,i).setFlags(Qt.Qt.ItemIsSelectable|\n Qt.Qt.ItemIsEditable|\n Qt.Qt.ItemIsDragEnabled|\n Qt.Qt.ItemIsUserCheckable|\n Qt.Qt.ItemIsEnabled) # Make it checkable\n if spec[1]: # exclusion flag\n t.item(j,i).setCheckState(Qt.Qt.Checked)\n else:\n t.item(j,i).setCheckState(Qt.Qt.Unchecked)\n\n\n\n ### Presentation formatting\n t.resizeColumnsToContents()\n for i in range(t.columnCount()):\n if t.columnWidth(i) > self.max_auto_adjust_column_width:\n t.setColumnWidth(i,self.max_auto_adjust_column_width)", "def __init__(self, source, parameter='', file_path=None):\n super().__init__() \n self.filter_type = 'tolerance'\n self.source = source\n self.parameter = parameter\n self._initate_filter_items()\n if file_path:\n self.load_filter_file(file_path)", "def _build_param_dict(self):\n self._build_common_param_dict()\n\n self._param_dict.add(Parameter.NUM_AVG_SAMPLES,\n r'ScansToAverage>([\\d]+)</ScansToAverage>',\n lambda match: int(match.group(1)),\n str,\n type=ParameterDictType.INT,\n display_name=\"Scans to Average\",\n description=\"Number of samples to average (must be even)\",\n range=INT16,\n startup_param=True,\n direct_access=False,\n default_value=4,\n visibility=ParameterDictVisibility.READ_WRITE)\n self._param_dict.add(Parameter.MIN_COND_FREQ,\n r'MinimumCondFreq>([\\d]+)</MinimumCondFreq',\n lambda match: int(match.group(1)),\n str,\n type=ParameterDictType.INT,\n display_name=\"Minimum Conductivity Frequency\",\n range=INT16,\n description=\"Minimum conductivity frequency to enable pump turn-on.\",\n startup_param=True,\n direct_access=False,\n default_value=500,\n units=Units.HERTZ,\n visibility=ParameterDictVisibility.IMMUTABLE)\n self._param_dict.add(Parameter.PUMP_DELAY,\n r'PumpDelay>([\\d]+)</PumpDelay',\n lambda match: 
int(match.group(1)),\n str,\n type=ParameterDictType.INT,\n display_name=\"Pump Delay\",\n range=INT16,\n description=\"Time to wait after minimum conductivity frequency is reached before turning pump on.\",\n startup_param=True,\n direct_access=False,\n default_value=60,\n units=Units.SECOND,\n visibility=ParameterDictVisibility.READ_WRITE)\n self._param_dict.add(Parameter.AUTO_RUN,\n r'AutoRun>(.*)</AutoRun',\n lambda match: True if match.group(1) == 'yes' else False,\n self._true_false_to_string,\n type=ParameterDictType.BOOL,\n display_name=\"Auto Run\",\n description=\"Enable automatic logging when power is applied: (true | false).\",\n range={'True': True, 'False': False},\n startup_param=True,\n direct_access=True,\n default_value=False,\n visibility=ParameterDictVisibility.IMMUTABLE)\n self._param_dict.add(Parameter.IGNORE_SWITCH,\n r'IgnoreSwitch>(.*)</IgnoreSwitch',\n lambda match: True if match.group(1) == 'yes' else False,\n self._true_false_to_string,\n type=ParameterDictType.BOOL,\n display_name=\"Ignore Switch\",\n description=\"Disable magnetic switch position for starting or stopping logging: (true | false)\",\n range={'True': True, 'False': False},\n startup_param=True,\n direct_access=True,\n default_value=True,\n visibility=ParameterDictVisibility.IMMUTABLE)\n self._param_dict.add(Parameter.OPTODE,\n r'OPTODE>(.*)</OPTODE',\n lambda match: True if match.group(1) == 'yes' else False,\n self._true_false_to_string,\n type=ParameterDictType.BOOL,\n display_name=\"Optode Attached\",\n description=\"Enable optode: (true | false)\",\n range={'True': True, 'False': False},\n startup_param=True,\n direct_access=True,\n default_value=True,\n visibility=ParameterDictVisibility.IMMUTABLE)\n self._param_dict.add(Parameter.VOLT1,\n r'ExtVolt1>(.*)</ExtVolt1',\n lambda match: True if match.group(1) == 'yes' else False,\n self._true_false_to_string,\n type=ParameterDictType.BOOL,\n display_name=\"Volt 1\",\n description=\"Enable external voltage 1: (true | false)\",\n range={'True': True, 'False': False},\n startup_param=True,\n direct_access=True,\n default_value=True,\n visibility=ParameterDictVisibility.IMMUTABLE)\n\n self._build_ctd_specific_params()", "def construct_params(self):\n\n return {\"expand\": self.get_expand()}", "def test_parse_filter_params_success(self):\n filter_params = {\n \"resolution\": \"daily\",\n \"time_scope_value\": \"-10\",\n \"time_scope_units\": \"day\",\n \"region\": FAKE.word(),\n \"payer_tenant_id\": FAKE.uuid4(),\n \"product_service\": FAKE.word(),\n }\n serializer = OCIFilterSerializer(data=filter_params)\n self.assertTrue(serializer.is_valid())", "def filter_by_property(self, properties=None, **kwargs):\r\n\t\tif properties is None:\r\n\t\t\tproperties = {}\r\n\t\tproperties.update(kwargs)\r\n\t\tresult_list = ElementList()\r\n\t\tfor element in self:\r\n\t\t\tif all(k in element.properties and element.properties[k] == v\r\n\t\t\t\t\tfor k, v in properties.items()):\r\n\t\t\t\tresult_list.append(element)\r\n\t\treturn result_list", "def _build_filter_chain(self):\n result = None\n for klass in self.filters:\n tmp = klass(self, self.args, result)\n logging.info(\"%s %s\", klass, tmp.active)\n if tmp.active:\n result = tmp\n return result or (lambda x: x)", "def __init__(self):\n\n super().__init__(\n filter_models=[\n DoorKalmanFilter(\n dynamics_model=DoorDynamicsModel(),\n virtual_sensor_model=DoorVirtualSensorModel(modalities={\"image\"}),\n ),\n DoorKalmanFilter(\n dynamics_model=DoorDynamicsModel(),\n 
virtual_sensor_model=DoorVirtualSensorModel(\n modalities={\"pos\", \"sensors\"}\n ),\n ),\n ],\n state_dim=3,\n )", "def __init__(self, image, filter_name, cutoff, order = 0):\n self.filter_name = filter_name\n self.image = image\n if filter_name == 'ideal_l':\n self.filter = self.get_ideal_low_pass_filter\n elif filter_name == 'ideal_h':\n self.filter = self.get_ideal_high_pass_filter\n elif filter_name == 'butterworth_l':\n self.filter = self.get_butterworth_low_pass_filter\n elif filter_name == 'butterworth_h':\n self.filter = self.get_butterworth_high_pass_filter\n elif filter_name == 'gaussian_l':\n self.filter = self.get_gaussian_low_pass_filter\n elif filter_name == 'gaussian_h':\n self.filter = self.get_gaussian_high_pass_filter\n\n self.cutoff = cutoff\n self.order = order", "def initialize_filter(self):\n shape = self.filter_size + (self.input_shape[-1], self.channels)\n self.filter = self.filter_initializer(shape)", "def initialize(self):\n \n #####################################\n # Filter Entity Pointers\n #####################################\n \n # CCSM Data Stream Name Space pointers \n #\n self.ccsm_debug_ptr = self.get_ns_pointer(\"CCSM/DEBUG\")\n self.delete_ptr = self.get_ns_pointer(\"CCSM/DELETE\")\n self.set_create_ptr = self.get_ns_pointer(\"CCSM/SET_CREAT\")\n self.set_destroy_ptr = self.get_ns_pointer(\"CCSM/SET_DESTROY\")\n self.callback_ptr = self.get_ns_pointer(\"CCSM/CALLBACK\")\n self.comp_create_ptr = self.get_ns_pointer(\"CCSM/COMP_CREATE\")\n self.comp_destroy_ptr = self.get_ns_pointer(\"CCSM/COMP_DESTROY\")\n self.add_ptr = self.get_ns_pointer(\"CCSM/ADD\")\n self.remove_ptr = self.get_ns_pointer(\"CCSM/REMOVE\")\n \n # This list is created to make all of the namespace pointers\n # iterable. So there can be a simple 'catch-all' loop \n # for CCSM events in process().\n #\n self.pointers_list = [\n self.ccsm_debug_ptr,\n self.delete_ptr,\n self.set_create_ptr,\n self.set_destroy_ptr,\n self.callback_ptr,\n self.comp_create_ptr,\n self.comp_destroy_ptr,\n self.add_ptr,\n self.remove_ptr\n ]\n \n \n ################################\n # Filter Configuration Options\n ################################\n\n # This last parameter will simply determine if the events used\n # to generate the above intervals will be passed on to any\n # following filters, or destroyed.\n self.consume = self.params['consume']\n\n # The outpath for the CCSM narration.\n self.outpath = None\n if self.params.has_key('outfile'):\n self.outpath = self.params['outfile']\n \n \n ##############################################\n # Filtered Event Structures For Finalize\n ##############################################\n \n # List of ccsm events taken from the pipeline.\n self.ccsm_events = []" ]
[ "0.76403946", "0.65880555", "0.6298685", "0.6076335", "0.597899", "0.5965586", "0.5764983", "0.5655848", "0.56413084", "0.5557564", "0.5519107", "0.5488389", "0.54726166", "0.5452801", "0.544979", "0.5363146", "0.5314718", "0.51662815", "0.5132564", "0.5092276", "0.50902605", "0.5073957", "0.50721383", "0.5053869", "0.50466317", "0.5040117", "0.50374806", "0.50288486", "0.5009367", "0.5004019", "0.500366", "0.5003003", "0.49684358", "0.495946", "0.4946729", "0.49383578", "0.49220863", "0.49183092", "0.49182343", "0.49132785", "0.4897421", "0.4872786", "0.4869966", "0.4865726", "0.48561665", "0.4854717", "0.48404962", "0.48391452", "0.48340693", "0.48330542", "0.4825901", "0.48213708", "0.4818236", "0.48107922", "0.48082265", "0.4797853", "0.47976586", "0.4791619", "0.47882596", "0.47857642", "0.478279", "0.47793868", "0.4779081", "0.4779081", "0.4779081", "0.4774393", "0.47713766", "0.47706386", "0.47377118", "0.47342807", "0.47326782", "0.4725868", "0.47248918", "0.47220618", "0.4720516", "0.47196594", "0.4719143", "0.47144607", "0.47028953", "0.46998936", "0.4698896", "0.46935683", "0.46917972", "0.46871588", "0.46815404", "0.4679029", "0.4676659", "0.46742308", "0.4673798", "0.4673173", "0.4672065", "0.4665251", "0.46649057", "0.4664693", "0.46608537", "0.4658366", "0.4657661", "0.46547797", "0.4654338", "0.4649783" ]
0.66637796
1
Gets the list of properties for the collection of objects of the type specified.
def get_properties_for_a_collection_of_objects(vim, type, obj_list, properties): client_factory = vim.client.factory if len(obj_list) == 0: return [] prop_spec = get_prop_spec(client_factory, type, properties) lst_obj_specs = [] for obj in obj_list: lst_obj_specs.append(get_obj_spec(client_factory, obj)) prop_filter_spec = get_prop_filter_spec(client_factory, lst_obj_specs, [prop_spec]) return vim.RetrieveProperties(vim.get_service_content().propertyCollector, specSet=[prop_filter_spec])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_properties_for_a_collection_of_objects(vim, type,\n obj_list, properties):\n client_factory = vim.client.factory\n if len(obj_list) == 0:\n return []\n prop_spec = get_prop_spec(client_factory, type, properties)\n lst_obj_specs = []\n for obj in obj_list:\n lst_obj_specs.append(get_obj_spec(client_factory, obj))\n prop_filter_spec = get_prop_filter_spec(client_factory,\n lst_obj_specs, [prop_spec])\n return retrieve_properties_ex(vim,\n vim.service_content.propertyCollector,\n [prop_filter_spec])", "def get_all_properties(cls):\n return ['key', 'id'] + _.keys(cls._properties)", "def get_objects(vim, type, properties_to_collect=[\"name\"], all=False):\r\n client_factory = vim.client.factory\r\n object_spec = build_object_spec(client_factory,\r\n vim.get_service_content().rootFolder,\r\n [build_recursive_traversal_spec(client_factory)])\r\n property_spec = build_property_spec(client_factory, type=type,\r\n properties_to_collect=properties_to_collect,\r\n all_properties=all)\r\n property_filter_spec = build_property_filter_spec(client_factory,\r\n [property_spec],\r\n [object_spec])\r\n return vim.RetrieveProperties(vim.get_service_content().propertyCollector,\r\n specSet=[property_filter_spec])", "def properties(self, filters={}):\n return self.__get_list_client(Property)(filters=filters)", "def supported_type_properties() -> List[TypeProperty]:\n types_props: List[TypeProperty] = []\n for det in PLACE_DETECTORS:\n types_props.extend(det.supported_types_and_properties())\n\n return types_props", "def collect_properties(service_instance, view_ref, obj_type, path_set=None,\n include_mors=False):\n collector = service_instance.content.propertyCollector\n\n # Create object specification to define the starting point of\n # inventory navigation\n obj_spec = vmodl.query.PropertyCollector.ObjectSpec()\n obj_spec.obj = view_ref\n obj_spec.skip = True\n\n # Create a traversal specification to identify the path for collection\n traversal_spec = vmodl.query.PropertyCollector.TraversalSpec()\n traversal_spec.name = 'traverseEntities'\n traversal_spec.path = 'view'\n traversal_spec.skip = False\n traversal_spec.type = view_ref.__class__\n obj_spec.selectSet = [traversal_spec]\n\n # Identify the properties to the retrieved\n property_spec = vmodl.query.PropertyCollector.PropertySpec()\n property_spec.type = obj_type\n\n if not path_set:\n property_spec.all = True\n\n property_spec.pathSet = path_set\n\n # Add the object and property specification to the\n # property filter specification\n filter_spec = vmodl.query.PropertyCollector.FilterSpec()\n filter_spec.objectSet = [obj_spec]\n filter_spec.propSet = [property_spec]\n\n # Retrieve properties\n props = collector.RetrieveContents([filter_spec])\n\n data = []\n for obj in props:\n properties = {}\n for prop in obj.propSet:\n properties[prop.name] = prop.val\n\n if include_mors:\n properties['obj'] = obj.obj\n\n data.append(properties)\n return data", "def _PropList(self):\n prop_list = []\n\n if self.HASH_PROPERTIES is None and self.HASH_EXCLUDE is None:\n return prop_list\n\n # TODO(ckl): comprehensive list of \"internal\" properties\n exclude_list = self.HASH_EXCLUDE or tuple()\n exclude_list += metadata_api.GetFieldNames(self, ui_readonly=True)\n # TODO(raulg): The deleted can be removed from the exclude_list after all\n # records have been purged of deleted fields.\n exclude_list += ('deleted', 'key_subtype', 'key_order', 'key_name')\n\n for prop in self._properties:\n if '__' in prop and not prop.endswith('key_name'):\n continue\n if 
self.HASH_PROPERTIES is not None and prop not in self.HASH_PROPERTIES:\n continue\n if self.HASH_EXCLUDE is not None and prop in exclude_list:\n continue\n prop_list.append(prop)\n\n prop_list.sort()\n return prop_list", "def list_all_properties(self):\n properties = list(self.property_only_graph.nodes())\n properties = [SchemaProperty(_prop, self) for _prop in properties]\n return properties", "def get_objects(vim, type, properties_to_collect=None, all=False):\n if not properties_to_collect:\n properties_to_collect = [\"name\"]\n\n client_factory = vim.client.factory\n trav_spec = vim_util.build_recursive_traversal_spec(client_factory)\n object_spec = vim_util.build_object_spec(client_factory,\n vim.service_content.rootFolder,\n [trav_spec])\n property_spec = vim_util.build_property_spec(\n client_factory, type_=type,\n properties_to_collect=properties_to_collect,\n all_properties=all)\n property_filter_spec = vim_util.build_property_filter_spec(client_factory,\n [property_spec],\n [object_spec])\n property_collector = vim.service_content.propertyCollector\n return retrieve_properties_ex(vim,\n property_collector,\n [property_filter_spec])", "def getPropertiesAll():", "def _tp__get_typed_properties(self):\n try:\n return tuple(getattr(self, p) for p in self._tp__typed_properties)\n except AttributeError:\n raise NotImplementedError", "def get_object_properties(vim, collector, mobj, type, properties):\r\n client_factory = vim.client.factory\r\n if mobj is None:\r\n return None\r\n usecoll = collector\r\n if usecoll is None:\r\n usecoll = vim.get_service_content().propertyCollector\r\n property_filter_spec = client_factory.create('ns0:PropertyFilterSpec')\r\n property_spec = client_factory.create('ns0:PropertySpec')\r\n property_spec.all = (properties is None or len(properties) == 0)\r\n property_spec.pathSet = properties\r\n property_spec.type = type\r\n object_spec = client_factory.create('ns0:ObjectSpec')\r\n object_spec.obj = mobj\r\n object_spec.skip = False\r\n property_filter_spec.propSet = [property_spec]\r\n property_filter_spec.objectSet = [object_spec]\r\n return vim.RetrieveProperties(usecoll, specSet=[property_filter_spec])", "def list_property(\n self, key: str) -> Collection[Tuple[str, PropertyAttribute]]:\n return self._env.list_property(key)", "def get_properties(self):\n return self.properties", "def iterProperties(cls):\n meta = cls.staticMetaObject\n for i in range(meta.propertyCount()):\n yield meta.property(i).name()", "def ListPropertyValuesOfType(res_dict, prop, res_type):\n return [r['properties'][prop] for r in res_dict if r['type'] == res_type]", "def getProperties(self, owner: unicode) -> List[ghidra.program.model.util.PropertyMap]:\n ...", "def properties(self) -> List[TaskPropertyModel]:\n return self._properties", "def get_object_properties(vim, collector, mobj, type, properties):\n client_factory = vim.client.factory\n if mobj is None:\n return None\n usecoll = collector\n if usecoll is None:\n usecoll = vim.service_content.propertyCollector\n property_filter_spec = client_factory.create('ns0:PropertyFilterSpec')\n property_spec = client_factory.create('ns0:PropertySpec')\n property_spec.all = (properties is None or len(properties) == 0)\n property_spec.pathSet = properties\n property_spec.type = type\n object_spec = client_factory.create('ns0:ObjectSpec')\n object_spec.obj = mobj\n object_spec.skip = False\n property_filter_spec.propSet = [property_spec]\n property_filter_spec.objectSet = [object_spec]\n return retrieve_properties_ex(vim,\n usecoll,\n 
[property_filter_spec])", "def get_properties():\n properties = dict()\n properties['size'] = list()\n properties['color'] = list()\n properties['quality'] = list()\n u = models.Size.query.all()\n for i in u:\n properties['size'].append(i.size_name)\n u = models.Color.query.all()\n for i in u:\n properties['color'].append(i.color_name)\n u = models.Quality.query.all()\n for i in u:\n properties['quality'].append(i.quality_name)\n return make_response(jsonify(properties))", "def bson_properties(self):\n return []", "def getProperties(self):\n return self.properties", "def get_properties(self) -> List[ObserverPropertiesItem]:\n return [\n self._prop_builder.auto('Seed', type(self).seed),\n self._prop_builder.auto('Class filter', type(self).class_filter),\n self._prop_builder.auto('Random order', type(self).random_order),\n self._prop_builder.auto('Save gpu memory', type(self).save_gpu_memory),\n self._prop_builder.auto('Location filter ration', type(self).location_filter_ratio),\n self._prop_builder.auto('Dataset size', type(self).dataset_size),\n self._prop_builder.auto('Dataset config', type(self).dataset_config),\n self._prop_builder.auto('Switch training resets train pos ', type(self).switch_train_resets_train_pos),\n self._prop_builder.auto('Hide labels', type(self).is_hide_labels)\n ]", "def get_properties(self):\n return self.properties", "def properties(self):\n return self._properties", "def properties(self):\n return self._properties", "def getProperties(self, prop_colour):\n props = database_creator.db.query(\n \"SELECT name FROM main_property_deck WHERE property_colour = :prop_colour\", prop_colour=prop_colour)\n properties = []\n for i in props:\n properties.append(i[\"name\"])\n return properties", "def get_properties():", "def get_instance_properties(self):\n return [p for p in self.session.query(self.Property).all() \\\n if p.is_simple() and p.is_instance_property()]", "def properties(self):\n return self._props", "def properties_get(self):\n return self._get('properties')", "def get_property_list(self,filtr):\n\n\n return self.dp.get_property_list(filtr)", "def properties(self):\n\n return self._properties", "def json_properties(self):\n attributes = []\n all = vars(self)\n for var in all:\n if var[:1] != '_':\n attributes.append(var)\n if isinstance(self, db.Model):\n properties = self.properties().keys()\n for property in properties:\n if property[:1] != '_':\n attributes.append(property)\n return attributes", "def children(self, type=None):\n c = []\n\n depth = 1\n properties = {}\n\n props = [dav.DisplayName()]\n multiprops = [dav.ResourceType()]\n response = self._query_properties(props + multiprops, depth)\n properties = response.expand_simple_props(\n props=props, multi_value_props=multiprops\n )\n\n for path in list(properties.keys()):\n resource_types = properties[path][dav.ResourceType.tag]\n resource_name = properties[path][dav.DisplayName.tag]\n\n if type is None or type in resource_types:\n url = URL(path)\n if url.hostname is None:\n # Quote when path is not a full URL\n path = quote(path)\n # TODO: investigate the RFCs thoroughly - why does a \"get\n # members of this collection\"-request also return the\n # collection URL itself?\n # And why is the strip_trailing_slash-method needed?\n # The collection URL should always end with a slash according\n # to RFC 2518, section 5.2.\n if (isinstance(self, CalendarSet) and type == cdav.Calendar.tag) or (\n self.url.canonical().strip_trailing_slash()\n != self.url.join(path).canonical().strip_trailing_slash()\n ):\n 
c.append((self.url.join(path), resource_types, resource_name))\n\n ## TODO: return objects rather than just URLs, and include\n ## the properties we've already fetched\n return c", "def getProperties():", "def retrieve_properties_ex(vim, prop_coll, spec_set, max_count=500):\n objcont = []\n client_factory = vim.client.factory\n opts = client_factory.create('ns0:RetrieveOptions')\n opts.maxObjects = max_count\n res = vim.RetrievePropertiesEx(prop_coll,\n specSet=spec_set,\n options=opts)\n while True:\n if res and res.objects:\n objcont.extend(res.objects)\n if hasattr(res, \"token\") and res.token:\n res = vim.ContinueRetrievePropertiesEx(prop_coll, token=res.token)\n else:\n break\n return objcont", "def get_result_properties(self):\n return [p for p in self.session.query(self.Property).all() \\\n if p.is_simple() and p.is_result_property()]", "def get_all_properties(self) -> dict:\n return self._request(\n \"post\",\n URL,\n json=attr.asdict(\n Body(\"getAllProperties\", API_VERSION),\n filter=attr.filters.exclude(attr.fields(Body).params),\n ),\n )", "def properties(self) -> List[ProductionFlowItemProperty]:\n return self._properties", "def _get_managed_objects_properties(self, vim_type, properties=None):\n # Get Root Folder\n root_folder = self.content.rootFolder\n\n if properties is None:\n properties = ['name']\n\n # Create Container View with default root folder\n mor = self.content.viewManager.CreateContainerView(\n root_folder, [vim_type], True)\n\n # Create Traversal spec\n traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(\n name=\"traversal_spec\",\n path='view',\n skip=False,\n type=vim.view.ContainerView\n )\n\n # Create Property Spec\n property_spec = vmodl.query.PropertyCollector.PropertySpec(\n type=vim_type, # Type of object to retrieved\n all=False,\n pathSet=properties\n )\n\n # Create Object Spec\n object_spec = vmodl.query.PropertyCollector.ObjectSpec(\n obj=mor,\n skip=True,\n selectSet=[traversal_spec]\n )\n\n # Create Filter Spec\n filter_spec = vmodl.query.PropertyCollector.FilterSpec(\n objectSet=[object_spec],\n propSet=[property_spec],\n reportMissingObjectsInResults=False\n )\n\n return self.content.propertyCollector.RetrieveContents([filter_spec])", "def list_properties(self, class_specific=True, group_by_class=True):\n response = check_defined(self, inspect.stack()[0][3])\n if not response:\n return response\n properties = [{'class': self.name,\n 'properties': self.se.full_class_only_graph.nodes[self.uri]['properties']}]\n if not class_specific:\n # find all parent classes\n if self.output_type == \"PythonClass\":\n parents = [_item.uri for _item in self.ancestor_classes]\n else:\n parents = [self.se.cls_converter.get_uri(_item) for _item in self.ancestor_classes]\n # update properties, each dict represent properties associated with the class\n for _parent in parents:\n properties.append({\n \"class\": _parent,\n \"properties\": self.se.full_class_only_graph.nodes[_parent]['properties']\n })\n result = restructure_output(self,\n properties,\n inspect.stack()[0][3],\n self.output_type)\n if group_by_class:\n return result\n else:\n ungrouped_properties = []\n for _item in result:\n ungrouped_properties += _item['properties']\n return ungrouped_properties", "def list(cls):\n return [cls.__dict__.get(name) for name in dir(cls) if (\n not callable(getattr(cls, name)) and not name.startswith(\"_\")\n )]", "def transform_property_info_list(se, prop_list, output_type):\n props = [{\"description\": _prop.get(\"description\"),\n \"domain\": 
transform_schemaclasses_lst(se,\n _prop.get(\"domain\"),\n output_type),\n \"range\": transform_schemaclasses_lst(se,\n _prop.get(\"range\"),\n output_type),\n \"curie\": se.cls_converter.get_curie(_prop.get(\"uri\")),\n \"label\": se.cls_converter.get_label(_prop.get(\"uri\")),\n \"uri\": _prop.get(\"uri\"),\n \"object\": se.get_property(_prop.get(\"uri\"))} for _prop in prop_list]\n return props", "def properties(cls):\n nameList = dir(cls)\n hashValue = hash(repr(nameList))\n if not cls._cache or cls._cache[0] != hashValue:\n result = [(name, getattr(cls, name)) for name in nameList\n if not name.startswith(\"_\") and\n not inspect.ismethod(getattr(cls, name))]\n cls._cache = hashValue, result\n return cls._cache[1]", "def get_field_list_by_type(self, field_type):\n field_list = []\n for field in self.fields:\n if field.get_field_type() == field_type:\n field_list.append(field)\n return field_list", "def properties(self):\r\n if self._properties is None:\r\n res = self._con.get(self._url, {'f':'json'})\r\n self._properties = PropertyMap(res)\r\n return self._properties", "def properties(self):\r\n if self._properties is None:\r\n res = self._con.get(self._url, {'f':'json'})\r\n self._properties = PropertyMap(res)\r\n return self._properties", "def listAllMemberProperties(self, exclude_props=[], include_props=None):\n if not self.is_compatible(): return []\n all_props = set([])\n for member in self.getAllMembers():\n user = member.getUser()\n for sheet in user.getOrderedPropertySheets():\n all_props = all_props | set(sheet.propertyIds())\n\n # property sheet hasn't id property, next we add it manually\n all_props = (all_props | set(['id'])) - set(exclude_props)\n if include_props: all_props = all_props & set(include_props)\n return list(all_props)", "def get_objects_by_type(self, *types) -> List[TgnObject]:\n if not types:\n return list(self.objects.values())\n types_l = [o.lower() for o in types]\n return [o for o in self.objects.values() if o.type.lower() in types_l]", "def get_all_by_type(self, obj_type):\n objects = []\n node = None\n data = []\n with lzma.open(os.path.join('resources', self.game, 'dumps',\n '{}.dump.xz'.format(obj_type)), 'rt', encoding='latin1') as df:\n for line in df.readlines():\n match = re.match('^\\*\\*\\* Property dump for object \\'\\S+ (\\S+)\\'.*$', line)\n if match:\n objects.append(match.group(1))\n if node:\n node.load_from_string_list(data)\n data = [line]\n node = self.get_node_by_full_object(match.group(1))\n else:\n data.append(line)\n\n if node:\n node.load_from_string_list(data)\n\n return objects", "def properties(self) -> Any:\n return pulumi.get(self, \"properties\")", "def getlist(self, key, type=None):\n if key not in self:\n return []\n values = super().__getitem__(key)\n if type is not None:\n values = [type(value) for value in values]\n return values", "def get_fields(cls):\n return map(lambda x: getattr(cls, x), cls.get_field_names())", "def get_properties_code(self, obj):\n return []", "def property_types(klass):\n # All attributes.\n attributes = [getattr(klass, a) for a in dir(klass)]\n # Only those attributes which are editable model properties.\n not_editable = (ndb.ModelKey, ndb.ComputedProperty)\n properties = [\n a for a in attributes\n if isinstance(a, ndb.Property) and not isinstance(a, not_editable)\n ]\n\n property_types = {}\n for p in properties:\n typ = None\n if p._name in ['uid', 'created', 'modified']:\n # These are read-only from the client's perspective, so they\n # have no updatable \"type\".\n continue\n elif 
getattr(p, '_repeated', None):\n typ = list\n elif isinstance(p, ndb.StringProperty):\n typ = str\n elif isinstance(p, ndb.TextProperty):\n typ = unicode\n elif isinstance(p, ndb.BooleanProperty):\n typ = bool\n elif isinstance(p, ndb.IntegerProperty):\n typ = int\n elif isinstance(p, ndb.DateProperty):\n typ = 'date'\n elif isinstance(p, ndb.DateTimeProperty):\n typ = 'datetime'\n\n if typ:\n property_types[p._name] = typ\n\n if hasattr(klass, 'json_props'):\n # Trim the '_json' ending.\n property_types.update({\n p[:-5]: 'json' for p in klass.json_props\n if p in property_types.keys()\n })\n\n # Delete the text-based json properties, since we don't want the\n # client to access them directly.\n for p in klass.json_props:\n property_types.pop(p, None)\n\n return property_types", "def problem_fact_collection_property(fact_type):\n def problem_fact_collection_property_function_mapper(getter_function):\n ensure_init()\n from org.optaplanner.optapy import PythonWrapperGenerator\n from org.optaplanner.core.api.domain.solution import \\\n ProblemFactCollectionProperty as JavaProblemFactCollectionProperty\n getter_function.__return = PythonWrapperGenerator.getArrayClass(fact_type.__javaClass)\n getter_function.__optaplannerPlanningEntityCollectionProperty = {\n 'annotationType': JavaProblemFactCollectionProperty\n }\n return getter_function\n return problem_fact_collection_property_function_mapper", "def getPropertyNames(self):\n return self._property_names", "def properties(self):\n raise NotImplementedError", "def get_properties(self):\n\n properties = {}\n for iface_name in self.all_interfaces:\n iface = getattr(self, iface_name, None)\n if iface:\n properties.update(iface.get_properties())\n return properties", "def getProperties(groupId, contractId):\n\tprint \"Getting properties for group %s and contract %s\" % (groupId, contractId)\n\tproperty_parameters = { \"contractId\":contractId, \"groupId\":groupId }\n\tproperty_result = getResult('/papi/v0/properties', property_parameters)\n\t\n\tif \"properties\" in property_result:\n\t\tproperty_items = property_result['properties']['items']\n\telse:\n\t\tproperty_items = []\n\n\treturn (property_items)", "def __properties__(self) -> dict:\r\n parameters = [\r\n d for d in dir(self) if (d[0] != \"_\") and (d.count(\"set\") == 0)\r\n and (d.count(\"_c\") == 0) and (d.count(\"_f\") == 0)\r\n ]\r\n\r\n return self.__as_json__(parameters)", "def test_list_properties(self):\n pass", "def get_creature_type_properties(self, name):\n return self._get_monster_class(name).PROPERTIES", "def get_properties(self) -> List[ObserverPropertiesItem]:\n return [\n self._prop_builder.auto('Index from input', type(self).get_index_from_input),\n self._prop_builder.auto('Input index', type(self).active_input_index),\n ]", "def test_get_objects_with_properties(self):\n expected_result = self.spec.get(\"test_get_objects_with_properties\")\n expected_type = expected_result.get(\"_type\")\n expected_datastore_list = []\n\n for each_datastore in expected_result.get(\"datastore_infos\"):\n datastore_name = each_datastore[\"name\"]\n expected_datastore_list.append(datastore_name)\n datastore_list = []\n \n object_content = self.session.invoke_api(vim_util, \n 'get_objects', \n self.vim, \n 'Datastore', \n 100, \n ['name'])\n for one_object in object_content.objects:\n self.assertEqual(one_object.obj._type, expected_type)\n if hasattr(one_object, 'propSet'):\n dynamic_properties = one_object.propSet\n prop_dict = {}\n for prop in dynamic_properties:\n if prop.name == 
\"name\":\n datastore_list.append(prop.val)\n \n for each_ds_name in datastore_list:\n self.assertTrue(each_ds_name in datastore_list)", "def _get_columns(model):\n return {c.key: c for c in _get_mapper(model).iterate_properties\n if isinstance(c, ColumnProperty)}", "def get_all(cls, collection):\n trailers = Database.find(collection=collection, query={})\n return [cls(**trailer) for trailer in trailers]", "def get_properties(self, *fields: str) -> Dict[str, fields.Schema]:\n properties = {}\n for field in fields:\n properties.update(self.get_property(field))\n return properties", "def props(kls):\n from sqlalchemy.orm.properties import RelationshipProperty\n return [x.key for x in kls.__mapper__.iterate_properties if type(x) != RelationshipProperty]", "def properties(self):\n return self.properties_with_uid[1:]", "def relations(cls):\n return [c.key for c in cls.__mapper__.iterate_properties\n if isinstance(c, RelationshipProperty)]", "def all_properties(request):\n\n properties = Property.objects.all()\n\n context = {\n 'properties': properties,\n }\n\n return render(request, 'properties/properties.html', context)", "def get_properties(self):\n self.unimpl_base_class()", "async def get_metadata_for_object_type(\n dbcon: DBConnection, object_type: str) -> Iterable[object_models.ObjectMetadata]:\n q = '''select metadata.object_type, metadata.object_id, metadata.key, metadata.value\n from object_metadata as metadata\n where metadata.object_type=%s'''\n return [object_models.ObjectMetadata(*row) for row in await dbcon.fetch_all(q, (object_type,))]", "def properties(self):\n pass", "def planning_entity_collection_property(entity_type):\n def planning_entity_collection_property_function_mapper(getter_function):\n ensure_init()\n from org.optaplanner.optapy import PythonWrapperGenerator\n from org.optaplanner.core.api.domain.solution import \\\n PlanningEntityCollectionProperty as JavaPlanningEntityCollectionProperty\n getter_function.__optaplannerPlanningEntityCollectionProperty = {\n 'annotationType': JavaPlanningEntityCollectionProperty\n }\n getter_function.__return = PythonWrapperGenerator.getArrayClass(entity_type.__javaClass)\n return getter_function\n return planning_entity_collection_property_function_mapper", "def get_plotable_result_properties(self):\n return [p for p in self.session.query(self.Property).all() \\\n if p.is_plotable() and p.is_result_property()]", "def _get_properties(config: argparse.Namespace) -> tuple[set[str], set[str]]:\n property_classes = {BUILTIN_PROPERTY}\n property_names: set[str] = set() # Not returning 'property', it has its own check.\n if config is not None:\n property_classes.update(config.property_classes)\n property_names.update(\n prop.rsplit(\".\", 1)[-1] for prop in config.property_classes\n )\n return property_classes, property_names", "def get_plotable_instance_properties(self):\n return [p for p in self.session.query(self.Property).all() \\\n if p.is_plotable() and p.is_instance_property()]", "def get_all(self, event_type):\r\n get_trait_name = storage.models.Trait.get_name_by_type\r\n return [TraitDescription(name=t['name'],\r\n type=get_trait_name(t['data_type']))\r\n for t in pecan.request.storage_conn\r\n .get_trait_types(event_type)]", "def all():\n repo = Repository()\n colls = repo.get_objects_with_cmodel(CollectionObject.COLLECTION_CONTENT_MODEL,\n type=CollectionObject)\n return colls", "def get_all_items(self, object_type):\n\n if object_type not in NetBoxObject.__subclasses__():\n raise ValueError(f\"'{object_type.__name__}' object 
must be a sub class of '{NetBoxObject.__name__}'.\")\n\n return self.base_structure.get(object_type.name, list())", "def _getPropertyInfo(self, propertyId):\n retList = []\n # this information might need to be implemented and requested from\n # NvCameraTools(?)\n # property type, count\n\n propInfo = getPropertyInfo(propertyId)\n if (propInfo != None):\n return propInfo\n else:\n raise NvCameraException(NvError_BadValue, \"Invalid property id!\")", "def properties(self, pk):\n return JsonResponse(self._get_properties(pk))", "def properties(self) -> t.Mapping[str, PropertyDefinition]:\n return self._named_properties", "def extract_type_properties(registry, invalidated_item_type):\n return registry['types'][invalidated_item_type].schema['properties']", "def props(self):\n return self._props", "def props(self):\n return self._props", "def obj_classes(self) -> ObjClassCollection:\n return self._obj_classes", "def list_items(self) -> List[Dict[str, Any]]:\n return [c.to_dict() for c in self._objects.values()]", "def ConvertToPropertyValues(self):\n result = []\n for key in iter(self):\n value = self[key]\n item = value\n if isinstance(value, dict): # check that first level is not itself a (sub)dict\n item = None\n elif isinstance(value, (tuple, list)): # check every member of the list is not a (sub)dict\n if len(value) == 0: # Property values do not like empty lists\n value = None\n else:\n for i in range(len(value)):\n if isinstance(value[i], dict):\n value[i] = None\n item = value\n elif isinstance(value, (datetime.datetime, datetime.date, datetime.time)):\n item = SFScriptForge.SF_Basic.CDateToUnoDateTime(value)\n pv = uno.createUnoStruct('com.sun.star.beans.PropertyValue')\n pv.Name = key\n pv.Value = item\n result.append(pv)\n return result", "def __get__(self,obj,objtype):\n if not obj:\n return [getattr(objtype,a) for a in self.attribs]\n else:\n return [getattr(obj,a) for a in self.attribs]", "def get_dynamic_properties(vim, mobj, property_names, obj_type=None):\n if not obj_type:\n obj_type = mobj._type\n obj_content = get_object_properties(\n vim, None, mobj, obj_type, property_names)\n properties = {}\n if obj_content:\n dynamic_properties = obj_content[0].propSet\n for dynamic_property in dynamic_properties:\n property_name = dynamic_property.name\n property_value = dynamic_property.val\n properties[property_name] = property_value\n return properties", "def properties(self) -> Sequence['outputs.GoogleCloudContentwarehouseV1PropertyResponse']:\n return pulumi.get(self, \"properties\")", "def _query_properties(self, props=None, depth=0):\n root = None\n # build the propfind request\n if props is not None and len(props) > 0:\n prop = dav.Prop() + props\n root = dav.Propfind() + prop\n\n return self._query(root, depth)", "def getProperties(properties =['electrical_props', '__description'], \r\n sproperty ='electrical_props'):\r\n #------------------------------------\r\n from .database import GeoDataBase\r\n #-----------------------------------\r\n def _fs (v): \r\n \"\"\" Sanitize value and put on list \r\n :param v: value \r\n :Example:\r\n \r\n >>> _fs('(416.9, 100000.0)'))\r\n ...[416.9, 100000.0]\r\n \"\"\"\r\n try : \r\n v = float(v)\r\n except : \r\n v = tuple([float (ss) for ss in \r\n v.replace('(', '').replace(')', '').split(',')])\r\n return v\r\n # connect to geodataBase \r\n try : \r\n _dbObj = GeoDataBase()\r\n except: \r\n _logger.debug('Connection to database failed!')\r\n else:\r\n _gammaVal = _dbObj._retreive_databasecolumns(properties)\r\n if sproperty in properties: 
\r\n indexEprops = properties.index(sproperty )\r\n try:\r\n _gammaVal [indexEprops] = list(map(lambda x:_fs(x),\r\n _gammaVal[indexEprops]))\r\n except TypeError:\r\n _gammaVal= list(map(lambda x:_fs(x),\r\n _gammaVal))\r\n return _gammaVal", "def all(self, *args, **kwargs):\n list_to_return = []\n if not self.object_type:\n return list_to_return\n class_name = eval(self.object_type)\n if self.objects_id:\n for id in self.objects_id.split(';'):\n if id:\n list_to_return.append(class_name.objects.get(id=id))\n return list_to_return", "def get_property_setters(self, doclist):\n\t\tfrom webnotes.utils import cstr\n\t\tproperty_dict = {}\n\t\t# final property dict will be\n\t\t# {\n\t\t#\tdoc_type: {\n\t\t#\t\tfieldname: [list of property setter dicts]\n\t\t#\t}\n\t\t# }\n\n\t\tdoc_type_list = list(set(\n\t\t\td.doctype=='DocType' and d.name or d.parent\n\t\t\tfor d in doclist))\n\t\tin_string = '\", \"'.join(doc_type_list)\n\t\tfor ps in webnotes.conn.sql(\"\"\"\\\n\t\t\tSELECT doc_type, field_name, property, property_type, value\n\t\t\tFROM `tabProperty Setter`\n\t\t\tWHERE doc_type IN (\"%s\")\"\"\" % in_string, as_dict=1):\n\t\t\tproperty_dict.setdefault(ps.get('doc_type'),\n\t\t\t\t\t{}).setdefault(cstr(ps.get('field_name')), []).append(ps)\n\n\t\treturn property_dict, doc_type_list", "def get_property_names(self, *, is_allprop):\n # Let default implementation return supported live and dead properties\n propNames = super().get_property_names(is_allprop=is_allprop)\n # Add fieldnames as properties\n tableName, primKey = self.provider._split_path(self.path)\n if primKey is not None:\n conn = self.provider._init_connection()\n fieldlist = self.provider._get_field_list(conn, tableName)\n for fieldname in fieldlist:\n propNames.append(\"{%s:}%s\" % (tableName, fieldname))\n conn.close()\n return propNames" ]
[ "0.7357272", "0.65244734", "0.6457637", "0.6373247", "0.6370086", "0.6297487", "0.62510055", "0.62469465", "0.6221552", "0.6219856", "0.6206821", "0.610221", "0.60433024", "0.60395074", "0.5994236", "0.59763896", "0.59744734", "0.59715617", "0.59705645", "0.5961555", "0.5959065", "0.59303087", "0.5918242", "0.59056026", "0.58923954", "0.58923954", "0.5886471", "0.584636", "0.58417064", "0.5829882", "0.5808986", "0.58084875", "0.5807184", "0.5795458", "0.5791771", "0.5775612", "0.57424766", "0.57320774", "0.57240266", "0.5701565", "0.56518656", "0.5647091", "0.56463236", "0.56093615", "0.56041867", "0.55645937", "0.5501777", "0.5501777", "0.54868245", "0.5460871", "0.54446644", "0.54268354", "0.54003954", "0.53991294", "0.5392144", "0.53853095", "0.53780025", "0.5373204", "0.5356042", "0.53462815", "0.5342633", "0.53245586", "0.5317548", "0.5317237", "0.53123915", "0.5302224", "0.5297311", "0.5284549", "0.52828753", "0.52760684", "0.5264856", "0.5253623", "0.5238826", "0.5231093", "0.52270275", "0.5225584", "0.52224725", "0.5212537", "0.5194762", "0.51922697", "0.5175301", "0.5167952", "0.5165419", "0.51549345", "0.515477", "0.5153461", "0.5151599", "0.5139016", "0.5139016", "0.513531", "0.5134448", "0.51230866", "0.51133883", "0.5113197", "0.51122624", "0.5111801", "0.5106435", "0.51013374", "0.50997406", "0.5087215" ]
0.74611396
0
Take the top c cards of each stack and return a copy
def copy_stacks(self, c1, c2): return ( deque([n for n in self._s1][-c1:]), deque([n for n in self._s2][-c2:]) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pop_top_card(self):\n return self.pop_card(top=True)", "def top_draw(self):\n top_card = self.cards.pop(0)\n return top_card", "def top(self):\n return self.get_cards()[-1]", "def get_card_at_top_index(deck):\n \n small_joker_value = get_small_joker_value(deck)\n if deck[0] == get_big_joker_value(deck):\n return deck[get_small_joker_value(deck)]\n else:\n return deck[deck[0]]", "def most_likely_top_card(self, deck):\n question_string = \"If {}have been removed from your deck, which of the following cards is most likely to be the top card of your deck?\"\n answer_suffix = \"is most likely to be the top card\"\n reduced_deck = copy.deepcopy(deck)\n\n cards_to_remove = random.choice(range(10,21))\n print \"Chose to remove {} cards\".format(cards_to_remove)\n removed_cards = {}\n while sum([ removed_cards[key] for key in removed_cards.keys() ]) < cards_to_remove:\n drawn_card = random.choice([ card for card in reduced_deck.decklist if card.count > 1 ])\n print \"Removed a copy of {} from the deck.\".format(drawn_card.name)\n drawn_card.count -= 1\n print \"{} cards remain in the deck.\".format(sum([ card.count for card in reduced_deck.decklist ]))\n # if the card is in the group we've already removed, just\n # increment, otherwise add to that set.\n if drawn_card.name in [ c for c in removed_cards.keys()]:\n removed_cards[drawn_card.name] += 1\n else:\n removed_cards[drawn_card.name] = 1\n print \"Removed: {}\".format(removed_cards)\n reduced_deck_size = sum([ card.count for card in reduced_deck.decklist ])\n\n removed_cards_string = \"\"\n for key in removed_cards.keys():\n c = removed_cards.pop(key)\n copy_plural = \"copies\" if c > 1 else \"copy\"\n if len(removed_cards.keys()) == 0:\n removed_cards_string += \"and \"\n removed_cards_string += \"{0} {1} of {2}, \".format(c, copy_plural, key)\n print removed_cards_string\n print question_string.format(removed_cards_string)\n\n choices = 4\n chosen_cards = []\n print \"The deck is now: {}\".format(reduced_deck.decklist)\n while len(chosen_cards) < choices:\n this_card = random.choice(reduced_deck.decklist)\n print \"Chose: {}\".format(this_card.name)\n # Second test is so that we don't have to deal with ties - however,\n # now we just have to make sure that there are at least 4 different\n # card counts remaining in the deck, which should be the norm - but\n # it's not guaranteed!\n if (this_card not in chosen_cards) and (this_card.count not in [ card.count for card in chosen_cards ]):\n chosen_cards.append(this_card)\n print \"List now contains: {}\".format([ card.name for card in chosen_cards ])\n\n card_odds_pairings = []\n for card in chosen_cards:\n # top_card_odds = hypergeom.sf(1, reduced_deck_size, card.count, 1)\n top_card_odds = card.count / float(reduced_deck_size)\n card_odds_pairings.append((top_card_odds, card.name))\n sorted_odds_pairings = sorted(card_odds_pairings, key=operator.itemgetter(0))\n print \"Cards with odds: {}\".format(sorted_odds_pairings)\n\n question_string = question_string.format(removed_cards_string)\n correct = sorted_odds_pairings[-1][1]\n # No need to shuffle: this list is already in a random order!\n possible = [ card[1] for card in card_odds_pairings]\n\n return question_string, correct, possible, answer_suffix, \"the top card of your deck\"", "def top(self, N: int) -> list:\n if N == 0:\n return []\n items = self.stack[-N:]\n self.stack = self.stack[:-N]\n return items", "def get_card_at_top_index(deck: List[int]) -> int:\n top_card = deck[0]\n\n if top_card == max(deck):\n top_card = 
get_small_joker_value(deck)\n keystream = deck[top_card]\n else:\n keystream = deck[top_card]\n\n return keystream", "def get_top_discard_card(self):\n discard_pile_index = len(self.discard_pile) - 1\n return self.discard_pile[discard_pile_index]", "def top_tiles(self):\n sorted_tiles = self.tiles_by_score()\n top_tiles = sorted_tiles[:NUM_TOP_TILES]\n return top_tiles", "def Stack():\n return []", "def get_top_card(self):\r\n if len(self.deck_of_cards)==0:\r\n raise DeckEmptyError(\"Invalid operation: No more elements in Deck\")\r\n else: \r\n return self.deck_of_cards.pop()", "def first_top_bottom():\n identity = NB_CARDS * np.identity(NB_CARDS)\n # First outcome, the deck isn't changed\n shuffled_deck_1 = deepcopy(identity)\n # Second outcome, the first card is at the bottom and every other cards goes\n # up\n shuffled_deck_2 = deepcopy(identity)\n for i in range(NB_CARDS-1):\n shuffled_deck_2[i] = deepcopy(identity[i+1])\n shuffled_deck_2[NB_CARDS-1] = deepcopy(identity[0])\n # Now we have to combine the two outcome with probability half\n return (shuffled_deck_1 + shuffled_deck_2)/2", "def top(self):", "def pop_card(self, top):\n if len(self.cards) == 0:\n return None\n else:\n if len(self.cards) == 1 and self.last_card_callback is not None:\n self.last_card_callback(self.cards[0])\n\n\n if top:\n result = self.cards.pop()\n else:\n result = self.cards.pop(0)\n\n # Update positions in case we can spread things out now\n self.update_position()\n return result", "def get_topmost_card(self):\n if len(self.cards) > 0:\n return self.cards[-1]\n else:\n return None", "def read_stacks(image):\n\n\t# Offsets for where the stacks are given 1600x900 game window size\n\twidth = 128\n\theight = 30\n\tbase_x = 46\n\tbase_y = 238\n\n\tdigits = {}\n\tfor file in glob.glob(\"card_back/cards/*.png\"):\n\t\tdigit = file.rsplit(\"/\", 1)[1].split(\".\")[0]\n\t\tdigits[str(digit)] = cv2.imread(file)\n\n\tstacks = []\n\tfor x_stack in range(8):\n\t\tstack = []\n\t\tfor y_stack in range(5):\n\t\t\tsub_amount = int(math.floor(x_stack / 2))\n\t\t\tcoord_x = base_x + (width * x_stack) - sub_amount\n\t\t\tcoord_y = base_y + (height * y_stack)\n\t\t\tcrop_image = image[coord_y:coord_y + 16, coord_x:coord_x + 16]\n\n\t\t\tresult_scores = [cv2.matchTemplate(crop_image, digits[str(i)], cv2.TM_SQDIFF) for i in range(10)]\n\t\t\tcard_type = result_scores.index(min(result_scores))\n\n\t\t\tstack.append(card_type)\n\n\t\tstacks.append(stack)\n\n\treturn stacks", "def topMoves(board, limit):\r\n spots = set()\r\n top_list = []\r\n top_queue = PriorityQueue()\r\n\r\n # For each piece on the board\r\n # TODO: This should be all I need\r\n\r\n #board.print_board()\r\n #print(board.get_filled_coordinates())\r\n\r\n for n in board.get_filled_coordinates():\r\n\r\n # For each potential connect space within range\r\n for m in attackArea(n, board.connect):\r\n\r\n (x, y) = m\r\n\r\n # If the connect space is on the board, add to list of potential spots\r\n if board.cell_exists(x, y) and m not in board.get_filled_coordinates():\r\n spots.add(m)\r\n\r\n trackingList = []\r\n\r\n # Evaluate potential of each spot, and add to queue\r\n for p in spots:\r\n top_queue.put((evaluatePosition(board, p) * (-1), p))\r\n trackingList.append(str((evaluatePosition(board, p) * (-1), p)))\r\n\r\n for z in range(limit):\r\n top_list.append(top_queue.get())\r\n\r\n #print(\"Queue: \" + str(trackingList))\r\n\r\n for record in top_list:\r\n #print(str(record))\r\n pass\r\n\r\n # return map(lambda (x, y): (-x, y), top_list)\r\n return 
top_list[0]", "def top(self) -> int:\n top = self.stack.pop()\n self.stack.append(top)\n for i in range(len(self.stack) - 1):\n self.stack.append(self.stack.pop())\n return top", "def triple_cut(deck_of_cards):\n new_deck =[]\n big_joker_value = get_big_joker_value(deck_of_cards)\n small_joker_value = get_small_joker_value(deck_of_cards)\n\t \n index1 = deck_of_cards.index(small_joker_value)\n index2 = deck_of_cards.index(big_joker_value)\n index_top_card = min(index1, index2)\n index_bottom_card = max(index1, index2)\n # This function will give us the joker that is on the top and the joker \n # that is in the bottom of the deck regardless of their value\n \n new_top = deck_of_cards[(index_bottom_card + 1):]\n # Creates a deck that is to be moved the top, from the lower joker and\n # below \n middle = deck_of_cards[index_top_card : index_bottom_card + 1]\n # Middle portion of the deck that is not moved that is in between the jokers\n new_bottom = deck_of_cards[:index_top_card]\n # The deck portion that is to be moved to the bottom, from higher joker and\n # above.\n deck = new_top + middle + new_bottom\n deck_of_cards[:] = deck\n # This will then give a new deck that shifts the cards above the higher \n # joker to the end and the cards below the lower joker to the top.", "def load_stack_top_into_d():\n return ['@SP', 'A=M', 'D=M']", "def get_card_at_top_index(deck_of_cards):\n big_joker_value = get_big_joker_value(deck_of_cards)\n small_joker_value = get_small_joker_value(deck_of_cards)\n first_card = deck_of_cards[0]\n \n if first_card == big_joker_value:\n return deck_of_cards[small_joker_value]\n else:\n return deck_of_cards[first_card]\n # Consideres two cases where if the first_card is big_joker_value then \n # the card that has the index small_joker_value is returned. 
Else it will \n # return the card that has the index of the first_card.", "def top(stack):\n if empty_stack(stack):\n raise IndexError(\"Stack is empty!\")\n else:\n return stack.top.value", "def top(self):\r\n return self.topele", "def pop_bottom_card(self):\n return self.pop_card(top=False)", "def pop(stack):\n item = top(stack)\n stack.top = stack.top.next\n stack.size = stack.size - 1\n return item", "def top(heap):\n return heap[_root()]", "def top(state):\n if len(state[STACK]) <= 0:\n return -1\n else:\n return state[STACK][-1]", "def pull():\r\n i = 0\r\n num = stack[i]\r\n while i < len(stack) - 1:\r\n stack[i] = stack[i + 1]\r\n i += 1\r\n return num", "def compare_top_card(p1, p2):\n\n # must have cards\n assert p1.cards_in_play, \"Player %s has no cards to compare\" % p1.number\n assert p2.cards_in_play, \"Player %s has no cards to compare\" % p2.number\n\n p1_card = p1.cards_in_play[0]\n p2_card = p2.cards_in_play[0]\n\n if p1_card > p2_card:\n return p1\n if p2_card > p1_card:\n return p2\n return None", "def new_deck(cla):\r\n\treturn [cla(i) for i in range(52)]", "def build_population(self, fronts):\n population = []\n\n for front in fronts:\n if len(population) + len(front) > self.pop_size:\n front.sort(reverse=True)\n fill_count = self.pop_size - len(front)\n population += front[:fill_count]\n break\n\n population += front\n\n return population", "def behead(self, index):\n self.stack[::], items = self.stack[:index], self.stack[index:]\n return items", "def get_top_cheese(self, index):\n if len(self._stools[index]) == 0:\n return None\n else:\n cheese = self._stools[index][-1]\n return cheese", "def top(self):\n if self.is_empty():\n raise Empty('Stack is empty!')\n last = (self._front + self._size - 1) % len(self._data)\n return self._data[last]", "def top(self) -> int:\n while len(self.queue1) != 1:\n self.queue2.append(self.queue1.pop(0))\n a = self.queue1[0]\n self.queue2.append(a)\n self.queue1 = self.queue2\n self.queue2 = []\n return a", "def top(self) -> int:\n if self.empty():\n raise ValueError(\"empty stack\")\n while len(self.que) != 1:\n self.tem_que.append(self.que.pop(0))\n ret = self.que.pop()\n self.tem_que.append(ret)\n while len(self.tem_que) > 0:\n self.que.append(self.tem_que.pop(0))\n return ret", "def personal_top_three(scores: list) -> list:\n scores_inverted = [~score for score in scores]\n heapify(scores_inverted)\n return [~heappop(scores_inverted) for _ in range(min(len(scores), 3))]", "def get_top(self):\n elements = self.S.get_maximal_elements()\n data = {}\n alot = Nat().get_top()\n for e in elements:\n data[e] = alot\n return Multiset(data, self.S)", "def topCountries(top=10):\r\n #top 10 deadly countries\r\n countries = agg('country')[:top].index\r\n #grab aggregated data for these countries\r\n dataOfTop10 = agg(['year','country']).query(\"country in @countries\")### interesting...\r\n #unstack data\r\n dataOfTop10 = dataOfTop10.unstack(1)\r\n #remove multiindexes\r\n dataOfTop10 = dataOfTop10.transpose().reset_index(level=0, drop=True).transpose()\r\n #sort by year\r\n dataOfTop10.sort_index(inplace=True)\r\n return dataOfTop10", "def copies_in_top_five(self, deck):\n question_string = \"After drawing your opening hand with one copy of {card}, how likely is it that another copy of {card} is in the top five cards of your deck?\"\n answer_suffix = 'percent'\n # That's another reason why we don't choose a card earlier: we might be\n # interested in a card with a specific quality.\n chosen_card = random.choice([ card for card in 
deck.decklist if card.count > 1 ])\n remaining_copies = chosen_card.count - 1\n remaining_deck = sum([c.count for c in deck.decklist]) - 7\n\n in_top_five_chance = hypergeom.sf(1, remaining_deck, remaining_copies, 5)\n in_top_five_chance = in_top_five_chance * 100\n correct_string = \"{:.2f}\".format(in_top_five_chance)\n\n wrongs = self.gen_wrong(in_top_five_chance, 'percent', 4)\n possible = wrongs + [correct_string]\n random.shuffle(possible)\n\n print \"Chance of a copy of {} in the next five cards: {}\".format(chosen_card.name, correct_string)\n return question_string.format(card=chosen_card.name), correct_string, possible, answer_suffix, chosen_card", "def producer(battle_queue, first_player) -> List:\n\n\n if battle_queue.is_over():\n if not battle_queue.get_winner():\n return [0]\n elif battle_queue.get_winner().get_name() == first_player:\n\n return [battle_queue.get_winner().get_hp()]\n\n return [battle_queue.get_winner().get_hp() * -1]\n accumulator = []\n\n o = battle_queue.peek()\n\n actions = o.get_available_actions()\n copy_cat = [battle_queue.copy()] * len(actions)\n m = [j.copy() for j in copy_cat]\n\n for i in range(len(actions)):\n m2 = m[i].peek()\n\n\n\n mover(actions[i], m2)\n\n #m[i]._content = m[i]._content[1::]\n\n if not m[i].is_empty():\n m[i].remove()\n\n\n\n for i in m:\n accumulator += [max(producer(i, first_player))]\n\n return accumulator", "def __init__(self):\n        self.stack=[]\n        self.top1=-1\n        ", "def do_top(cs, args):\n resp, data = cs.repositories.get_top(args.count)\n utils.print_list(data, ['name', 'count'], sortby='count')", "def get_top(n):\n \n coins = []\n coin_count = 0\n page = 1\n while coin_count < n:\n data = json.loads(requests.get(URL_TOP_COINS.format(page)).text)\n for coin in data:\n coins.append({\"gecko_id\": coin['id'], 'symbol': coin['symbol'].upper(), 'logo':coin['image']})\n page += 1\n coin_count += len(data)\n sleep(0.3)\n return coins[:n]", "def peek(self):\n tmp = []\n peek = None\n for i in range(len(self.stack)):\n peek = self.stack.pop()\n tmp.append(peek)\n while tmp != []:\n self.stack.append(tmp.pop())\n return peek", "def pop(self) -> int:\n #if len(self.sk2.stack) == 0:\n if self.sk2.is_empty():\n for i in range(len(self.sk1.stack)):\n self.sk2.push(self.sk1.top())\n self.sk1.pop()\n a = self.sk2.top()\n self.sk2.pop()\n return a", "def top(self) -> int:\n if self.top == -1:\n return \n elif self.top == 0:\n return(self.q1[self.top])\n else:\n tp = self.top\n while tp != 0:\n self.q2.append(self.q1.pop(0))\n tp -= 1 \n temp = self.q1[self.top]\n while len(self.q2) != 0:\n self.q1.append(self.q2.pop(0))\n return temp", "def __get_top(self, result, top=10):\n result = result.sort_values(by=\"bias_score\", ascending=False).drop_duplicates(subset='productid', keep=\"first\")\n print(result)\n result = result[:top].sort_values(by=\"final_score\", ascending=False).productid\n\n return list(result)", "def getPlayerSlice(self, bottom = 0, top = 10):\r\n if bottom > top:\r\n top, bottom = bottom, top\r\n return self.ranks[bottom:top]", "def read_concat_5dczi(czis):\n stacks = []\n for czi in czis:\n stacks.append(read_czi(czi, True))\n stack, frames = concatenate_5dstacks(stacks)\n return stack, frames", "def get_card_topurchase(self, state, available_cards):\n #get diff_state\n diff_state = self._create_diff_state(state)\n\n available = False\n while not available:\n #get Cheapest next desired Card\n next_get = self._get_next_cheapest_desired_card(diff_state)\n\n if next_get == CardEnum.NoCard:\n next_get = 
CardEnum.NoCard\n available = True\n continue\n\n diff_state[next_get] = None\n # removes next cheapest from possibles, prevents inf loop\n\n if next_get in available_cards:\n available = True\n\n return next_get", "def get_top_char(probas, char_size, top_n=5):\n p = np.squeeze(probas)\n p[np.argsort(p)[:-top_n]] = 0.0\n p = p / np.sum(p)\n ch_id = np.random.choice(char_size, 1, p=p)[0]\n return ch_id", "def top(self) -> 'int':\n if self.empty():\n return\n \n # top = None\n while self.que_one:\n if len(self.que_one) == 1:\n top = self.que_one[0]\n self.que_two.append(self.que_one.pop(0))\n\n \n while self.que_two:\n if len(self.que_two) == 1:\n top = self.que_two[0]\n self.que_one.append(self.que_two.pop(0))\n return top", "def remove_rarest_third(cards_by_xp, cards_by_rarity):\n cards_by_rarity.reverse()\n top_third = cards_by_rarity[:(len(cards_by_rarity) / 3)]\n # Remove the most rare cards from the list of cards by XP\n for j in top_third:\n if j in cards_by_xp:\n cards_by_xp.remove(j)\n return cards_by_xp, cards_by_rarity, top_third", "def pop(self) -> int:\n tmp = list()\n while self.stack:\n tmp.append(self.stack.pop())\n \n ret = tmp.pop()\n self.head = tmp[-1] if tmp else None\n while tmp:\n self.stack.append(tmp.pop())\n \n print(self.stack)\n return ret", "def __init__(self):\n self.stack1 = []\n self.stack2 = []\n self.front = None", "def top( self , i , j ):\n return self._get_top( i , j )", "def __init__(self):\n # Use list to represent Stack \n self.s1 = []\n self.s2 = []\n # Note the first item to make peek() faster\n self.first = None", "def stack_channel(stacks, channel):\n cstack = stacks[0][channel].copy()\n frames = []\n for i in range(1, len(stacks)):\n frames.append(len(cstack))\n cstack = np.vstack([cstack, stacks[i][channel]])\n return cstack, frames", "def top(self):\n try:\n if self.size() > 0:\n return self.items[len(self.items) - 1]\n else:\n raise IndexError('Cannot get top item, stack is empty.')\n except IndexError as err:\n print(err)\n raise", "def top(self):\n\t\tif self.is_empty():\n\t\t\traise Empty('Stack is empty')\n\t\treturn self._head._element", "def peek(self) -> int:\n tmp_list = ArrayStack(10)\n for i in range(self.data.get_size()):\n if self.data.get_size() == 1:\n res = self.data.pop()\n tmp_list.push(res)\n else:\n tmp_list.push(self.data.pop())\n self.data = ArrayStack(10)\n for i in range(tmp_list.get_size()):\n self.data.push(tmp_list.pop())\n return res", "def personal_top_three(scores):\n return sorted(scores, reverse=True)[:3]", "def personal_top_three(scores):\n return sorted(scores, reverse=True)[:3]", "def top(self) -> int:\n last = self.pop()\n self.push(last)\n return last", "def strongest(self):\n pps = collections.Counter()\n for crd in self:\n pps += collections.Counter( {crd.suit:crd.hc} )\n return sorted(pps.items(), reverse=True, key=lambda x:x[1])", "def get_top_tags(tags):\n # ~3x faster\n return list(sorted(tags.items(), key=itemgetter(1), reverse=True))[:TOP_NUMBER]", "def top(self):\n if self.is_empty():\n raise Empty('Stack is empty')\n return self._head._element # top of stack is at head of list", "def get_cards(self):\n return deepcopy(self._cards)", "def __init__(self, cards, stack_style=SQUARED):\n self.cards = cards\n self.stack_style = stack_style", "def top(self, top):\n self.ptr.top(top)", "def shuffle(deck):\n nShuffle = random.randint(0, deck.size - 1)\n for i in range(nShuffle):\n enqueue(deck, front(deck))\n dequeue(deck)\n frontCard = front(deck)\n dequeue(deck)\n return frontCard", "def top(self):\n if 
self.stack == []:\n return None\n return self.stack[-1]", "def pop_card(self):\n return self.cards.pop()", "def read_all_stack(self):\n return self.STACK", "def top(self) -> int:\n self._aux()\n ret = self.q1[0]\n self.q2.append(self.q1.popleft())\n self.q1, self.q2 = self.q2, self.q1\n return ret", "def Deal():\r\n cardsout = []\r\n cardoptions = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23]\r\n topcardoptions = [0,2,3,4,5,6]\r\n topcard = topcardoptions[random.randint(0,5)]\r\n cardoptions.pop(cardoptions.index(topcard))\r\n cardsout.append(topcard)\r\n\r\n if SHOWHAPPENINGS == True:\r\n disp = card_dict[topcard]\r\n print(\"Topcard is: {}\".format(disp)) \r\n\r\n for i in range(4):\r\n numcards = 0\r\n while numcards < 5:\r\n possiblerange = len(cardoptions) - 1\r\n cardindex = random.randint(0,possiblerange)\r\n card = cardoptions[cardindex]\r\n cardsout.append(card)\r\n cardoptions.pop(cardoptions.index(card))\r\n PlayerHands[i].append(card)\r\n numcards += 1\r\n PlayerHands[i] = sorted(PlayerHands[i]) #putting into ascending order\r\n if i == 0 or i == 2:\r\n PlayerHands[i].append(\"RedTeam\")\r\n else: \r\n PlayerHands[i].append(\"BlackTeam\")\r\n \r\n PlayerHands[0].append(PLAYER1)\r\n PlayerHands[1].append(PLAYER2)\r\n PlayerHands[2].append(PLAYER3)\r\n PlayerHands[3].append(PLAYER4)\r\n #PlayerHand format = [card1,card2,card3,card4,card5,Team,Name]\r\n\r\n return topcard", "def best_card(cards, trump=None, lead=None):\n\tval_map = {}\n\tfor c in cards:\n\t\tval = VALUE_MAP[c[0]]\n\t\tif lead == c[1]:\n\t\t\tval *= 10\n\t\tif trump == c[1]:\n\t\t\tval *= 100\n\t\t\tif c[0] == 'J':\n\t\t\t\tval = val*10 + 5\n\t\tif trump == same_color(c[1]) and c[0] == 'J':\n\t\t\tval = val*1000 + 3\n\n\t\tval_map[c] = val\n\n\treturn sorted(val_map.items(), key=lambda x: x[1], reverse=True)[0][0]", "def reveal_top_card(self):\n if self.get_length() != 0:\n if not self.get_topmost_card().get_exposed():\n self.get_topmost_card().flip_card()", "def __call__(self, x, top_n=None):\n if top_n is None:\n top_n = self.num_layers\n\n h_list = []\n h_curr = x\n for layer in self:\n h_curr = layer(h_curr)\n h_list.append(h_curr)\n return concat.concat(h_list[-top_n:], 1)", "def top(self) -> int:\n while len(self.data) != 1:\n self.help.append(self.data.popleft())\n tmp = self.data.popleft()\n self.help.append(tmp)\n self.data, self.help = self.help, self.data\n return tmp", "def get_top_five_countries():\n countries=country_populations.split('\\n')\n top_5=[]\n count=0\n for country in countries:\n if count<6:\n data= country.split('\\t')\n top_5.append(data[1])\n count+=1\n top_5.remove('Country')\n return top_5", "def play_war(deck):\n a_cards = deck[:int(len(deck)/2)]\n b_cards = deck[int(len(deck)/2):]\n a_stash = []\n b_stash = []\n print(\"\\na_cards: %s, a_stash: %s, \\nb_cards: %s, b_stash: %s\" % (a_cards, a_stash, b_cards, b_stash))\n round = 1\n while a_cards and b_cards:\n # The pop() here means we play with the card that is at the end of the list\n a_card = a_cards.pop()\n b_card = b_cards.pop()\n\n # This is the case if the drawn cards are of equal value\n if a_card[list(a_card.keys())[0]] == b_card[list(b_card.keys())[0]]:\n if len(a_cards) > 0 and len(b_cards) > 0:\n a_stash.extend([a_card]+[a_cards.pop()])\n b_stash.extend([b_card]+[b_cards.pop()])\n print(\"\\n-----------------IT'S A WAR!!!!!!!-----------------\")\n print(\"\\na_cards: %s, a_stash: %s, \\nb_cards: %s, b_stash: %s\" % (a_cards, a_stash, b_cards, b_stash))\n continue\n else:\n continue\n \n # This is the 
case when a_card wins over the b_card\n elif a_card[list(a_card.keys())[0]] > b_card[list(b_card.keys())[0]]:\n a_cards = [a_card, b_card] + a_stash + b_stash + a_cards\n a_stash = []\n b_stash = []\n\n # This is the case when b_card wins over the a_card\n elif b_card[list(b_card.keys())[0]] > a_card[list(a_card.keys())[0]]:\n b_cards = [b_card, a_card] + b_stash + a_stash + b_cards\n a_stash = []\n b_stash = []\n\n print(\"\\na_cards: %s, a_stash: %s, \\nb_cards: %s, b_stash: %s\" % (a_cards, a_stash, b_cards, b_stash))\n\n print(\"After round %s: \\na_cards_count: %s, a_stash_count: %s, b_cards_count: %s, b_stash_count: %s\" %\n (round, len(a_cards), len(a_stash), len(b_cards), len(b_stash)))\n round += 1\n\n if(len(a_cards) > len(b_cards)):\n print(\"A_cards wins!!!\")\n elif(len(b_cards) > len(a_cards)):\n print(\"B_cards wins!!!\")\n else:\n print(\"Both the set of cards are empty! It's a tie!\")", "def top(self) -> int:\n r = self.data.get()\n self.push(r)\n return r", "def peek(self):\n if len(self.stack2):\n top = self.stack2.pop()\n self.stack2.append(top)\n return top\n\n return self.front", "def top(self):\n return self[0]", "def PantrySorterEmptyShelf(Shelf: Shelf, stackableFood, unstackableFood):\n print(stackableFood, unstackableFood)\n unstackableFood = sorted(unstackableFood, reverse=True)\n stackableFood = sorted(stackableFood, reverse=True)\n print(\"sorted\\n{}\\n{}\".format(unstackableFood, stackableFood))\n for foodList in [unstackableFood, stackableFood]:\n for food in foodList:\n if(food.height >= Shelf.height):\n #can never be added because doesn't fit on Shelf\n print(\"Could not add {} due to exceeding height of Shelf.\".format(food))\n foodList.remove(food)\n iShelf = 0\n remainingWidth = Shelf.width\n\n while (not Shelf.isFull() and (len(stackableFood) != 0 or len(unstackableFood) != 0)) :\n\n Shelf.createStack()\n print(\"stack created\")\n # here I am trying to get the ith stack on the shelf. but it didnt work for me. It was an object of type list(?)\n curStack = Shelf.stacks[iShelf]\n for food in stackableFood:\n # adds as many stackable food items to stack as possible\n if Shelf.height > curStack.height + food.height:\n # if this is not true, it cant be stacked anyways\n if not curStack.items:\n # just first item in stack\n if food.depth < remainingWidth:\n curStack.addItem(food)\n print(\"adding {}\".format(food))\n else:\n curStack.addItem(food)\n remainingWidth -= food.depth\n for food in curStack.items:#remove items that have been shelved\n stackableFood.remove(food)\n removeThisFromUnstackable = None\n for food in unstackableFood:\n # adds as a non stackable food item to stack if possible\n if curStack.stackable and Shelf.height > curStack.height + food.height:\n # if this is not true, it cant be stacked anyways\n if not curStack.items:\n # just first item in stack\n if food.depth < remainingWidth:\n remainingWidth -= food.depth\n curStack.addItem(food, False)\n unstackableFood.remove(food)\n break\n else:\n curStack.addItem(food, False)\n unstackableFood.remove(food)\n break\n\n iShelf += 1 \n try:\n if(len(stackableFood) != 0 and remainingWidth < stackableFood[-1].depth):\n if(len(unstackableFood) != 0 and remainingWidth < unstackableFood[-1].depth):\n Shelf.setFull(True)\n elif(len(unstackableFood) != 0 and remainingWidth < unstackableFood[-1].depth):\n if(len(unstackableFood) != 0 and remainingWidth < unstackableFood[-1].depth):\n Shelf.setFull(True) \n # checks if not even the smallest items in both lists fit onto shelf. 
\n except:\n print(\"avoided something\")\n print(\"end\")\n return Shelf", "def get_top_n_motif_scores(score_list,top_n):\r\n\treturn score_list.argsort()[-top_n:],score_list[score_list.argsort()[-top_n:]]", "def top(self) -> object:\n if self.is_empty()== True: # if size of array is 0, raise exception\n raise StackException\n return self.da.get_at_index(self.size()-1) # return the top of the stack (last element)\n pass", "def get_all_open_cards(self):\n print('Searching Trello cards..\\n')\n done_sources = []\n for list in self.my_lists:\n for card in list.list_cards(card_filter='open'):\n name = card.name.split()[0]\n done_sources.append(card)\n return done_sources", "def copy(stack):\n try:\n return stack[-1]\n except:\n print('error')", "def top(self):\n if len(self._data) == 0:\n raise StackError(\"Stek je prazan. Ne moze se izvrsiti funkcija top.\")\n else:\n return self._data[-1]", "def create_best_hand_bruteforce(cards):\n \n combos = unique_combinations(cards, 5)\n hands = [Hand(combo) for combo in combos]\n hands = sorted(hands, reverse=True)\n return hands[0]", "def __get_top_with_detail(self, result, top=10):\n result = result.sort_values(by=\"bias_score\", ascending=False).drop_duplicates(subset='productId', keep=\"first\")[\n :top]\n\n return result", "def top(self):\n if self.is_empty():\n raise RuntimeError(\"Attempt to get a top of the empty stack!\")\n return self._items[-1]", "def sort_stack(stack: Stack):\n # additional stack\n tmp = Stack()\n # find length\n n = 0\n while stack:\n tmp.push(stack.pop())\n n += 1\n while tmp:\n stack.push(tmp.pop())\n # balance the content between the 2 stacks, sorting one at the time\n mini = float('inf')\n for i in range(n):\n print(f'<- status: stack: {stack}, tmp: {tmp}')\n for _ in range(n-i):\n mini = min(mini, stack.peek())\n tmp.push(stack.pop())\n print(f'-> status: stack: {stack}, tmp: {tmp}')\n # push the min\n stack.push(mini)\n for _ in range(n-i):\n if tmp.peek() != mini: stack.push(tmp.pop())\n else: tmp.pop()\n # reset minimum\n mini = float('inf')\n return stack", "def top(self, *args):\n return _ida_hexrays.history_t_top(self, *args)", "def top(self):\n while not self.queue[self.tag].empty():\n temp = self.queue[self.tag].get()\n self.queue[1 - self.tag].put(temp)\n self.tag = 1 - self.tag\n return temp", "def dealHand(deck):\n hand = [] \n for i in range(7): \n hand.append(deck.pop())\n return hand", "def dealDraw(deck):\n hand = [] \n for i in range(7): \n hand.append(deck.pop())\n return hand" ]
[ "0.6781717", "0.6382215", "0.63411653", "0.63055867", "0.6273443", "0.6257995", "0.61584336", "0.61021817", "0.60572743", "0.6045613", "0.6024501", "0.6022065", "0.600286", "0.5925546", "0.5907862", "0.58364666", "0.5817762", "0.5810385", "0.5734856", "0.5729768", "0.5695732", "0.56908655", "0.5683966", "0.56606287", "0.56425786", "0.561248", "0.56100154", "0.5608585", "0.55587685", "0.5555961", "0.5545167", "0.5544231", "0.55300707", "0.5522472", "0.55131304", "0.55062515", "0.55040544", "0.54919076", "0.5490293", "0.5486988", "0.5482354", "0.54719627", "0.54692346", "0.5466076", "0.5453419", "0.54516023", "0.54442674", "0.5440701", "0.54238623", "0.5417416", "0.5414063", "0.54112023", "0.54096866", "0.54087687", "0.5408015", "0.5405372", "0.53971756", "0.538916", "0.5384312", "0.53811526", "0.53721166", "0.53689575", "0.5350098", "0.5350098", "0.53410184", "0.53324336", "0.5330006", "0.5324891", "0.53230226", "0.53224844", "0.53037375", "0.5300257", "0.5284946", "0.5278617", "0.52779734", "0.52667516", "0.526501", "0.5262462", "0.52563304", "0.52526176", "0.5251362", "0.5251115", "0.52506876", "0.524624", "0.5239776", "0.5230701", "0.52276856", "0.52206963", "0.5211215", "0.52026576", "0.5201156", "0.5198794", "0.5198298", "0.5194941", "0.517235", "0.5171774", "0.5167315", "0.5165423", "0.51493686", "0.5148403" ]
0.57403517
18
Return tuple (player number, s1, s2). The first element indicates the winner
def play(self): while len(self._s1) > 0 and len(self._s2) > 0: if self._serialize() in self._seen_games: # Game over player 1 wins return (1, *self.decks) self._seen_games.add(self._serialize()) n1, n2 = self._s1.pop(), self._s2.pop() if len(self._s1) >= n1 and len(self._s2) >= n2: # Play a sub game sub_game = Game(*self.copy_stacks(n1, n2)) res, _, _ = sub_game.play() else: res = 1 if n1 > n2 else 2 if res == 1: self._s1.appendleft(n1) self._s1.appendleft(n2) else: self._s2.appendleft(n2) self._s2.appendleft(n1) return (1 if len(self._s1) else 2, *self.decks)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def winner(self):\n # Credit to Dariusz Walczak for inspiration.\n # http://stackoverflow.com/questions/1720421/merge-two-lists-in-python\n moves = [p.possible_moves(p.pieces, self) for p in self.players]\n if False in [mv == [] for mv in moves]:\n return (\"None\")\n else:\n cand = [(p.score, p.name) for p in self.players]\n return (sorted(cand, reverse=True)[0][1])", "def find_winner_scores(self):\n p1_score, p2_score = self.find_scores()\n if p1_score > p2_score:\n winner = 1\n elif p1_score < p2_score:\n winner = 2\n else:\n winner = 0\n return winner, p1_score, p2_score", "def determine_winner1(self): \r\n sorted_player_rank = self._rank()\r\n print(f\"sorted player rank: {sorted_player_rank}\")\r\n print(f\"winner is player {sorted_player_rank[0]}: with points {sorted_player_rank[0][1]}\")", "def determine_winner(score1, score2):\n if score1 == score2:\n return 'tie'\n elif score1 == 21:\n return 'player1'\n elif score2 == 21:\n return 'player2'\n elif score1 > 21 or score2 > 21:\n if score1 > 21 and score2 > 21:\n if score1 - 21 < score2 - 21:\n return 'player1'\n else:\n return 'player2'\n elif score2 < 21 < score1:\n return 'player2'\n elif score1 < 21 < score2:\n return 'player1'\n elif score1 < 21 and score2 < 21:\n if score1 - score2 > 0:\n return 'player1'\n else:\n return 'player2'\n else:\n return None", "def winner(data, event):\n if len(data) == 0:\n return ('none', 0)\n\n if event == 'Swim' or event == 'Run':\n winScore = 1000000000\n for i in data:\n s = time_seconds(i[event])\n if s < winScore:\n winScore = s\n name = i['name']\n else:\n winScore = -1\n for i in data:\n s = int(i[event])\n if s > winScore:\n winScore = s\n name = i['Name']\n\n return (name, winScore)", "def winner(board):\n black_count = board.count(-1)\n white_count = board.count(1)\n if black_count > white_count:\n return (-1, black_count, white_count)\n elif white_count > black_count:\n return (1, black_count, white_count)\n else:\n return (0, black_count, white_count)", "def determine_winner(self):\r\n for i in range(2):\r\n # Way of the Stone (capture opponent master)\r\n if not self.bitboard_king[i]:\r\n return 1 - i * 2\r\n # Way of the Stream (move master to opposite square)\r\n if self.bitboard_king[i] == self.WIN_BITMASK[i]:\r\n return i * 2 - 1\r\n return 0", "def swissPairings():\n\n # Ok This is where things get interesting, how in the world should i solve this problem\n # A question to the udacity reviewer. 
Shouldn't standings be passed in to this function since weve already called it in tournament_test.testPairings\n\n #anyways\n\n nextRoundPlayers = []\n standings = playerStandings()\n \n # since our players are ordered by wins, first place first and we have an even number of players,\n # this seems like a no-brainer to just have every 2 tuples starting from the beginning to be the next match\n # however this needs to to be implemented algorithmically\n \n #loop through our players and when we get to an even index, we get the previous two players and assign their ids and names to the next tuple \n #in nextRoundPlayers\n \n i = 0\n while i < len(standings):\n if i % 2 == 0:\n id1 = standings[i-1][0]\n name1 = standings[i-1][1]\n\n id2 = standings[i-2][0]\n name2 = standings[i-2][1]\n\n nextRoundPlayers.append((id1, name1, id2, name2))\n\n i += 1\n \n return nextRoundPlayers", "def get_winner(player_one_score, player_two_score):\n \n # Get winner depending on which player has highest score or if their \\\n # scores are equal\n if player_one_score > player_two_score:\n return 'Player One wins!'\n elif player_one_score < player_two_score:\n return 'Player Two wins!'\n else:\n return 'Tie game!'", "def Winner(self, whichPlayer, tResult, score, gameCount):\n\n if whichPlayer == 0:\n tResult[0] = tResult[0] + score[0]\n else:\n tResult[1] = tResult[1] + score[1]\n print(data['tResult'],\"player1 \", tResult[0],\"player2 \",tResult[1])\n if gameCount == 3:\n if tResult[0] > tResult[1]:\n print(data['mplayer1'],tResult[0] - tResult[1])\n else:\n print(data['mplayer2'],tResult[1] - tResult[0])\n return whichPlayer, score, gameCount, tResult", "def winner(self):\n if (self.player):\n return (0 == reduce(lambda x, y: x+y, self.board.p1vec))\n else:\n return (0 == reduce(lambda x, y: x+y, self.board.p2vec))", "def swissPairings():\n\n match_tup = ()\n matches_list = []\n player_count = 0 # keeps track of how many players per match\n players = playerStandings();\n for player in players:\n if player_count == 0:\n playerone = player\n player_count += 1\n elif player_count == 1:\n playertwo = player\n player_count += 1\n if player_count == 2: # match full, add match to list then reset\n match_tup = (playerone[0],playerone[1],playertwo[0],playertwo[1])\n matches_list.append(match_tup)\n player_count = 0\n return matches_list", "def swissPairings():\n\n player_list = playerStandings()\n match_list = []\n\n # assume its always even\n for i in xrange(0, len(player_list), 2):\n id1, name1, wins1, matches1 = player_list[i]\n id2, name2, wins2, matches2 = player_list[i+1]\n match_list.append((id1, name1, id2, name2))\n return match_list", "def find_winner(self):\n if type(self.data) is list:\n best_guess = min(self.data, key=lambda x: abs(x-self.two_thirds_of_the_average()))\n winning_guessers = ['Anonymous']\n if type(self.data) is dict:\n best_guess = min(self.data.values(), key=lambda x:\n abs(x-self.two_thirds_of_the_average()))\n winning_guessers = [guesser for guesser in self.data if\n self.data[guesser] == best_guess]\n return tuple(sorted(winning_guessers) + [best_guess])", "def winner(board):\n black_count = board.count(-1)\n white_count = board.count(1)\n if black_count > white_count:\n #if black_count + white_count != 64:\n # black_count += (64 - black_count - white_count)\n return (-1, black_count, white_count)\n elif white_count > black_count:\n #if black_count + white_count != 64:\n # white_count += (64 - black_count - white_count)\n return (1, black_count, white_count)\n else:\n return (0, 
black_count, white_count)", "def check_game_status(self):\n for player in (\"1\", \"2\"):\n row_win = np.apply_along_axis(\n lambda x: set(x) == {player}, 1, self.board\n ).any()\n col_win = np.apply_along_axis(\n lambda x: set(x) == {player}, 0, self.board\n ).any()\n d1_win = set(self.data[[0, 4, 8]]) == {player}\n d2_win = set(self.data[[2, 4, 6]]) == {player}\n if any([row_win, col_win, d1_win, d2_win]):\n return (\"win\", player)\n\n if self.counter[\"_\"] == 0:\n return (\"tie\", None)\n else:\n return (\"turn\", \"1\" if self.counter[\"1\"] == self.counter[\"2\"] else \"2\")", "def check_winner(self):\n for row in self.board.values():\n if all([mark == \"x\" for mark in row]):\n return self.player_1\n elif all([mark == \"o\" for mark in row]):\n return self.player_2\n\n # checks every column\n for i in range(3):\n first_row, second_row, third_row = self.board.values()\n if first_row[i] == \"x\" and second_row[i] == \"x\" and third_row[i] == \"x\":\n return self.player_1\n elif first_row[i] == \"o\" and second_row[i] == \"o\" and third_row[i] == \"o\":\n return self.player_2\n\n # checks the diagonals\n if self.board[\"a\"][0] == \"x\" and self.board[\"b\"][1] == \"x\" and self.board[\"c\"][2] == \"x\":\n return self.player_1\n if self.board[\"a\"][2] == \"o\" and self.board[\"b\"][1] == \"o\" and self.board[\"c\"][0] == \"o\":\n return self.player_2\n\n return None", "def winner(strategy0, strategy1):\n score0, score1 = play(strategy0, strategy1)\n if score0 > score1:\n return 0\n else:\n return 1", "def winner(strategy0, strategy1):\n score0, score1 = play(strategy0, strategy1)\n if score0 > score1:\n return 0\n else:\n return 1", "def winner(strategy0, strategy1):\n score0, score1 = play(strategy0, strategy1)\n if score0 > score1:\n return 0\n else:\n return 1", "def swissPairings():\n # LOGIC used in pairing :\n # Latest standings are extracted using \"players\" table.\n # From the standings, 2 players sets/tuples are chosen wherein the players have similar \"wins\".(Adjacent)\n #\n cur4 = conn.cursor()\n query = \"\"\"SELECT id, name, sum(wincount) as wins, sum(lose_count)+sum(wincount) as total\n from\n (((\n select p.id, p.name, count(winner) as wincount, '0' as lose_count\n from players p left join matches on p.id=winner group by p.id, p.name order by count(winner) desc)\n UNION\n (select p.id, p.name, '0' as wincount, count(loser) as lose_count\n from players p left join matches on p.id=loser group by p.id, p.name order by count(loser) desc\n )))\n as standings group by id, name order by wins desc, total asc;\"\"\"\n cur4.execute(query)\n rows = cur4.fetchall()\n\n # Below are the temporary variables used in processing.\n count = 1\n temp_pid = ()\n temp_name = ()\n pid = ()\n name = ()\n\n # For executing the test cases successfully, the returned datastructure\n # should be a list of tuples.\n outer_list = []\n inner_tuple = ()\n\n # Instantiating and returning the datastructure.\n for row in rows:\n # The function needs to send pid,name hence extracting them.\n pid = (row[0],)\n name = (row[1],)\n if count in {1, 3, 5, 7}:\n temp_pid = pid\n temp_name = name\n else:\n inner_tuple = temp_pid+temp_name+pid+name\n outer_list.append(inner_tuple)\n count = count+1\n return outer_list", "def winner(self):\n return (\"None\")", "def swissPairings():\n pairing = []\n\n conn = psycopg2.connect(\"dbname=tournament\")\n c = conn.cursor()\n c.execute(\"SELECT max(num_of_wins) FROM match\")\n max_win = c.fetchall()[0][0]\n\n for wins in range(0,max_win + 1): # loop through 
num_of_wins\n query = \"SELECT player.id, player.name FROM player, match WHERE player.id = match.id and num_of_wins = \" + str(wins)\n c.execute(query)\n res = c.fetchall()\n \n pairs= []\n flag = 0\n for e in res:\n if flag == 0: # it's the first element in the tuple\n pairs = e\n flag = 1\n else: # it's the second element in the tuple\n pairs += e \n flag = 0\n pairing.append(tuple(pairs))\n\n conn.close()\n \n return pairing", "def winner(self):\n\n\t\tfor player in [1,2]:\n\t\t\twon = np.full((self.boardSize), player)\n\n\t\t\t# Check diagonals\n\t\t\tif(np.array_equal(np.diag(self.board), won)): return player\n\t\t\tif(np.array_equal(np.diag(np.fliplr(self.board)), won)): return player\n\n\t\t\t# Check lines and columns\n\t\t\tfor i in range(self.boardSize):\n\t\t\t\tif(np.array_equal(self.board[i], won)): return player\n\t\t\t\tif(np.array_equal(self.board[:,i], won)): return player\n\n\t\t# Draw\n\t\tif(not(0 in self.board)): return 3\n\n\t\t# No win or draw\n\t\treturn 0", "def swissPairings():\n #get all players, sort by number of wins.create matches with the 2 adjacent players\n c.execute(\"\"\"SELECT id, playerName, num_wins\n FROM (SELECT winner_id, count(match_id) as num_wins \n FROM wins \n group by winner_id \n )as R1 right join tournament on R1.winner_id= tournament.id order by num_wins\"\"\")\n result= c.fetchall()\n for row in result:\n print row", "def choose_winner(): \r\n max_health = Titan.max_health()\r\n winners = tuple((titan.name for titan in Titan.titans if titan.health == max_health))\r\n return winners", "def __winner_in_line(self, line):\n\t\ttoken_sum = 0\n\t\tfor token in line:\n\t\t\ttoken_sum += token\n\t\t\tif token_sum == 4 * self.PLAYER1:\n\t\t\t\treturn self.PLAYER1\n\t\t\tif token_sum == 4 * self.PLAYER2:\n\t\t\t\treturn self.PLAYER2\n\t\t\tif token_sum < 0 < token or token_sum > 0 > token:\n\t\t\t\ttoken_sum = 0\n\t\treturn 0", "def swissPairings():\n # retreives player standings i.e. id, player, wins, matches\n standings = playerStandings()\n # pairs for next round are stored in this array.\n next_round = []\n\n # iterates on the standings results. 
As the results are already in\n # descending order, the pairs can be made using adjacent players, hence the\n # loop is set to interval of 2 to skip to player for next pair\n # in every iteration.\n for i in range(0, len(standings), 2):\n # each iteration picks player attributes (id, name) of current row\n # and next row and adds in the next_round array.\n next_round.append((standings[i][0], standings[i][1], standings[i+1][0], standings[i+1][1]))\n # pairs for next round are returned from here.\n return next_round", "def winner(self):\n\n if self.home_score > self.away_score:\n return HOME\n elif self.home_score < self.away_score:\n return VISITOR\n else:\n return TIE", "def get_winner(self):\n winner: Player = Player('none')\n points_winner = 0\n for player in self.players:\n for key, value in player.get_stats().items():\n print('{}: {}'.format(key, value))\n if key == 'points':\n if value >= points_winner:\n winner = player\n print()\n\n print('The winner is: ' + winner.get_name())\n return winner", "def winner(board):\n\t#For X\n\tiswinnerX = winnerForPlayer(board, X)\n\tiswinnerO = winnerForPlayer(board, O)\n\n\tif iswinnerX:\n\t\treturn X\n\tif iswinnerO:\n\t\treturn O\n\n\treturn None", "def winner(self):\n state = self.state\n if state == State.X_WON:\n return Square.X\n if state == State.O_WON:\n return Square.O\n return None", "def winner(board):\n # finite list of possible wins\n winnings = [\n (0, 0), (0, 1), (0, 2), \n (1, 0), (1, 1), (1, 2),\n (2, 0), (2, 1), (2, 2),\n (0, 0), (1, 0), (2, 0),\n (0, 1), (1, 1), (2, 1),\n (0, 2), (1, 2), (2, 2),\n (0, 0), (1, 1), (2, 2),\n (2, 0), (1, 1), (0, 2)\n ]\n # if the board has one of the lists in winnings \n # then the piece in one of those spots is the winner\n xcount = 0\n ocount = 0\n for i in range(len(winnings)):\n if(board[winnings[i][0]][winnings[i][1]] == X):\n xcount += 1\n if(board[winnings[i][0]][winnings[i][1]] == O):\n ocount += 1\n if((i + 1) % 3 == 0):\n if(ocount == 3 or xcount == 3):\n return board[winnings[i][0]][winnings[i][1]]\n else:\n ocount = 0\n xcount = 0\n return EMPTY", "def swissPairings(matchid):\n standings = playerStandings(matchid)\n matchup = []\n \"\"\"\n # if statement pops out player who hasnt had a bye yet\n # bye chosen from middle ranked players\n if len(standings)%2 != 0:\n bye_place = len(standings)/2\n while (standings[bye_place][6] == 1):\n bye_place = bye_place + 1\n bye = standings.pop(bye_place)\n #build match-up system\n for num in range(0, len(standings)):\n if num%2 == 0:\n matches.append(num)\n \"\"\"\n while (len(standings) != 0):\n to_match = standings.pop(0)\n next_player = 0\n print \"to match\"\n while (to_match[0] in standings[next_player][5]):\n print \"next player\"\n next_player = next_player + 1\n matched = standings.pop(next_player)\n matchup.append((to_match[0], to_match[1],\n matched[0], matched[1]))\n if (len(standings) == 1):\n bye = standings.pop(0)\n matchup.append((bye[0],bye[1]))\n return matchup", "def get_winner(self):\n diff = self.home_score - self.away_score\n if diff > 0:\n return self.home_team\n elif diff < 0:\n return self.away_team\n else:\n return None", "def get_winner(state):\n\n if", "def winner(board):\n for turn in [X,O]:\n for i in range(3):\n if board[i] == [turn, turn, turn]:\n return turn\n if board[0][i] == turn and board[1][i] == turn and board[2][i] == turn:\n return turn\n if board[0][0] == turn and board[1][1] == turn and board[2][2] == turn:\n return turn\n if board[0][2] == turn and board[1][1] == turn and board[2][0] == turn:\n return 
turn\n return None", "def swissPairings():\n\n p = playerStandings()\n pair = []\n for x in range(0, len(p), 2):\n y = (p[x][0],p[x][1],p[x+1][0],p[x+1][1])\n pair.append(y)\n return pair", "def utility(board):\n winning_player = winner(board)\n\n # Did X win?\n if winning_player == X:\n return 1\n\n # Did O win?\n if winning_player == O:\n return -1\n\n return 0", "def get_winner(self):\n combos = [\n [0, 1, 2],\n [3, 4, 5],\n [6, 7, 8],\n [0, 3, 6],\n [1, 4, 7],\n [2, 5, 8],\n [0, 4, 8],\n [2, 4, 6],\n ]\n winner = None\n for combo in combos:\n a, b, c = combo\n if (\n self.board[a] is not None\n and self.board[a] == self.board[b]\n and self.board[a] == self.board[c]\n ):\n winner = self.board[a]\n break\n return winner", "def check_winner(self):\n if self.player1.chips <= BIG_BLIND_BET:\n return 2\n elif self.player2.chips <= BIG_BLIND_BET:\n return 1\n else:\n return 0", "def winner(board):\n for i in range(3):\n if board[i][0] == board[i][1] == board[i][2] != None:\n return board[i][0]\n for i in range(3):\n if board[0][i] == board[1][i] == board[2][i] != None:\n return board[0][i]\n if board[0][0] == board[1][1] == board[2][2]:\n return board[0][0]\n if board[0][2] == board[1][1] == board[2][0]:\n return board[0][2]\n return None", "def swissPairings():\n standings = playerStandings()\n match_list = []\n\n # Looks at indices in standings with even numbers and pairs them with\n # adjacent players (i.e. players with the most similar standing)\n for x in range(0, len(standings)/2):\n new_match = (standings[2 * x][0], standings[2 * x][1],\n standings[2 * x + 1][0], standings[2 * x + 1][1])\n match_list.append(new_match)\n return match_list", "def winner(self):\n for c in 'xo':\n for comb in [(0,3,6), (1,4,7), (2,5,8), (0,1,2), (3,4,5), (6,7,8), (0,4,8), (2,4,6)]:\n if all(self.spots[p] == c for p in comb):\n return c\n return None", "def swissPairings():\n # get a list that only includes id and name from our standings view\n conn = connect()\n cursor = conn.cursor()\n cursor.execute(\"SELECT id,p_name FROM standings\")\n pair = cursor.fetchall()\n conn.close()\n # because the rules state that players should never match up more than once\n # we use playedCount to check that and build out a list of pairings\n # that hopefully includes everyone but has no repeat matches between rounds\n hasPartner = []\n pairsList = []\n pairLen = len(pair)\n for index, player1 in enumerate(pair):\n if not index in hasPartner:\n for index2 in range(index, pairLen):\n if playedCount(player1[0], pair[index2][0]) == 0:\n hasPartner.extend([index, index2])\n pairsList.append((player1[0], player1[1],\n pair[index2][0], pair[index2][1]))\n break\n return pairsList", "def utility(board):\n winners = winner(board)\n if (X == winners):\n return 1\n elif (O == winners):\n return -1\n return 0", "def test_winner(state_board):\n\tres = 3 #default value is tie game\n\tptsb = 0 #points for the black\n\tptsw = 0 #points for the white\n\t\n\t#looks in the board if there is an empty case while\n\t# counting the number of points for each player\n\tfor i in state_board:\n\t\tfor j in i:\n\t\t\tif j == 0:\n\t\t\t\tres = 0\n\t\t\telif j == 1:\n\t\t\t\tptsb += 1\n\t\t\telif j == 2:\n\t\t\t\tptsw += 1\n\t\n\t#if there is an empty case, looks for possibilities\n\t# for the other player, if no possibility test for the points\n\t#if no empty case\n\t# test for points\n\t#else return 0\n\tif res == 0:\n\t\tif possible(state_board,1) == []:\n\t\t\tif possible(state_board,2) == []:\n\t\t\t\tres = 
count_points(ptsb,ptsw)\n\t\t\telse:\n\t\t\t\tres = 5\n\t\telif possible(state_board,2) == []:\n\t\t\tres = 4\n\telse:\n\t\tres = count_points(ptsb,ptsw)\n\treturn res", "def __getWinner(self, s1: Simulator, s2: Simulator):\n w1, w2 = s1.getWinner(), s2.getWinner()\n \n if (w1 == w2):\n self.__winner = w1\n return\n \n if (w1 == 1):\n victoryTime1 = s1.getTime()\n victoryTime2 = s2.getTime()\n else:\n victoryTime2 = s1.getTime()\n victoryTime1 = s2.getTime()\n \n if (victoryTime1 < victoryTime2):\n self.__winner = 1\n elif (victoryTime1 > victoryTime2):\n self.__winner = 2\n else:\n self.__winner = 3", "def score_tuple( hand ):\n m = matches(hand)\n #print( m )\n #royal_flush -- a special case of straight flush.\n if flush(hand) and straight(hand) and hand[4].rank == 14:\n return (8, hand[4].rank, 0)\n #straight_flush\n elif flush(hand) and straight(hand):\n return (8, hand[4].rank, 0)\n #four_of_a_kind\n elif len(m) == 2 and m[0].count == 4:\n return (7, m[0].card.rank, 0)\n #full_house\n elif len(m) == 2 and m[0].count == 3 and m[1].count == 2:\n return (6, m[0].card.rank, m[1].card.rank)\n #flush\n elif flush(hand):\n return (5, hand[4].rank, 0)\n #straight\n elif straight(hand):\n return (4, hand[4].rank, 0)\n #three_of_a_kind\n elif len(m) == 3 and m[0].count == 3:\n return (3, m[0].card.rank, 0)\n #two_pair\n elif len(m) == 3 and m[0].count == 2 and m[1].count == 2:\n return (2, m[0].card.rank, m[1].card.rank)\n #one_pair\n elif len(m) == 4 and m[0].count == 2 and m[1].count == 1:\n return (1, m[0].card.rank, m[1].card.rank)\n # Simple high card. Is this adequate? We'll know if we get ties.\n else:\n return (0, hand[4].rank, 0) # or (0, m[0].card.rank, 0)", "def get_next_match_prefer_unfair_matches(population):\n win_player = None\n loss_player = None\n for player in population:\n if len(player.record) == 1:\n if player.record[0] == \"W\":\n win_player = player\n elif player.record[0] == \"L\":\n loss_player = player\n if win_player and loss_player:\n return win_player, loss_player\n # if we can't find an unfair match, just pick the first available\n return get_next_match_pick_first_available(population)", "def winner(row):\r\n x=0\r\n o=0\r\n win=False\r\n winner='X'\r\n empty=False #Better not to do '.' in row\r\n \r\n for char in row:\r\n if char=='X':\r\n x+=1\r\n o=0\r\n if char=='O':\r\n o+=1\r\n x=0\r\n if char=='T':\r\n x+=1\r\n o+=1\r\n if char=='.':\r\n x=0\r\n o=0\r\n empty=True\r\n\r\n #Check Winner\r\n if x==4:\r\n win=True\r\n winner='X'\r\n if o==4:\r\n win=True\r\n winner='O'\r\n return (win,winner,empty)\r\n #Done\r", "def who_won(self, board):\n winners = set()\n for x,y,z in self.wins:\n if board[x] == board[y] and board[y] == board[z]:\n winners.add(board[x])\n if 1 in winners and 2 in winners:\n return 3\n if 1 in winners:\n return 1\n if 2 in winners:\n return 2\n return 0", "def winner(board):\n #To determine the winner, I need to know the board's final value. \n token_value = utility(board)\n #if it's 1, X won. If it's -1, O won. 
Else, it was a tie.\n if(token_value == 1):\n return 'X'\n elif(token_value == -1):\n return 'O'\n else:\n return None", "def determine_winner(self,list1,list2):\r\n \r\n points_player1=0\r\n points_player2=0 \r\n \r\n if len(list1) !=DrawCardsGame.num_turns or\\\r\n len(list2) !=DrawCardsGame.num_turns:\r\n print(f\"Invalid Input, please make {DrawCardsGame.num_turns} draws for each player\")\r\n return f\"Invalid Input, please make {DrawCardsGame.num_turns} draws for each player\"\r\n \r\n \r\n for x in list1:\r\n points_player1+=DrawCardsGame.shades_points_dict[x[0]] * x[1]\r\n \r\n for x in list2:\r\n points_player2+=DrawCardsGame.shades_points_dict[x[0]] * x[1]\r\n \r\n if points_player1>points_player2:\r\n print(\"Congratulations!!!,Winner is player1\")\r\n return \"Winner player1\"\r\n elif points_player2>points_player1:\r\n print(\"Congratulations!!!,Winner is player2\")\r\n return \"Winner player2\"\r\n else:\r\n print(\"Its a draw\") \r\n return \"Its a draw\"", "def check_winner(self):\n if DotsAndBoxesState.score1 > 4: # Because the total score is fixed at nine, if player's score is greater than four,\n # then the player is the winner.\n return \"A\"\n else:\n return \"B\"", "def swissPairings(t_name):\n rank = playerStandings(t_name)\n pairs = []\n if len(rank) % 2 != 0:\n for i in range(len(rank), 0, -1):\n if played(rank[i-1][0], rank[i-1][0]) == False:\n ele = rank[i-1]\n reportMatch(ele[0], ele[0], ele[0])\n rank.remove(ele)\n break\n for i in range(0, len(rank)/2):\n p1 = rank[0]\n rank.remove(p1)\n for player in rank:\n if(played(p1[0], player[0])):\n continue\n p2 = player\n rank.remove(p2)\n break\n pairs.append((p1[0], p1[1], p2[0], p2[1]))\n return pairs", "def getMatchup(self, name):\n if self.atHome:\n return (name, self.opponent)\n else:\n return (self.opponent, name)", "def utility(board):\n winning_player = winner(board)\n\n if winning_player is X:\n return 1\n if winning_player is O:\n return -1\n \n return 0", "def pick_winner(self):\r\n self.convert_face_cards_to_numbers()\r\n main_suit = self.cards_int[0][0] # suit that dominates this round\r\n winner_index = 0 # by default 1st player wins until we find a bigger card in same suit\r\n winner_card_value = self.cards_int[0][1]\r\n for index, card in enumerate(self.cards_int[1:]):\r\n if main_suit == card[0]:\r\n if winner_card_value < card[1]:\r\n winner_index = index+1\r\n winner_card_value = card[1]\r\n\r\n return winner_index", "def swiss_pairings():\n\n DB = connect()\n c = DB.cursor()\n match_count = c.execute(\"SELECT COUNT(*) FROM matches\")\n c.execute(\"SELECT player_id, player_name FROM current_standings\")\n standings = c.fetchall()\n DB.commit\n c.execute(\"SELECT player_id, player_name FROM seed_initial_round\")\n seed = c.fetchall()\n DB.commit()\n\n \"\"\" Get player_count from count_players function \"\"\"\n player_count = count_players()\n \"\"\" determine if playercount is an even number \"\"\"\n if is_even(player_count) == True:\n pairings = []\n\n \"\"\" randomly seed matches if no matches have been played. \"\"\"\n if match_count == 0:\n for x in range(0, player_count-1, 2):\n pairings.append(seed[x] + seed[x+1])\n else:\n for x in range(0, player_count-1, 2):\n pairings.append(standings[x] + standings[x+1])\n\n else: raise ValueError(\"The tournament requires and even number of players. 
\\\n Please add or remove a single player.\")\n \"\"\" close the DB and return the match pairings \"\"\"\n DB.close()\n return pairings", "def calc_winner(self):\n pass", "def getWinner(board):\n players = [X, O]\n num_symbols_in_line = 3\n for player in players:\n # check rows\n for row in board:\n line_count = row.count(player)\n if line_count == num_symbols_in_line:\n return player\n \n # check columns\n for col_i in range(len(board[0])):\n line_count = 0\n for row_i in range(len(board)):\n if board[row_i][col_i] == player:\n line_count += 1\n if line_count == num_symbols_in_line:\n return player\n \n # check vertical from top left to bottom right\n line_count = 0\n for vert_cell in range(len(board)):\n if board[vert_cell][vert_cell] == player:\n line_count += 1\n if line_count == num_symbols_in_line:\n return player\n \n # check vertical from top right to bottom left\n line_count = 0\n col_i = len(board) - 1\n for row_i in range(len(board)):\n if board[row_i][col_i] == player:\n line_count += 1\n col_i -= 1\n if line_count == num_symbols_in_line:\n return player\n\n return None", "def _get_winner(computer_choice, player_choice):\n if player_choice not in choices:\n return 'Invalid choice'\n if computer_choice == player_choice:\n return tie\n if player_choice == defeated_by[computer_choice]:\n return win.format(player_choice, computer_choice)\n else:\n return lose.format(computer_choice,player_choice)", "def utility(board):\n winner_player = winner(board)\n if winner_player == X:\n return 1\n elif winner_player == O:\n return -1\n else:\n return 0", "def winner(board):\n for i in (O, X):\n for j in range(3):\n if (board[j][0] == i and board[j][1] == i and board[j][2] == i):\n return i\n if (board[0][j] == i and board[1][j] == i and board[2][j] == i):\n return i\n if (board[0][0] == i and board[1][1] == i and board[2][2] == i):\n return i\n if (board[2][0] == i and board[1][1] == i and board[0][2] == i):\n return i\n return None", "def decide_winner(self, user, computer):\n user_index = choice.index(user)\n computer_index = choice.index(computer)\n diff = user_index - computer_index\n if diff == -2 or diff == 1:\n return [1, 0]\n elif diff == 0:\n return [0, 0]\n else:\n return [0, 1]", "def winner(board):\n \n possible_wins = []\n row1 = board[0]\n row2 = board[1]\n row3 = board[2]\n col1 = [board[0][0],board[1][0],board[2][0]]\n col2 = [board[0][1],board[1][1],board[2][1]]\n col3 = [board[0][2],board[1][2],board[2][2]]\n diag1 = [board[0][0],board[1][1],board[2][2]]\n diag2 = [board[2][0],board[1][1],board[0][2]]\n \n possible_wins.append(row1)\n possible_wins.append(row2)\n possible_wins.append(row3)\n possible_wins.append(col1)\n possible_wins.append(col2)\n possible_wins.append(col3)\n possible_wins.append(diag1)\n possible_wins.append(diag2)\n \n for trait in possible_wins:\n if trait.count(\"X\") == 3:\n return \"X\"\n elif trait.count(\"O\") == 3:\n return \"O\"\n \n return None", "def score2(self,player, board):\r\n numPlayer = 0\r\n numOpp = 0\r\n for i in self.squares():\r\n if board[i] == player:\r\n numPlayer+= 1\r\n else:\r\n numOpp+=1\r\n return numPlayer-numOpp", "def swissPairings():\n standings = playerStandings()\n pairings = []\n if (len(standings) % 2 != 0):\n return \"Error, an evening number of players are supported\"\n for i, player in enumerate(standings):\n if (i % 2 == 0):\n id1, name1 = player[0], player[1]\n else:\n pairings.append( (id1, name1, player[0], player[1] ) )\n return pairings", "def evaluate_winner(line):\n\n trump = line.split('|')[1]\n 
candidates = line.split('|')[0]\n\n first = candidates.split(' ')[0]\n second = candidates.split(' ')[1]\n\n value_first = evaluate_value(first, trump.strip())\n value_second = evaluate_value(second, trump.strip())\n\n winner =[]\n\n if value_first > value_second:\n winner.append(first)\n elif value_first < value_second:\n winner.append(second)\n else:\n winner.append(first)\n winner.append(second)\n\n return winner", "def eval_winner(p1, p2):\n\n switch = {\n 1: \"Rock\",\n 2: \"Paper\",\n 3: \"Scissors\"}\n if((p1 == 1 and p2 == 3) or (p1 == 2 and p2 == 1) or (p1 == 3 and p2 == 2)):\n print(\"P1 threw \" + switch.get(p1))\n print(\"P2 threw \" + switch.get(p2))\n return 1\n elif((p2 == 1 and p1 == 3) or (p2 == 2 and p1 == 1) or (p2 == 3 and p1 == 2)):\n print(\"P1 threw \" + switch.get(p1))\n print(\"P2 threw \" + switch.get(p2))\n return 2\n else:\n print(\"P1 threw \" + switch.get(p1))\n print(\"P2 threw \" + switch.get(p2))\n return 0", "def comp101_game(points, server):\n \n player0_points = 0 # sets initial 'points' of both players\n player1_points = 0 \n final0_score = 0 # final 'score' of both players in a manner peculiar to\n final1_score = 0 # tennis\n remainder = [] # stores the remaining 'points' if the game has ended\n tennis_score = {0: 0, 1: 15, 2: 30, 3: 40, 4: 40} # use to convert\n # 'points' to tennis\n # 'scores'\n winner = None # initial winner of the game\n \n # tests every 'points' in 'points'\n for number in points:\n \n # finds the 'point' differences between both players and make\n # sure it is a positive value\n points_diff = abs(player0_points - player1_points)\n \n if (player0_points >= 4 or player1_points >= 4):\n \n # the case when a 'winner' is found and stores the \n # remaining 'points'\n if points_diff >= 2:\n if player0_points > player1_points:\n winner = 0\n final0_score = \"W\"\n \n else:\n winner = 1\n final1_score = \"W\"\n remainder.append(number)\n \n # the case when there is no 'winner' yet \n else:\n \n if number == 0:\n player0_points += 1\n\n else:\n player1_points += 1\n \n # updates the latest 'point' difference\n points_diff = abs(player0_points - player1_points)\n \n # ONLY runs if a player 'won' the game after exactly getting \n # his next 'point'\n if points_diff >= 2:\n \n if player0_points > player1_points:\n winner = 0\n final0_score = \"W\"\n \n else:\n winner = 1\n final1_score = \"W\"\n \n # if one of the player gets an \"advantage\"\n elif points_diff == 1:\n \n if player0_points > player1_points:\n final0_score = \"Ad\"\n final1_score = 40\n else:\n final0_score = 40\n final1_score = \"Ad\"\n \n # if no players get an \"advantage\" or 'wins' the game\n else:\n final0_score = 40\n final1_score = 40\n \n else:\n \n # adds a 'point' to a 'player' and converts player 'points' to \n # 'scores' in a manner peculiar to tennis\n if number == 0:\n player0_points += 1\n final0_score = tennis_score[player0_points]\n \n else:\n player1_points += 1\n final1_score = tennis_score[player1_points]\n \n # updates the latest score difference\n points_diff = abs(player0_points - player1_points)\n \n # checks if a player gets an \"advantage\" / 'wins' the game at exactly \n # his 4th 'point'\n if (player0_points == 4 or player1_points == 4):\n \n # when a player 'won' the game\n if points_diff >= 2:\n \n if player0_points > player1_points:\n winner = 0\n final0_score = \"W\"\n else:\n winner = 1\n final1_score = \"W\"\n \n # when a player gets an \"advantage\"\n elif points_diff == 1:\n \n if player0_points > player1_points:\n final0_score = \"Ad\"\n 
else:\n final1_score = \"Ad\" \n \n # determines which player score is displayed first based on 'server'\n if server == 0:\n score = str(final0_score) + \"-\" + str(final1_score)\n else:\n score = str(final1_score) + \"-\" + str(final0_score)\n \n return (score, winner, remainder)", "def getPlayer(board):\n count_x, count_o = 0, 0\n for row in board:\n count_x += row.count(X)\n count_o += row.count(O)\n if count_x > count_o:\n return O\n return X", "def reportMatch(winner, loser):\n DB = connect()\n c = DB.cursor()\n c.execute(\"SELECT id FROM matches WHERE pID1 = %s and pID2= %s or pID2 = %s and pID1= %s\", (winner,loser,))\n result= c.fetchone()\n c.execute(\"INSERT INTO wins VALUES(%s,%s,%s)\",(winner,loser,result[0],))\n DB.commit()\n DB.close()", "def winner(board):\n\n # Check for horizontal wins\n for row in board:\n if row[0] == row[1] == row[2] and row[0] is not None:\n return row[0]\n\n # Check for vertical wins\n for i in range(3):\n if board[0][i] == board[1][i] == board[2][i] and board[0][i] is not None:\n return board[0][i]\n\n # Check for diagonal wins\n if board[0][0] == board[1][1] == board[2][2] and board[0][0] is not None:\n return board[0][0]\n if board[0][2] == board[1][1] == board[2][0] and board[0][2] is not None:\n return board[0][2]\n\n # If there is no winner, return None\n return None", "def findNextSuitablePlayer(self, n):\n\t\tfor _ in range(len(self.getPlayers())):\n\t\t\tplayer, seat = self.findNthPlayerFromSeat(n, 1)\n\t\t\tif self.playerList[seat].money > 0 and self.playerList[seat].isHandLive == True:\n\t\t\t\treturn (player, seat)\n\t\t\telse:\n\t\t\t\tn = seat", "def winner(board):\n \n for m in [\"XXX\", \"OOO\"]:\n # horizontal\n for row in range(3):\n if board[row][0] == board[row][1] == board[row][2]:\n return board[row][0]\n # vertical\n for col in range(3):\n if board[0][col] == board[1][col] == board[2][col]:\n return board[0][col]\n # diagonal\n if board[0][0] == board[1][1] == board[2][2]:\n return board[1][1]\n if board[0][2] == board[1][1] == board[2][0]:\n return board[1][1]\n return None", "def winner(board):\n for i in range(3):\n firstnumber=board[i][0]\n if firstnumber!=EMPTY:\n secondnumber=board[i][1]\n if secondnumber==firstnumber:\n if board[i][2]==secondnumber:\n return secondnumber\n else:\n continue\n else:\n continue\n else:\n continue\n for i in range(3):\n firstnumber=board[0][i]\n if firstnumber!=EMPTY:\n secondnumber=board[1][i]\n if secondnumber==firstnumber:\n if board[2][i]==secondnumber:\n return secondnumber\n else:\n continue\n else:\n continue\n else:\n continue\n firstnumber=board[0][0]\n if firstnumber!= EMPTY:\n if board[1][1]==firstnumber:\n if board[2][2]==firstnumber:\n return firstnumber\n firstnumber=board[2][0]\n if firstnumber!= EMPTY:\n if board[1][1]==firstnumber:\n if board[0][2]==firstnumber:\n return firstnumber\n return None\n raise NotImplementedError", "def get_player(self, number):\n num = int(number)\n assert (num in [1, 2])\n return self.player_1 if num == 1 else self.player_2", "def winner(board):\n if board[0][0] != EMPTY and (board[0][0] == board[0][1] == board[0][2] \n or board[0][0] == board[1][1] == board[2][2] \n or board[0][0] == board[1][0] == board[2][0]):\n return board[0][0]\n\n elif board[1][1] != EMPTY and (board[1][0] == board[1][1] == board[1][2]\n or board[0][1] == board[1][1] == board[2][1]):\n return board[1][1]\n \n elif board[2][2] != EMPTY and (board[0][2] == board[1][2] == board[2][2]\n or board[2][0] == board[2][1] == board[2][2]):\n return board[2][2]\n \n elif board[2][0] 
!= EMPTY and (board[2][0] == board[1][1] == board[0][2]):\n return board[2][0]\n \n else:\n None", "def declare_winner(board):\n results = count_chips(board, 0), count_chips(board, 1)\n winner = '¡El color {} ha ganado la partida!'\n for i in range(2):\n if results[i] > results[i - 1]:\n print(winner.format(PLAYER_COLORS[i]) + '\\n')\n if results[0] == results[1]:\n print('¡Empate!\\n')\n print('Puntajes:')\n for i in range(2):\n print('{}: {} punto(s)'.format(PLAYER_COLORS[i].title(), results[i]))", "def next_player(self):\n # Counter is a useful class that counts objects.\n count = Counter(self.board)\n if count.get('X', 0) > count.get('O', 0):\n return 'O'\n return 'X'", "def utility(board) -> int:\n winner_player = winner(board)\n if winner_player == X:\n return 1\n elif winner_player == O:\n return -1\n else:\n return 0", "def utility(board):\n\n # Determine winner\n victor = winner(board)\n\n # Assign proper values accordingly\n if victor == X:\n return 1\n elif victor == O:\n return -1\n else:\n return 0", "def get_winner(self, board):\r\n for p_id in self.player_ids:\r\n win_array = np.array([p_id] * self.board_width, dtype=np.int8)\r\n for i in range(self.board_width):\r\n # check rows\r\n if np.array_equal(board[i], win_array):\r\n return p_id\r\n # check columns\r\n elif np.array_equal(board[:, i], win_array):\r\n return p_id\r\n # check leading diagonal\r\n elif np.array_equal(np.diagonal(board), win_array):\r\n return p_id\r\n # check non-leading diagonal\r\n elif np.array_equal(np.diagonal(np.flipud(board)), win_array):\r\n return p_id\r\n # return nan if no wins losses or draws\r\n for i in np.nditer(board):\r\n if i == 0:\r\n return np.nan\r\n # must be a draw so return 0\r\n return 0", "def other_player(cls, player):\n return 0 if player == 1 else 1", "def player(board):\n xcount, ocount = 0, 0\n for row in board:\n xcount += row.count(X)\n ocount += row.count(O)\n if xcount > ocount:\n return O\n elif xcount == 0 and ocount == 0:\n return X\n elif xcount == ocount:\n return X", "def winner(board):\n\n for sign in [X, O]:\n for i in range(3):\n if board[0][i] == sign and board[1][i] == sign and board[2][i] == sign:\n return sign\n elif board[i][0] == sign and board[i][1] == sign and board[i][2] == sign:\n return sign\n\n if board[0][0] == sign and board[1][1] == sign and board[2][2] == sign:\n return sign\n elif board[2][0] == sign and board[1][1] == sign and board[0][2] == sign:\n return sign\n\n return None", "def determine_round_winner(self):\n\n if self.getX() + self.SIZE[0] < 0:\n # point for player two\n return 2\n elif self.getX() > Configuration.windowWidth:\n # point for player one\n return 1", "def winner(board):\r\n A = board[0]\r\n B = board[1]\r\n C = board[2]\r\n\r\n if A.count(\"X\") == 3 or B.count(\"X\") == 3 or C.count(\"X\") == 3:\r\n return X\r\n elif A.count(\"O\") == 3 or B.count(\"O\") == 3 or C.count(\"O\") == 3:\r\n return O\r\n elif A[0] == B[0] and A[0] == C[0]:\r\n if A[0] == X:\r\n return X\r\n elif A[0] == O:\r\n return O\r\n elif A[1] == B[1] and A[1] == C[1]:\r\n if A[1] == X:\r\n return X\r\n elif A[1] == O:\r\n return O\r\n elif A[2] == B[2] and A[2] == C[2]:\r\n if A[2] == X:\r\n return X\r\n elif A[2] == O:\r\n return O\r\n elif A[0] == B[1] and A[0] == C[2]:\r\n if A[0] == X:\r\n return X\r\n elif A[0] == O:\r\n return O\r\n elif A[2] == B[1] and A[2] == C[0]:\r\n if A[2] == X:\r\n return X\r\n elif A[2] == O:\r\n return O\r\n else:\r\n return None", "def __winner(self, x, y):\n\t\tlines = self.__extract_lines(x, y)\n\n\t\tfor line in 
lines:\n\t\t\twinner = self.__winner_in_line(line)\n\t\t\tif winner != 0:\n\t\t\t\treturn winner\n\n\t\treturn 0", "def row_wise_checking(player_):\n if board[0] == board[1] == player_:\n return 2\n elif board[1] == board[2] == player_:\n return 0\n elif board[3] == board[4] == player_:\n return 5\n elif board[4] == board[5] == player_:\n return 3\n elif board[6] == board[7] == player_:\n return 8\n elif board[7] == board[8] == player_:\n return 6\n else:\n return -1", "def getUtility(board):\n winner = getWinner(board)\n if winner == X:\n return 1\n if winner == O:\n return -1\n return 0", "def player(board):\n num_x = sum([list.count(X) for list in board])\n num_o = sum([list.count(O) for list in board])\n if num_x == num_o:\n return X\n else:\n return O", "def player(board):\n\n # Game is over\n if terminal(board):\n return None\n\n # Count number of occurences of X and O\n x_count = 0\n o_count = 0\n for row in board:\n for box in row:\n if box == X:\n x_count = x_count + 1\n elif box == O:\n o_count = o_count + 1\n # When move count is tied, X is next\n if x_count <= o_count:\n return X\n # When X has moved once more than O, next move is O\n else:\n return O", "def check_win(players: List[Player]) -> Tuple[bool, Optional[Player]]:\n total_players = len(players)\n for player in players:\n if player.influence == 0:\n total_players -= 1\n if total_players == 1:\n for player in players:\n if player.influence >0:\n return True, player\n return False, None", "def swissPairings():\n list_pair = []\n\n db = connect()\n c = db.cursor()\n query = (\"SELECT id, name \\\n FROM standings ORDER BY total_wins DESC;\")\n c.execute(query)\n listOfPairWin = c.fetchall()\n\n if len(listOfPairWin) % 2 == 0:\n for i in range(0, len(listOfPairWin), 2):\n listOfPlayersInPair = listOfPairWin[i][0], listOfPairWin[i][1], \\\n listOfPairWin[i+1][0], listOfPairWin[i+1][1]\n list_pair.append(listOfPlayersInPair)\n \n\n else:\n raise ValueError('You need to have even number of players!')\n \n\n db.close()\n return list_pair", "def winner(board):\n x_in_board = []\n o_in_board = []\n winning_positions = [\n [[0, 0], [0, 1], [0, 2]],\n [[1, 0], [1, 1], [1, 2]],\n [[2, 0], [2, 1], [2, 2]],\n [[0, 0], [1, 0], [2, 0]],\n [[0, 1], [1, 1], [2, 1]],\n [[0, 2], [1, 2], [2, 2]],\n [[0, 0], [1, 1], [2, 2]],\n [[0, 2], [1, 1], [2, 0]]\n ]\n\n for i in range(len(board)):\n for j in range(len(board)):\n if board[i][j] == X:\n x_in_board.append([i, j])\n elif board[i][j] == O:\n o_in_board.append([i, j])\n\n for i in winning_positions:\n if i[0] in x_in_board and i[1] in x_in_board and i[2] in x_in_board:\n return X\n elif i[0] in o_in_board and i[1] in o_in_board and i[2] in o_in_board:\n return O\n\n return None", "def swissPairings():\n\n # Returns a sorted list of player standings.\n playerStandingsList = playerStandings()\n\n # Open DB.\n conn, c = main.connect()\n\n # Get each player's details.\n c.execute(\"SELECT playerID, playerName from player;\")\n allPlayers = c.fetchall()\n totalPlayers = len(allPlayers)\n\n swissPairingsList = []\n\n # As this is a legacy function, bye rounds should not be accounted for.\n i = 0\n while i < totalPlayers:\n\n p1 = playerStandingsList[i]\n p2 = playerStandingsList[i+1]\n\n swissPairingsList.append((p1[0], p1[1], p2[0], p2[1]))\n\n i += 2\n\n # return the new list (id1, name1, id2, name2)\n return swissPairingsList", "def has_winner(self):\r\n\r\n\t\t\"Check for horizonal win\"\r\n\r\n\t\tfor x in range(0, 3):\r\n\r\n\t\t\tif self.game_board[x][0] == self.game_board[x][1] and 
self.game_board[x][1] == self.game_board[x][2]:\r\n\r\n\t\t\t\treturn self.game_board[x][0]\r\n\r\n\t\t\"Check for vertical win\"\r\n\r\n\t\tfor y in range(0, 3):\r\n\r\n\t\t\tif self.game_board[0][y] == self.game_board[1][y] and self.game_board[1][y] == self.game_board[2][y]:\r\n\r\n\t\t\t\treturn self.game_board[0][y]\r\n\r\n\t\t\"Check for diagonal from left to right\"\r\n\t\r\n\t\tif self.game_board[0][0] == self.game_board[1][1] and self.game_board[1][1] == self.game_board[2][2]:\r\n\t\t\treturn self.game_board[1][1]\t\r\n\r\n\t\tif self.game_board[0][2] == self.game_board[1][1] and self.game_board[1][1] == self.game_board[2][0]:\r\n\t\t\treturn self.game_board[1][1]\t\r\n\r\n\t\tif self.count == 8:\r\n\r\n\t\t\treturn \"Tie\"\r\n\r\n\t\telse:\r\n\r\n\t\t\treturn \"0\"\r\n\r\n\r\n\t\tpass" ]
[ "0.7398989", "0.7312626", "0.7270267", "0.7123724", "0.7110263", "0.7102775", "0.70441735", "0.70315427", "0.6907012", "0.6888181", "0.6876374", "0.6870772", "0.6866335", "0.6822034", "0.67885065", "0.6718184", "0.67145145", "0.6701902", "0.6701902", "0.6701902", "0.6685644", "0.6658558", "0.66033256", "0.65943676", "0.65841913", "0.65819955", "0.65748245", "0.6556906", "0.6529403", "0.64735377", "0.64224356", "0.6420875", "0.64201075", "0.6373649", "0.63717836", "0.6369104", "0.63687426", "0.6367309", "0.63604534", "0.6353573", "0.63526416", "0.63419586", "0.63376635", "0.6336922", "0.63246506", "0.63071245", "0.6302091", "0.6297078", "0.62939805", "0.6280195", "0.62764597", "0.62747616", "0.62728995", "0.6272271", "0.6266933", "0.62610734", "0.62534356", "0.6253355", "0.62261534", "0.62182", "0.62135386", "0.6202704", "0.6202474", "0.61979455", "0.6196843", "0.6193758", "0.61825573", "0.6180913", "0.61780375", "0.6175256", "0.6171777", "0.61583257", "0.614865", "0.6144806", "0.6140968", "0.6140468", "0.61360526", "0.61359626", "0.61341316", "0.6132242", "0.6131057", "0.6130042", "0.61074805", "0.6103229", "0.61009556", "0.6093715", "0.60909414", "0.6090875", "0.6084996", "0.60805595", "0.60706925", "0.6067216", "0.60655326", "0.60570395", "0.6056917", "0.60521245", "0.60501987", "0.6050032", "0.60487884", "0.60475254" ]
0.60619944
93
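The scores and rank above close a record whose candidate snippets are all tic-tac-toe winner/utility checks. For comparison only — this sketch is not part of the dataset, and the 3x3 board encoding of 'X', 'O', or None is an assumption — a minimal standalone winner check is:

def winner(board):
    # board: 3x3 list of 'X', 'O', or None; returns the winning mark or None
    lines = list(board)                                            # rows
    lines += [[board[r][c] for r in range(3)] for c in range(3)]   # columns
    lines.append([board[i][i] for i in range(3)])                  # main diagonal
    lines.append([board[i][2 - i] for i in range(3)])              # anti-diagonal
    for line in lines:
        if line[0] is not None and line[0] == line[1] == line[2]:
            return line[0]
    return None

Collecting the eight possible lines once and scanning them in a single loop avoids the duplicated row/column/diagonal branches seen in several of the scored candidates.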
Initialize the parameters of the logistic regression
def __init__(self, input, n_in, n_out): # start-snippet-1 # initialize with 0 the weights W as a matrix of shape (n_in, n_out) self.W = theano.shared( value=numpy.zeros( (n_in, n_out), dtype=theano.config.floatX ), name='W', borrow=True ) # initialize the baises b as a vector of n_out 0s self.b = theano.shared(value=numpy.zeros( (n_out,), dtype=theano.config.floatX ), name='b', borrow=True ) # symbolic expression for computing the matrix of class-membership probabilities where: # W is a matrix where column-k represent the separation hyper plain for class-k # x is a matrix where row-j represents input training sample-j # b is a vector where element-k represent the free parameter of hyper plane-k self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b) # symbolic description of how to compute prediction as class whose probability is maximal self.y_pred = T.argmax(self.p_y_given_x, axis=1) # end-snippet-1 # parameters of the model self.params = [self.W, self.b] # keep track of model input self.input = input
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_initial_params(model: LogisticRegression):\n n_classes = 15 # threat types\n n_features = 33 # Number of features in dataset\n model.classes_ = np.array([i for i in range(15)])\n\n model.coef_ = np.zeros((n_classes, n_features))\n if model.fit_intercept:\n model.intercept_ = np.zeros((n_classes,))", "def __init__(self, reg_penalty='l2', reg_inv=1.0, k_fold=5, random_state=0):\n print(\"Initialize model Logistic Regression\")\n self.reg_penalty = reg_penalty\n self.reg_inv = reg_inv\n self.k_fold = k_fold\n self.random_state = random_state\n self.model = sklearn.linear_model.LogisticRegression(penalty=self.reg_penalty,\n C=self.reg_inv,\n max_iter=1000, \n random_state=self.random_state)", "def __init__(self, **kwargs):\n super(LogisticRegression, self).__init__()\n self.C = kwargs.pop(\"C\", 100)\n self.clf = _LogisticRegression(C=self.C, **kwargs)", "def __init__(self, log=True, normalize=False):\r\n self.model = LinearRegression(normalize=normalize)\r\n self.log = log", "def test_logistic_regression_c_parameter(params, X_train, X_test, y_train, y_test):", "def __init__(self, train, validation=None, initial_weight=None,\n loss_function_name='logistic',\n calculate_weight='gradient',\n regularizer=None, regularizer_p=None):\n # Initialize the super class with given data.\n # Transform the y into {0,1}\n y, tx = train\n y[np.where(y < 0)] = 0\n train = (y, tx)\n if validation:\n val_y, val_tx = validation\n val_y[np.where(val_y < 0)] = 0\n validation = (val_y, val_tx)\n super(LogisticRegression, self).__init__(train, validation,\n initial_weight=initial_weight,\n loss_function_name=loss_function_name,\n cal_weight=calculate_weight,\n regularizer=regularizer,\n regularizer_p=regularizer_p)\n # Set predicted label\n self.pred_label = [-1, 1]", "def __init__(self, estimator, **kwargs):\n super(LogisticRegression, self).__init__(\n estimator, **kwargs)\n\n self.estimator = estimator", "def train_model(self):\r\n alpha, accuracy_rate = self.select_model()\r\n # Initialize logistic regression with alpha(learning rate)\r\n lg = logisticregression(C=alpha)\r\n # Train the model.\r\n lg.fit(self.training_data, self.target_data)\r\n # Save the trained model as .pkl file.\r\n joblib.dump(value=lg, filename=self.intention_id+'.pkl', compress=1)\r\n print \"Estimated Parameters of Logistic Regression\"\r\n # Estimated parameters of logistic regression.\r\n print lg.get_params()", "def __init__(self):\n self.label = \"Logistic regression\"\n self.description = \"This tool is a useful complement to Weights-of-Evidence Calculate Response tool as Logistic Regression does not make the assumption of conditional independence of the evidence with regards to the training sites. Using the evidence and assocaited weights tables, this tool creates the outputs the response and standard deviation rasters. The calculations are based on the Gen_Class attribute in the weights table and the type of evidence. Please note that the Logistic Regression tool accepts a maximum of 6,000 unique conditions or it fails. Also note that there is an upper limit of 100,000 unit cells per class in each evidence raster layer. If a class in an evidence raster goes above this, the script contains a function to increase the unit cell size to ensure an upper limit of 100,000. 
These issues are unable to be fixed due to a hard coded limitation in the Logistic Regression executable sdmlr.exe.\"\n self.canRunInBackground = False\n self.category = \"Weights of Evidence\"", "def on_train_begin(self, logs={}):\n self._beta = []", "def _fit(self, _X, _y):\n\n self.model = linear_model.LogisticRegression(penalty=self.penalty, random_state=self.seed,\n solver='saga', n_jobs=self.n_jobs)\n self.model.fit(_X, _y)", "def __init__(self):\n logger.debug('Initializing %s model.' % self.__class__.__name__)\n self.dependent_attributes = ['_alpha',\n '_log_like',\n '_gradient','_K',\n '_log_det']\n self._previous_parameters = None # previous parameters from last call\n self.grad_method = None # could be {'finite_difference','adjoint'}\n self.noise_var_constraint = '+ve' # Gaussian noise variance constraint\n return", "def __init__(self):\n super().__init__()\n import sklearn\n import sklearn.linear_model\n self.model = sklearn.linear_model.LogisticRegression", "def log_reg(x_train, y_train):\n\n log_reg_classifier = LogisticRegression(max_iter=1000, solver='lbfgs')\n log_reg_classifier.fit(x_train, y_train)\n return log_reg_classifier\n\n # log_reg_classifier.fit(x_train, y_train)", "def logistic(weights, data, targets, hyperparameters):\n\n # TODO: Finish this function\n\n return f, df, y", "def __init__(self, seed = None):\n self.data_dir = pkg_resources.resource_filename('logistic_control_variate', 'data/')\n self.generate_data(seed)\n # Holds logistic regression object for this example\n self.lr = None", "def __init__(self, estimator = LogisticRegression()): \n\t self.estimator = estimator", "def __init__(self,name,exp_base, random_seed=None,version=None):\n self.exp_base = exp_base\n self.log_fun = lambda x: np.log(x) / np.log(self.exp_base)\n self.exp_fun = lambda x: np.power(self.exp_base,x)\n\n super(LogNormalBehaviorModel, self).__init__(name, random_seed, version)", "def set_model_params(\n model: LogisticRegression, params: LogRegParams\n) -> LogisticRegression:\n model.coef_ = params[0]\n if model.fit_intercept:\n model.intercept_ = params[1]\n return model", "def train_logistic_regression(train_x, train_y):\n\n logistic_regression_model = LogisticRegression(penalty='l2', C=1.0)\n logistic_regression_model.fit(train_x, train_y)\n return logistic_regression_model", "def main():\r\n x = [\r\n [ 1,1 ], [ 0,0 ], [ 1,0 ], [ 0,0 ], [ 0,0 ], [ 0,0 ],\r\n [ 0,0 ], [ 0,0 ], [ 1,1 ], [ 0,0 ], [ 0,0 ], [ 1,0 ],\r\n [ 1,0 ], [ 0,0 ], [ 1,1 ], [ 0,0 ], [ 1,0 ], [ 0,0 ]\r\n ]\r\n\r\n # Encoding of the correct classes for the training material\r\n y = [1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0]\r\n b = BinaryLogisticRegression(x, y)\r\n b.fit()\r\n b.print_result()", "def on_train_begin(self, logs={}):\n self.losses = []\n self.accuracies = []", "def logistic_regression(y, tx, initial_w, max_iters, gamma):\n return least_squares_SGD(y, tx, initial_w, max_iters, gamma, loss_function=logistic_loss, gradient=logistic_grad)", "def __init__(self):\n super().__init__(derivatives=BCELossWithLogitsDerivatives())", "def LogisticRegression_sklearn(X_train, X_test, y_train, y_test):\n\n\tlog_reg = LogisticRegression()\n\tlog_reg.fit(X_train, y_train.ravel())\n\tyPred =log_reg.predict(X_test)\n\n\t#Printing metrics of the logistic regression model\n\tprint('Accuracy:', metrics.accuracy_score(y_test, yPred))\n\tprint('Precision:', metrics.precision_score(y_test, yPred))\n\tprint('Recall', metrics.recall_score(y_test, yPred))\n\n\t#confusion matrix\n\n\tconfusionMatrix = 
matrix.confusion_matrix(y_test, yPred)\n\tsb.heatmap(pd.DataFrame(confusionMatrix), annot= True, fmt='g')\n\tplt.title('Confustion matrix with default value 1')\n\tplt.ylabel('True values')\n\tplt.xlabel('Predicted values')\n\tplt.show()", "def initialise_parameters(self):\n # Weights\n init = select_w_init(self.w_init)\n if self.w_gain:\n gain = nn.init.calculate_gain('relu')\n init(self.relations, gain=gain)\n else:\n init(self.relations)\n\n # Biases\n if self.b_init:\n init = select_b_init(self.b_init)\n init(self.sbias)\n init(self.pbias)\n init(self.obias)", "def fit_logistic_regression():\n\n logger.debug(\"Running the fit_logistic_regression function now\")\n\n #Loading the configuration\n with open(os.path.join(\"config\",\"config.yml\"), \"r\") as f:\n config = yaml.safe_load(f)\n\n #Loading and pre processing the data\n logger.debug(\"Loading and pre processing the data\")\n train_df = load_data(config[\"load_data\"][\"train_file\"])\n train_df = pre_process_data(train_df, resample = True, resample_count = 500000)\n\n #Defining Pipeline\n pipeline = Pipeline([\n ('tfidf', TfidfVectorizer(analyzer='word', token_pattern=r'[A-Za-z0-9@-]+')),\n ('model', LogisticRegression(random_state=12345, verbose = 1, solver = 'saga')),\n ])\n\n #Defining parameters to vary\n parameters = {\n 'tfidf__max_df': (0.25, 0.5, 0.75),\n 'tfidf__max_features': (None, 5000, 10000, 50000),\n 'tfidf__ngram_range': ((1, 1), (1, 2)),\n 'model__C': (0.01, 1, 100)\n }\n\n scoring_list = [\"accuracy\", \"f1\", \"precision\", \"recall\", \"roc_auc\"]\n \n #Performing 5fold CV to determine best hyperparameters\n model = GridSearchCV(pipeline, parameters, cv=5,\n n_jobs=-1, verbose=1, scoring=scoring_list, refit='f1',)\n\n t0 = datetime.datetime.now()\n\n model.fit(train_df[\"Review\"].tolist(), train_df[\"Ratings\"].to_numpy())\n \n logger.info(\"Grid Search performed in {}\".format(str(datetime.datetime.now()-t0)))\n\n #Saving results\n res_df = pd.DataFrame(model.cv_results_)\n res_df.to_csv(os.path.join(config[\"summary_stats\"][\"save_location\"], \"LogisticRegressionResults.csv\"))\n \n #Saving the model\n pickle.dump(model, open(os.path.join(config[\"models\"][\"save_location\"], \"LogisticRegression.pkl\"),'wb'))\n\n return", "def __init__(self, in_features, out_features):\n \n ########################\n # PUT YOUR CODE HERE #\n #######################\n\n\n self.params = {'weight': 0.0001 * np.random.randn(out_features, in_features), 'bias': np.zeros((out_features, 1))}\n self.grads = {'weight': np.zeros((out_features, in_features)), 'bias': np.zeros((out_features, 1))}\n\n\n\n ########################\n # END OF YOUR CODE #\n #######################", "def initialize_model_params():\n beta_0 = np.array([0., 0.])\n mu_0 = 0.\n return beta_0, mu_0", "def __init__(self, loglike, data, x, sigma):\n\n # add inputs as class attributes\n self.likelihood = loglike\n self.data = data\n self.x = x\n self.sigma = sigma", "def _initialize(self, X, resp, *arg, **kwarg):\n n_samples, _ = X.shape\n\n if self.mv_stat:\n weights, params = _estimate_mv_stat_parameters(\n self.stat, X, resp) # self.reg_covar\n else:\n weights, params = _estimate_1d_stat_parameters(\n self.stat, X, resp) # self.reg_covar\n weights /= n_samples\n\n self.weights_ = (weights if self.weights_init is None\n else self.weights_init)\n self.params_ = params if self.params_init is None else self.params_init", "def train_logistic_regression(X_train_input, y_train_input, C=1):\r\n from sklearn.linear_model import LogisticRegression\r\n logr_clf = 
LogisticRegression(C=C)\r\n logr_clf.fit(X_train_input, y_train_input)\r\n return logr_clf", "def __init__(self, loglike, data, x, sigma):\n\n # add inputs as class attributes\n self.likelihood = loglike\n self.data = data\n self.x = x\n self.sigma = sigma\n\n # initialise the gradient Op (below)\n self.logpgrad = LogLikeGrad(self.likelihood, self.data, self.x, self.sigma)", "def logistic(weights, data, targets, hyperparameters):\n y = logistic_predict(weights, data)\n\n #####################################################################\n # TODO: #\n # Given weights and data, return the averaged loss over all data #\n # points, gradient of parameters, and the probabilities given by #\n # logistic regression. #\n #####################################################################\n f = None\n df = None\n\n f = evaluate(targets, y)[0]\n\n N = len(data)\n M = len(weights) - 1 \n temp = np.ones([N, M + 1])\n temp[: N, : M] = np.array(data)\n\n\n df = np.zeros([M+1, 1])\n\n df[:, 0] = np.array([[np.mean([(y.flatten()[i] - targets.flatten()[i]) * temp[i][j] for i in range(0, N)]) for j in range(0, M + 1)],])\n\n # df = np.matrix([[np.mean([(y[i] - targets[i]) * temp[i][j] for i in range(0, N)]) for j in range(0, M + 1)],])\n\n #####################################################################\n # END OF YOUR CODE #\n #####################################################################\n return f, df, y", "def __init__(self, in_features, out_features):\n \n ########################\n # PUT YOUR CODE HERE #\n #######################\n self.params = {'weight': np.random.normal(loc = 0, scale=0.0001, size=(out_features,in_features)),\\\n 'bias': np.zeros((1, out_features))}\n \n self.grads = {'weight': np.zeros((out_features,in_features)),\\\n 'bias': np.zeros((1, out_features))}\n ########################\n # END OF YOUR CODE #\n #######################", "def __init__(self, classific_method=\"LogisticRegression\"):\n\t\tself.classific_method = classific_method", "def logistic_regression(y, tx, initial_w, max_iters, gamma):\n\tif len(initial_w.shape)==2:\n\t\tinitial_w = initial_w.reshape((max(initial_w.shape)))\n\tif len(y.shape)==2:\n\t\ty = y.reshape((max(y.shape)))\n\n\tw = logistic_regression_gradient_descent(y, tx, initial_w, max_iters, gamma)\n\t\n\tloss = calculate_nll(y, tx, w)\n\n\treturn w, loss", "def _fit(self):\n\n\t\tclf = LogisticRegression()\n\t\tclf.fit(inputs, labels)\n\n\t\treturn clf", "def __init_finalaf(self, i,h1,classes):\n self.params['W'+i]=np.random.randn(h1,classes)*self.weight_scale\n self.params['b'+i]=np.zeros(classes)", "def logistic(self, data, weights, biases):\n\n state_weight_prods = np.dot(data, weights)\n print(-state_weight_prods - biases)\n activations = 1.0 / (1 + np.exp(-state_weight_prods - biases))\n plt.plot(state_weight_prods, activations)\n plt.show()\n return activations", "def logistic_reg(training_data):\r\n \r\n \"\"\" Setting guesses for minimum and maximum values of regularization parameter then\r\n find the value of parameter that minimizes error on cross validation data. If\r\n local minimum is found the return this model. 
If not, extend minimum or maximum \r\n appropriately and repeat \"\"\"\r\n from sklearn.linear_model import LogisticRegression\r\n C_min = 1.0e-5\r\n C_max = 1.0e5\r\n regularization_flag = 1 # To set 1 until local minimum is found\r\n regularization_param = 0\r\n \r\n# while regularization_flag != 0:\r\n# regularization_param, regularization_flag = set_reg_param(training_data, cv_data, alpha_min, alpha_max)\r\n# if regularization_flag == -1:\r\n# \"\"\" The local minimum is at point less than alpha_min \"\"\"\r\n# alpha_min = alpha_min * 0.3\r\n# if regularization_flag == 1:\r\n# \"\"\" The local minimum is at point greater then alpha_max \"\"\"\r\n# alpha_max = alpha_max * 3\r\n \r\n lr = LogisticRegression (C=C_max, random_state=0)\r\n lr.fit(training_data.X, training_data.y)\r\n return lr, C_max", "def resetParams(self):\n self.prediction = cons.init_pred # Classifier payoff - initialized to a constant initial payoff value\n self.error = cons.init_err # Classifier error - initialized to a constant initial error value\n self.fitness = cons.init_fit # Classifier fitness - initialized to a constant initial fitness value", "def logistic_regression(y, tx, initial_w, max_iters, gamma):\n \n # Define parameters to store w and loss\n ws = [initial_w]\n losses = []\n w = initial_w\n y = (y + 1) / 2 # [-1, 1] -> [0, 1]\n for n_iter in range(max_iters):\n # computes gradient and loss\n\n grad = compute_gradient_log(y, tx, w)\n loss = compute_loss_log(y, tx, w)\n\n #updates w\n\n w = w - gamma * grad\n # store w and loss\n\n ws.append(w)\n losses.append(loss)\n #print(\"logistic regression: Gradient Descent({bi}/{ti}): loss={l}\".format(\n # bi=n_iter, ti=max_iters - 1, l=loss, w0=w[0], w1=w[1]), end=\"\\r\")\n \n return w, loss", "def initialize_parameters(self):\n for i in range(1, self.L):\n self.W[i - 1] = np.random.randn(self.layer_dims[i], self.layer_dims[i - 1]) * 0.01\n self.b[i - 1] = np.zeros((self.layer_dims[i], 1))", "def __init__(self, base_model='LogisticRegression', number_model=50, \n hidden_layer_sizes=(100,), activation='relu',\n kernel='poly', degree=3, gamma='auto',\n criterion='gini', reg_penalty='l2', reg=0.001, random_state=0):\n self.number_model = number_model\n r = random_state\n # Initialise all_model list\n self.all_model = []\n for i in range(number_model):\n if base_model=='Perceptron':\n curr_model = Perceptron(reg_penalty=reg_penalty, reg=reg,\n random_state=i+r*100)\n self.all_model.append(curr_model.model)\n elif base_model=='MLPerceptron':\n curr_model = MLPerceptron(hidden_layer_sizes=hidden_layer_sizes,\n activation=activation, reg=reg, random_state=i+r*100)\n self.all_model.append(curr_model.model)\n elif base_model=='LogisticRegression':\n curr_model = LogisticRegression(reg_penalty=reg_penalty,\n reg_inv=reg, random_state=i+r*100)\n self.all_model.append(curr_model.model)\n elif base_model=='ModelSVM':\n curr_model = ModelSVM(kernel=kernel, degree=degree,\n gamma=gamma, reg=reg, random_state=i+r*100)\n self.all_model.append(curr_model.model)\n elif base_model=='ModelDecisionTree':\n curr_model = ModelDecisionTree(criterion=criterion, random_state=i+r*100)\n self.all_model.append(curr_model.model)", "def __init__(self, *args, **kwargs):\n self.classes = [0,1] # (default to 0/1; replace during training)\n self.theta = np.array([]) # placeholder value before training\n\n if len(args) or len(kwargs): # if we were given optional arguments,\n self.train(*args,**kwargs) # just pass them through to \"train\"", "def reg_logistic_regression(y, tx, lambda_, initial_w, 
max_iters, gamma):\n # initializing the weights\n w = initial_w\n\n # regularized logistic regression\n for iter in range(max_iters):\n # updating the weights\n grad = log_likelihood_gradient(y, tx, w)+2*lambda_*w\n # if iter % (max_iters//2) == 0:\n #print(log_likelihood_loss(y, tx, w)+lambda_*np.squeeze(w.T.dot(w)))\n w -= gamma*grad\n loss = log_likelihood_loss(y, tx, w)+lambda_*np.squeeze(w.T.dot(w))\n return w, loss", "def logistic_regression(y, tx, initial_w, max_iters, gamma, verbose=False):\n return gradient_descent(y, tx, initial_w, max_iters, gamma, \n compute_logistic_loss, compute_logistic_gradient, verbose=verbose)", "def fit(self, x, y):\n # Note Logistic Regression Runtime\n start_time = time.time()\n\n # Converting Pandas DataFrame to Numpy arrays\n if not type(x).__module__ == np.__name__:\n x = x.to_numpy()\n if not type(y).__module__ == np.__name__:\n y = y.to_numpy()\n\n # Insert a column of 1 in the feature vector X for the bias term in the weights\n x = np.insert(x,0,1,axis=1)\n \n # Verify dimension of input\n if len(x) != len(y):\n print(\"The number of input features vector must be to be the same as the number of target variables\")\n else:\n losses = self.gradient_descent(x,y)\n\n # Note end time\n end_time = time.time()\n\n # Log runtime\n print(\"Logistic Regression training time: {0:.2f}s\".format(end_time - start_time))\n \n return losses", "def init_loss_and_optimizer(self):\n self.criterion = CrossEntropyLoss()\n self.optimizer = Adam(self.model.parameters(), lr=self.hyper_parameters['lr'])", "def train_logisticRegression(data: np.array, labels: np.array)->None:\n\n n_examples = np.size(data, 0)\n n_features = np.size(data, 1)\n n_categories = np.size(labels, 1)\n\n data = np.hstack((np.ones((n_examples, 1)), data))\n\n print(data[0:5, :])\n\n X_train, X_test, y_train, y_test, idx_test = split_data(data, labels, 0.7)\n\n convergence_goal = 1e-3\n learning_rate = 0.01\n\n theta = np.random.uniform(size=((n_features+1, n_categories)))\n\n for i in range(n_categories):\n\n cost_var = 1\n\n previous_cost = 1e6\n iterations = 0\n cost_to_plot = []\n\n while cost_var > convergence_goal:\n iterations += 1\n cost, grad = costFunction(X_train, y_train[:, i], theta[:, i])\n theta[:, i] = update_theta(theta[:, i], grad, learning_rate)\n cost_var = previous_cost - cost\n previous_cost = cost\n if iterations == 1: cost_var = 1\n cost_to_plot.append(cost)\n # print(cost)\n\n plt.plot(range(iterations), cost_to_plot, 'g-', label = 'cost')\n plt.xlabel('iterations')\n plt.ylabel('cost')\n # plt.show()\n\n predictions = lrPredict(theta, X_test)\n\n print(predictions[0:5, :])\n print(y_test[0:5, :])\n\n accuracy = np.mean([p == l for p, l in zip(predictions, y_test)])\n print(\"Accuracy = {}\".format(accuracy))\n\n pass", "def __init__(\n self, log_likelihood: float, log_prior: float, weight: float, kwargs=None\n ):\n self.log_likelihood = log_likelihood\n self.log_prior = log_prior\n self.weight = weight\n self.kwargs = {\n tuple(key.split(\".\")) if isinstance(key, str) and \".\" in key else key: value\n for key, value in (kwargs or dict()).items()\n }", "def XavierInit(self):\n\n raw_std = (2 / (self.num_input + self.num_output))**0.5\n if 'relu' == self.act_function:\n init_std = raw_std * (2**0.5)\n elif 'sigmoid' == self.act_function:\n init_std = raw_std\n else:\n init_std = raw_std # * 4\n\n self.W = np.random.normal(0, init_std, (self.num_input, self.num_output))\n self.b = np.random.normal(0, init_std, (1, self.num_output))\n self.v_W = 0\n self.v_b = 0", "def 
__call__(self, parameter_values, random_state=None):\n self.train_model(parameter_values, random_state=random_state)\n log_dict = self.simulate(random_state)\n return log_dict", "def __init__(self,m):\n # initialize model parameters\n \n # w is the m x 1 vector of weights.\n # m: num of features\n self.w = np.random.rand(m)", "def initialize_parameters(n_a,n_x,n_y):\n np.random.seed(1)\n Wax=np.random.randn(n_a,n_x)*0.01 #input to hidden\n Waa=np.random.randn(n_a,n_a)*0.01 #hidden to hidden\n Wya=np.random.randn(n_y,n_a)*0.01 #hidden to output\n b=np.zeros((n_a,1)) #hidden bias\n by=np.zeros((n_y,1)) #output bias\n \n parameters={\"Wax\":Wax,\"Waa\":Waa,\"Wya\":Wya,\"b\":b,\"by\":by}\n return parameters", "def params_init(self) -> None:\n # Initialize weights and biases with uniform distribution.\n nn.init.uniform_(self.emb.weight, self.init_lower, self.init_upper)\n nn.init.uniform_(self.fc_e2h[1].weight, self.init_lower, self.init_upper)\n nn.init.uniform_(self.fc_e2h[1].bias, self.init_lower, self.init_upper)\n for lyr in range(self.n_lyr):\n self.stack_rnn[2 * lyr].params_init()\n nn.init.uniform_(self.fc_h2e[0].weight, self.init_lower, self.init_upper)\n nn.init.uniform_(self.fc_h2e[0].bias, self.init_lower, self.init_upper)", "def logistic_pen(weights, data, targets, hyperparameters):\n\n # TODO: Finish this function\n\n return f, df, y", "def initialize(self): \n self.optimizer = torch.optim.Adam(self.model.parameters(), lr=config.LR)", "def __init__(self, in_features, out_features):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n\n self.in_features = in_features\n self.out_features = out_features\n\n self.__MEAN = 0\n self.__STD = 0.0001\n\n self.params = {\n 'weight': np.random.normal(loc=self.__MEAN, scale=self.__STD, size=(out_features, in_features)), \n 'bias': np.zeros(out_features),\n }\n self.grads = {\n 'weight': None, \n 'bias': None,\n }\n\n self.input_cache = None\n ########################\n # END OF YOUR CODE #\n #######################", "def logistic_regression(y, tx, initial_w, max_iters, gamma):\n # initializing the weights\n w = initial_w\n\n # logistic regression\n for n_iter in range(max_iters):\n # updating the weights\n grad = log_likelihood_gradient(y, tx, w)\n w -= gamma*grad\n if n_iter % (max_iters//10) == 0:\n print(log_likelihood_loss(y, tx, w))\n return w, log_likelihood_loss(y, tx, w)", "def set_params(self, params: Dict):\n\n if params['training_instances'] is not None:\n self.training_instances = params['training_instances']\n if params['n'] is not None:\n self.n = params['n']\n if params['lda'] is not None:\n self.lda = params['lda']\n if params['verbose'] is not None:\n self.verbose = params['verbose']\n\n self.num_features = self.training_instances[0].get_feature_count()\n self.w = None\n self.b = None", "def __init__(self, x_train, model):\n self.x_train = x_train\n self.model = model", "def set_fit_params(self):\n\n self.p0 = np.array([self.A_arr, self.T_a])\n # initial guess at A_arr and T_a\n\n self.popt, self.pcov = curve_fit(\n self.get_eta_fit, self.T_exp, self.eta_exp, p0=self.p0\n )\n\n self.A_arr = self.popt[0]\n self.T_a = self.popt[1]\n\n self.T_array = self.T_model", "def trainRegressionModel(X,y):\n # # instantiate a logistic regression model, and fit with X and y\n # model = LogisticRegression()\n # model = model.fit(X, y)\n # # check the accuracy on the training set\n # print(model.score(X, y))\n #X['intercept'] = 1.0\n #del X['isCapitalized']\n #del X['isNN']\n #del X['isNNP']\n #del X['isJJ']\n 
#del X['isUpper']\n #del X['isPrecedingIN']\n logit = sm.Logit(y, X)\n result = logit.fit()\n print(result.summary())\n print(result.conf_int())\n model = LogisticRegression()\n model = model.fit(X, y)\n print(model.score(X, y))\n print(y.mean())\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)\n model2 = LogisticRegression()\n model2.fit(X_train, y_train)\n # predict class labels for the test set\n predicted = model.predict(X_test)\n print(predicted)\n for i in predicted:\n if i==1:\n print(\"Test:\"+str(i))\n print(max(predicted))\n #generate class probabilities\n probs = model2.predict_proba(X_test)\n print(probs)\n # generate evaluation metrics\n print(\"Accuracy: \"+str(metrics.accuracy_score(y_test, predicted)))\n print(\"AUC: \"+str(metrics.roc_auc_score(y_test, probs[:, 1])))\n print(metrics.confusion_matrix(y_test, predicted))\n print(metrics.classification_report(y_test, predicted))\n\n from sklearn.cross_validation import cross_val_score\n # evaluate the model using 10-fold cross-validation\n scores = cross_val_score(LogisticRegression(), X, y, scoring='accuracy', cv=10)\n print(scores)\n print(scores.mean())", "def logistic_fit(self, penalty: str = 'l2', c: float = 1.0):\r\n self.LogisticModel = LogisticRegression(solver='liblinear', penalty=penalty, C=c).fit(self.x, self.y)", "def train(self):\n df = self.df\n self.scaler = MinMaxScaler()\n self.scaler.fit(df)\n df[df.columns] = self.scaler.transform(df)\n\n\n X_train, y_train = get_X_y(df, self.n_days, self.length , self.style)\n X_train = np.array(X_train)\n X_train.shape = (X_train.shape[0], X_train.shape[2])\n\n self.clf = LogisticRegression().fit(X_train, y_train)\n\n #es = EarlyStopping(monitor = 'accuracy',mode = 'min' , verbose = 1, patience = 100, restore_best_weights = True)", "def __init__(self, X = None, Y = None):\n if X:\n self.X = builder.X\n self.num_examples = self.X.shape[0]\n self.num_features = self.X.shape[1] - 1\n else:\n self.num_examples = 100\n self.num_features = 1\n self.X = self.default_single_feature_X()\n \n if Y:\n self.Y = Y\n else:\n self.Y = self.default_linear_related_Y()\n \n self.theta_vector = None", "def _init_model_params(self):\n super()._init_model_params()\n\n if 'e' in self.init_params:\n if self.init_type == 'uniform':\n if self.nr_no_train_de == 0:\n self.B = [\n np.full(\n (self.n_states, self.n_features[i]), 1.0 / self.n_features[i])\n for i in range(self.n_emissions)\n ]\n else:\n check_if_attributes_set(self, attr='e')\n else:\n if self.nr_no_train_de == 0:\n self.B = [\n np.random.rand(self.n_states, self.n_features[i])\n for i in range(self.n_emissions)\n ]\n for i in range(self.n_emissions):\n normalise(self.B[i], axis=1)\n\n else:\n check_if_attributes_set(self, attr='e')", "def init_parameters(self):\n stdv = 1. 
/ math.sqrt(self.weight.data.size(1))\n self.weight.data.uniform_(-stdv, stdv)\n if self.bias is not None:\n self.bias.data.uniform_(-stdv, stdv)", "def __init__(self, fitHisto=None, trainHisto=None, kernel='RBF', hParams={}):\n if fitHisto is None:\n raise ValueError(\"Must pass a fit histogram to GPFitter()!\")\n self.fitHisto = fitHisto\n self.trainHisto = trainHisto\n self.kernelFunc = kernel #internally the self.kernel variable will hold the actual kernel object.\n self.hParams = hParams\n # Fill all the arrays from the histos.", "def build_logistic_regr():\n logistic_pipeline = None\n\n logistic_pipeline = Pipeline([\n ('vect', CountVectorizer()),\n ('clf', LogisticRegression()), \n ])\n \n return logistic_pipeline", "def test_train_logist(x_train_variable, y_train_dep):\n # Ensure the function works\n try:\n lrc = cls.train_logistic(x_train_variable, y_train_dep)\n logging.info(\"Successful Logistic Model\")\n except Exception as err:\n logging.error(\"Errors in Fitting the Logistic Regression\")\n raise err\n return lrc", "def on_train_begin(self, logs={}):\n self.losses = []\n self.val_losses = []", "def __init__(self, parameters={}):\n # Assumes that a bias unit has been added to feature vector as the last feature\n # If usecolumnones is False, it should ignore this last feature\n self.params = {'usecolumnones': True}\n self.reset(parameters)", "def __init__(self, numpy_rng, input, n_in, hidden_layers_sizes, n_out):\n # instance variables\n self.numpy_rng = numpy_rng\n self.input = input\n self.n_in = n_in\n self.hidden_layers_sizes = hidden_layers_sizes\n self.n_layers = len(hidden_layers_sizes)\n self.n_out = n_out\n\n self.hidden_layers = []\n self.params = []\n\n self.initialize_variables()\n\n\n ################\n ## Prediction ##\n ################\n self.y_pred = self.logistic_regression_layer.y_pred", "def reg_logistic_regression(y, tx, lambda_ , initial_w, max_iters, gamma):\n \n # Define parameters to store w and loss\n ws = [initial_w]\n losses = []\n w = initial_w\n y = (y + 1) / 2 # [-1, 1] -> [0, 1]\n \n for n_iter in range(max_iters):\n # computes gradient and loss\n\n grad = compute_gradient_log(y, tx, w)+2*lambda_*np.linalg.norm(w)\n loss = compute_loss_log(y, tx, w)+ lambda_*(np.linalg.norm(w)**2)\n\n #updates w\n\n w = w - gamma * grad\n # store w and loss\n\n ws.append(w)\n losses.append(loss)\n #print(\"regularised logistic regression: Gradient Descent({bi}/{ti}): loss={l}\".format(\n # bi=n_iter, ti=max_iters - 1, l=loss, w0=w[0], w1=w[1]), end=\"\\r\")\n return w, loss", "def logistic_regression(y, tx, initial_w=None, max_iters=100, gamma=0.009, batch_size=1):\n # init parameters\n if np.all(initial_w == None): initial_w = np.zeros(tx.shape[1])\n threshold = 1e-8\n losses = []\n y = (1 + y) / 2\n # build tx\n w = initial_w\n\n # start the logistic regression\n for i in range(max_iters):\n # get loss and update w.\n for y_batch, tx_batch in batch_iter(y, tx, batch_size=batch_size, num_batches=1):\n w, _ = learning_by_gradient_descent(y_batch, tx_batch, w, gamma)\n # converge criterion\n losses.append(calculate_loss(y,tx,w))\n if len(losses) > 1 and np.abs(losses[-1] - losses[-2]) < threshold:\n break\n #if i % int(max_iters/5) == 0:\n #print(losses[-1],i,'/{tot}'.format(tot=max_iters))\n\n return w,losses[-1]", "def __init__(self, rng, input, n_in, n_hidden, n_out):\r\n\r\n # Since we are dealing with a one hidden layer MLP, this will translate\r\n # into a HiddenLayer with a tanh activation function connected to the\r\n # LogisticRegression layer; the 
activation function can be replaced by\r\n # sigmoid or any other nonlinear function\r\n self.hiddenLayer = HiddenLayer(rng=rng, input=input,\r\n n_in=n_in, n_out=n_hidden,\r\n activation=T.tanh)\r\n\r\n # The logistic regression layer gets as input the hidden units\r\n # of the hidden layer\r\n self.logRegressionLayer = LogisticRegression(\r\n input=self.hiddenLayer.output,\r\n n_in=n_hidden,\r\n n_out=n_out)\r\n\r\n # L1 norm ; one regularization option is to enforce L1 norm to\r\n # be small\r\n self.L1 = abs(self.hiddenLayer.W).sum() \\\r\n + abs(self.logRegressionLayer.W).sum()\r\n\r\n # square of L2 norm ; one regularization option is to enforce\r\n # square of L2 norm to be small\r\n self.L2_sqr = (self.hiddenLayer.W ** 2).sum() \\\r\n + (self.logRegressionLayer.W ** 2).sum()\r\n\r\n # negative log likelihood of the MLP is given by the negative\r\n # log likelihood of the output of the model, computed in the\r\n # logistic regression layer\r\n self.negative_log_likelihood = self.logRegressionLayer.negative_log_likelihood\r\n # same holds for the function computing the number of errors\r\n self.errors = self.logRegressionLayer.errors\r\n\r\n # the parameters of the model are the parameters of the two layer it is\r\n # made out of\r\n self.params = self.hiddenLayer.params + self.logRegressionLayer.params", "def _train(self, log_prob):\n raise NotImplementedError", "def __init_af(self,i,h1,h2):\n self.params['W'+i]=np.random.randn(h1,h2)*self.weight_scale\n self.params['b'+i]=np.zeros(h2)\n if self.use_batchnorm:\n self.params['gamma'+i]=np.ones(h2)\n self.params['beta'+i]=np.zeros(h2)", "def spark_LogisticRegression(*args, **kwargs): \n return LogisticRegression(*args, **kwargs)", "def logistic_regression(y, tx, initial_w, max_iters, gamma, debug = False):\n losses, ws = gradient_descent(y, tx, initial_w, max_iters, gamma, loss_f = model_logistic.loss, grad_f = model_logistic.grad, debug = debug)\n return get_last_ans(ws, losses)", "def __init__(self, num_parameters=1, init=0.25):\n super(PReLU, self).__init__()\n self.num_parameters = num_parameters\n self.weight = Parameter(Tensor(num_parameters).fill_(init))", "def Initialize(log_like, log_prior, model_func, mean, cov):\n\n curr_params = proposal_rule(cov, mean, (len(mean)-1)/2)\n print('Init params:', curr_params) \n print_params(curr_params, int((len(mean)-1)/2))\n curr_model = model_func(curr_params)\n print('Init model', curr_model)\n curr_like = log_like(curr_model)\n print('Init like:', curr_like) \n curr_prior = log_prior(curr_params)\n print('Init prior', curr_prior)\n return(curr_params, curr_model, curr_like, curr_prior)", "def stability_logistic(x, y, **kwargs):\n rlr = RandomizedLogisticRegression(n_jobs=kwargs.get('n_jobs', 4))\n if 'param' in kwargs:\n rlr.set_params(**kwargs['param'])\n rlr.fit(x, y)\n return rlr.get_support()", "def train_logistic_regression(x_train, y_train, learning_rate, fit_intercept=False, max_iter=500):\r\n if fit_intercept:\r\n intercept = np.ones(x_train.shape[0], 1)\r\n x_train = np.hstack((intercept, x_train)) # hstacks merges 2 arrays column wise\r\n weights = np.zeros(x_train.shape[1])\r\n for iteration in range(max_iter):\r\n weights = update_weights(x_train, y_train, weights, learning_rate)\r\n # printing cost for every 100 iterations\r\n if iteration % 100 == 0:\r\n print(calculate_cost(x_train, y_train, weights))\r\n return weights", "def initialisation(self):\n self.create_variables()\n self.create_placeholders()\n self.build_model()\n self.reset_lr(None, True)\n self.build_loss()\n 
self.initialised = True", "def reg_logistic_regression(y, tx, lambda_, initial_w, max_iters, gamma, verbose=False): \n reg_loss, reg_grad = add_l2_reg(compute_logistic_loss, \n compute_logistic_gradient,\n lambda_)\n \n return gradient_descent(y, tx, initial_w, max_iters, gamma, reg_loss, reg_grad)", "def __init__(self, *args, **kwargs):\n self.params = kwargs\n self.output_len = kwargs['num_neurons']\n self.input_len = kwargs['input_len']\n self.weights = Vector(data=np.random.randn(self.output_len, self.input_len))\n self.biases = Vector(data=np.zeros((self.output_len, 1)))\n self.input_activations = None\n self.output_activations = Vector()", "def reset_parameters(self):\n logger.info('===== Initialize %s with normal distribution =====' % self.__class__.__name__)\n for n, p in self.named_parameters():\n init_like_transformer_xl(n, p, std=0.02)", "def __init__(self, **kwargs):\n super(RidgeRegressionComb, self).__init__(**kwargs)\n self.time_window = None\n self.alphas = None\n self.lst_features = None\n self.target_var = None\n self.n_outputs = None\n self.history_buffer = None\n self.feature_aggregator = None\n self.target_aggregator = None\n self.model = None\n self.is_adaptive = None\n #self.pub_feature_rel = None\n self.pub_r2 = None\n self.pub_std = None\n # Feature space scaling parameters\n self.scaler = None\n self.r2 = None\n self.pub_mean = None\n self.mean = None\n self.std = None\n self.cache_file = []", "def __init__(self, lam=1.0):\n self.lam = lam\n\n # these are set in fit\n self.b = None # float\n self.w = None # (nvars, ) array", "def analysis(houses:pd.DataFrame) -> None:\n \n \"\"\"\n #Me just trying to fit the data without any outside influences\n f= f'SELLER_HOUSE ~ SQFT_PER + PRICE + C(LOCATION)' \n result= smf.logit(formula= str(f), data= houses).fit()\n print(result.summary2())\n y= ['SELLER_HOUSE']\n x= ['SQFT_PER', 'PRICE', 'LOC_699 - Not Defined', 'LOC_AA - Airport Area', 'LOC_CG - Columbus Grove',\n 'LOC_CV - Cypress Village', 'LOC_EASTW - Eastwood', 'LOC_EC - El Camino Real', 'LOC_GP - Great Park',\n 'LOC_IRSP - Irvine Spectrum', 'LOC_LGA - Laguna Altura', 'LOC_NK - Northpark', 'LOC_NW - Northwood', \n 'LOC_OC - Oak Creek', 'LOC_OH - Orchard Hills', 'LOC_OT - Orangetree', 'LOC_PS - Portola Springs', \n 'LOC_QH - Quail Hill', 'LOC_SH - Shady Canyon', 'LOC_SJ - Rancho San Joaquin', 'LOC_STG - Stonegate', \n 'LOC_Stonegate', 'LOC_TR - Turtle Rock', 'LOC_TRG - Turtle Ridge', 'LOC_UP - University Park',\n 'LOC_UT - University Town Center', 'LOC_WB - Woodbridge', 'LOC_WD - Woodbury', \n 'LOC_WI - West Irvine', 'LOC_WN - Walnut (Irvine)', 'LOC_WP - Westpark']\n x_train, x_test, y_train, y_test= train_test_split(houses[x], houses[y], test_size= 0.3, random_state= 500)\n logreg = LogisticRegression()\n logreg.fit(x_train, y_train.values.ravel())\n y_pred= logreg.predict(x_test)\n print('Accuracy of logistic regression classifier on test set:', round(logreg.score(x_test, y_test), 3))\n # This model is really bad\n \n \"\"\"\n \n \"\"\n houses= houses.drop(['DAYS_ON_MARKET', 'ADDRESS', 'LOCATION',\n 'STATUS', 'PROPERTY_TYPE', 'ZIP_CODE'], axis= 1)\n columns= houses.columns.values.tolist()\n y= ['SELLER_HOUSE']\n x= [i for i in columns if i not in y]\n \n # Over Sampling Using SMOTE \n x_train, _, y_train, _= train_test_split(houses[x], houses[y], test_size= 0.3, random_state= 500)\n x_columns= x_train.columns\n \n os= SMOTE(random_state= 0)\n os_x, os_y= os.fit_sample(x_train, y_train)\n os_x= pd.DataFrame(data= os_x, columns= x_columns)\n os_y= pd.DataFrame(data= 
os_y, columns= y)\n \n \n #Recursive Feature Elimination\n logreg= LogisticRegression(max_iter= 600)\n rfe= RFE(logreg, 20)\n rfe= rfe.fit(os_x, os_y.values.ravel())\n \n lst= [i for count, i in enumerate(x) if rfe.support_[count] == True]\n X= os_x[lst]\n Y= os_y['SELLER_HOUSE']\n \n \n #logit_model= sm.Logit(Y, X)\n #result= logit_model.fit()\n #print(result.summary2()) # Model choosen by RCE\n \n #These are features have a p-value less than 0.05\n final_x= ['BATHS', 'ZIP_92602.0', 'ZIP_92618.0', 'LOC_699 - Not Defined', 'LOC_TR - Turtle Rock', 'LOC_WD - Woodbury']\n #final_x= ['ZIP_92602.0', 'LOC_699 - Not Defined', 'LOC_TR - Turtle Rock', 'LOC_WD - Woodbury']\n X2= os_x[final_x]\n \n logit_model2= sm.Logit(Y, X2)\n result2= logit_model2.fit()\n print(result2.summary2()) # Final Model\n \n x_train2, x_test2, y_train2, y_test2= train_test_split(X2, Y, test_size= 0.3, random_state= 500)\n logreg = LogisticRegression()\n logreg.fit(x_train2, y_train2)\n \n y_pred= logreg.predict(x_test2)\n print('Accuracy of logistic regression classifier on test set:', round(logreg.score(x_test2, y_test2), 2))\n \n conf_matrix= confusion_matrix(y_test2, y_pred)\n print(conf_matrix)\n # So 22+61 correct predictions and 13+44 wrong predictions\n \n logit_roc_auc = roc_auc_score(y_test2, logreg.predict(x_test2))\n fpr, tpr, _ = roc_curve(y_test2, logreg.predict_proba(x_test2)[:,1])\n plt.figure()\n plt.plot(fpr, tpr, label='Logistic Regression (area = %0.2f)' % logit_roc_auc)\n plt.plot([0, 1], [0, 1],'r--')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('Receiver operating characteristic')\n plt.legend(loc=\"lower right\")\n plt.show()\n \"\"", "def __init__(self, dataset, model, quality_method, n_horizon = 10):\n LalEnv.__init__(self, dataset, model, quality_method)\n self.n_horizon = n_horizon", "def run_logistic_regression(training, testing, feature_cols, outcome_col):\n if 'intercept' not in training.columns:\n training['intercept'] = 1\n if 'intercept' not in testing.columns:\n testing['intercept'] = 1\n intercept_feature_cols = feature_cols + ['intercept']\n logit = sm.Logit(training[outcome_col], training[intercept_feature_cols])\n fitted_logit_model = logit.fit()\n logit_diagnostics = get_diagnostics(testing[outcome_col], testing[intercept_feature_cols], fitted_logit_model, model_type = 'logit')\n predicted_logit_probs = fitted_logit_model.predict(testing[intercept_feature_cols])\n\n return fitted_logit_model, logit_diagnostics, predicted_logit_probs", "def __init__(self):\n self.model = GaussianNB();\n self.X = iris.data\n self.y = iris.target", "def initialize_parameters(self, X):\n self.n_samples, self.n_visible = X.shape[:2]\n if self.marginal_description == 'discrete':\n values_in_data = set(np.unique(X).tolist())-set([self.missing_values])\n self.dim_visible = int(max(values_in_data)) + 1\n if not set(range(self.dim_visible)) == values_in_data:\n print(\"Warning: Data matrix values should be consecutive integers starting with 0,1,...\")\n assert max(values_in_data) <= 32, \"Due to a limitation in np.choice, discrete valued variables\" \\\n \"can take values from 0 to 31 only.\"\n self.initialize_representation()", "def __init__(self, K, scenario, distrib, σ=.5, α=0):\n self.K = K\n self.scenario = scenario\n self.distrib = distrib\n self.α = α\n self.σ = σ\n \n # initialize parameter theta\n if scenario == 'sparse':\n # sparse model\n self.θ = np.zeros(K)\n self.θ[0] = 0.5\n \n elif scenario == 'alpha':\n # 
exponential decrease model\n assert α != 0\n self.θ = np.ones(K)\n for k in range(1, K):\n self.θ[k] = self.θ[k] - (k/K)**self.α", "def init_params(self):\n self.conv = Conv(self.conv_layers[0][-1], self.out_channels, padding=self.padding,stride=self.stride)\n self.W = torch.randn(self.num_labels, self.cout_numel, requires_grad=True)\n self.T = torch.randn(self.num_labels, self.num_labels, requires_grad=True)", "def _initialize_parameters(self, layer_dimensions, layer_activations, cost_function):\n self.layer_dims = layer_dimensions\n self.layer_num = len(self.layer_dims)\n self.layer_activations = layer_activations\n self.parameters = {}\n self.cost_function = cost_function\n\n assert(len(self.layer_activations) == len(self.layer_dims),\n 'Number of layers in layer_dimensions: {} and layer_activations: {} are not matching'.format(self.layer_num, len(self.layer_activations)))\n\n for l in range(1, self.layer_num):\n self.parameters['W' + str(l)] = np.random.randn(self.layer_dims[l], self.layer_dims[l-1])\n self.parameters['b' + str(l)] = np.zeros(self.layer_dims[l], 1)" ]
[ "0.77980274", "0.7470837", "0.72140586", "0.6893265", "0.68832946", "0.6734596", "0.6729252", "0.66861194", "0.6655325", "0.65969175", "0.6565215", "0.65200406", "0.6474855", "0.64675874", "0.6451465", "0.6448672", "0.644657", "0.6324524", "0.6307155", "0.6290791", "0.62315893", "0.62301457", "0.6222248", "0.6154533", "0.61502033", "0.6148811", "0.6125417", "0.61204934", "0.61064243", "0.6093672", "0.6088524", "0.6080889", "0.6050219", "0.6046596", "0.6040557", "0.6031908", "0.6028566", "0.6023297", "0.60062516", "0.5993352", "0.5987748", "0.5983857", "0.59810305", "0.5980382", "0.5979143", "0.5969887", "0.5963381", "0.59565353", "0.5953519", "0.5939781", "0.59362453", "0.59268403", "0.5926609", "0.5925465", "0.59206647", "0.5918985", "0.5916183", "0.5914883", "0.59142774", "0.59103984", "0.590788", "0.5906587", "0.590478", "0.5889152", "0.58859813", "0.5872667", "0.58725154", "0.5858586", "0.58549976", "0.58484834", "0.58379346", "0.5833001", "0.58321357", "0.58266526", "0.58260936", "0.5820162", "0.58112425", "0.5806894", "0.5806598", "0.57934475", "0.579298", "0.5790313", "0.57857645", "0.5784461", "0.5782379", "0.5779636", "0.57781506", "0.57735914", "0.57718784", "0.57704157", "0.57665557", "0.57645375", "0.5755608", "0.5753251", "0.57422477", "0.5737859", "0.57312804", "0.57284355", "0.5725735", "0.5723162", "0.5720925" ]
0.0
-1
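The record above pairs the query "Initialize the parameters of the logistic regression" with a Theano implementation that zero-initializes W and b and predicts through a softmax. A NumPy-only sketch of the same initialization and forward pass (illustrative, not drawn from the dataset; the n_in/n_out names follow the record) is:

import numpy as np

def init_logreg_params(n_in, n_out):
    # zero-initialized weights and biases, mirroring the Theano record above
    W = np.zeros((n_in, n_out))
    b = np.zeros(n_out)
    return W, b

def predict_proba(X, W, b):
    # class-membership probabilities via softmax(X @ W + b)
    scores = X @ W + b
    scores -= scores.max(axis=1, keepdims=True)  # shift by row max for numerical stability
    exp_scores = np.exp(scores)
    return exp_scores / exp_scores.sum(axis=1, keepdims=True)

def predict(X, W, b):
    # predicted class = argmax of the class probabilities
    return predict_proba(X, W, b).argmax(axis=1)

Zero initialization is unproblematic here because softmax regression is convex; the stability shift before exponentiating is an addition of this sketch, not something the record's document performs.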
Return the mean of the negative loglikelihood of the prediction of this model under a given target distribution.
def negative_log_likelihood(self, y): # start-snippet-2 # y.shape[0] is (symbolically) the number of rows in y, i.e. number of examples (call it n) in the minibatch # T.arange(y.shape[0]) is a symbolic vector which will contain [0,1,2,... n-1] # T.log(self.p_y_given_x) is a matrix of Log-Probabilities (call it LP) with one row per example and one column per class # LP[T.arange(y.shape[0]),y] is a vector v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ..., LP[n-1,y[n-1]]] and # T.mean(LP[T.arange(y.shape[0]),y]) is the mean (across minibatch examples) of the elements in v, i.e., the mean log-likelihood across the minibatch. #print "y.ndim = ",y.ndim return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y]) # end-snippet-2
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def negative_log_likelihood(self):\n # y.shape[0] is (symbolically) the number of rows in y, i.e.,\n # number of examples (call it n) in the minibatch\n # T.arange(y.shape[0]) is a symbolic vector which will contain\n # [0,1,2,... n-1] T.log(self.p_y_given_x) is a matrix of\n # Log-Probabilities (call it LP) with one row per example and\n # one column per class LP[T.arange(y.shape[0]),y] is a vector\n # v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ...,\n # LP[n-1,y[n-1]]] and T.mean(LP[T.arange(y.shape[0]),y]) is\n # the mean (across minibatch examples) of the elements in v,\n # i.e., the mean log-likelihood across the minibatch.\n return -T.log(self.p_y_given_x)[T.arange(self.y.shape[0]), self.y]", "def negative_log_likelihood(self, y):\n return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])", "def negative_log_likelihood(self, y):\n return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])", "def negative_loglikelihood(targets, estimated_distribution):\n return -estimated_distribution.log_prob(targets)", "def negative_log_likelihood(self, y):\r\n return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])", "def negative_log_likelihood(self, y):\r\n return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])", "def negative_log_likelihood(self, y):\r\n # y.shape[0] is (symbolically) the number of rows in y, i.e.,\r\n # number of examples (call it n) in the minibatch\r\n # T.arange(y.shape[0]) is a symbolic vector which will contain\r\n # [0,1,2,... n-1] T.log(self.p_y_given_x) is a matrix of\r\n # Log-Probabilities (call it LP) with one row per example and\r\n # one column per class LP[T.arange(y.shape[0]),y] is a vector\r\n # v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ...,\r\n # LP[n-1,y[n-1]]] and T.mean(LP[T.arange(y.shape[0]),y]) is\r\n # the mean (across minibatch examples) of the elements in v,\r\n # i.e., the mean log-likelihood across the minibatch.\r\n return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])", "def negative_log_likelihood(self, y):\r\n \r\n return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])\r\n # end-snippet-2\r", "def negative_log_likelihood(self, y):\r\n # y.shape[0] is (symbolically) the number of rows in y, i.e., number of examples (call it n) in the minibatch\r\n # T.arange(y.shape[0]) is a symbolic vector which will contain [0,1,2,... 
n-1]\r\n # T.log(self.p_y_given_x) is a matrix of Log-Probabilities (call it LP) with one row per example and one column per class\r\n # LP[T.arange(y.shape[0]),y] is a vector v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ..., LP[n-1,y[n-1]]]\r\n # and T.mean(LP[T.arange(y.shape[0]),y]) is the mean (across minibatch examples) of the elements in v,\r\n # i.e., the mean log-likelihood across the minibatch.\r\n return T.log(self.p_y_given_x)[T.arange(y.shape[0]),y]", "def log_likelihood(y_true, y_pred):\n ll = np.sum(y_true * np.log(y_pred) - y_pred)\n return ll", "def __call__(self, y, pred, sample_weight=None):\n # logaddexp(0, v) == log(1.0 + exp(v))\n pred = pred.ravel()\n if sample_weight is None:\n return -2.0 * np.mean((y * pred) - np.logaddexp(0.0, pred))\n else:\n return (-2.0 / sample_weight.sum() *\n np.sum(sample_weight * ((y * pred) - np.logaddexp(0.0, pred))))", "def NLL(self,y):\n return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])", "def _negative_log_likelihood(self, y_true, y_pred):\n logL = 0\n # pre-calculate cumsum\n cumsum_y_pred = tf.cumsum(y_pred)\n hazard_ratio = tf.exp(y_pred)\n cumsum_hazard_ratio = tf.cumsum(hazard_ratio)\n if self.train_data['ties'] == 'noties':\n log_risk = tf.log(cumsum_hazard_ratio)\n likelihood = y_pred - log_risk\n # dimension for E: np.array -> [None, 1]\n uncensored_likelihood = likelihood * y_true\n logL = -tf.reduce_sum(uncensored_likelihood)\n else:\n # Loop for death times\n for t in self.train_data['failures']: \n tfail = self.train_data['failures'][t]\n trisk = self.train_data['atrisk'][t]\n d = len(tfail)\n dr = len(trisk)\n\n logL += -cumsum_y_pred[tfail[-1]] + (0 if tfail[0] == 0 else cumsum_y_pred[tfail[0]-1])\n\n if self.train_data['ties'] == 'breslow':\n s = cumsum_hazard_ratio[trisk[-1]]\n logL += tf.log(s) * d\n elif self.train_data['ties'] == 'efron':\n s = cumsum_hazard_ratio[trisk[-1]]\n r = cumsum_hazard_ratio[tfail[-1]] - (0 if tfail[0] == 0 else cumsum_hazard_ratio[tfail[0]-1])\n for j in range(d):\n logL += tf.log(s - j * r / d)\n else:\n raise NotImplementedError('tie breaking method not recognized')\n # negative average log-likelihood\n observations = tf.reduce_sum(y_true)\n return logL / observations", "def neg_log_like(output, target, sigma):\r\n \r\n exponent = -0.5*torch.sum(\r\n (target - output)**2/sigma**2\r\n , 1)\r\n\r\n log_coeff = -torch.sum(torch.log(sigma)) - len(sigma) * torch.log(torch.sqrt(torch.tensor(2*np.pi)))\r\n \r\n scale = 1 / len(exponent)\r\n \r\n return - scale * (log_coeff + exponent).sum()", "def log_likelihood(self):\r\n return (-0.5 * self.num_data * self.output_dim * np.log(2.*np.pi) -\r\n 0.5 * self.output_dim * self.K_logdet + self._model_fit_term() + self.likelihood.Z)", "def mean_absolute_error(y, y_pred, w):\n return np.average(np.abs(y_pred - y), weights=w)", "def calculate_negative_log_likelihood(self):\n data = self.played_points_hist[:self.t]\n kernel_matrix = self.kernel_fn(data, data, self.best_ard_params)\n c_matrix = kernel_matrix + (self.noise_sigma ** 2) * np.eye(data.shape[0])\n c_matrix_inv = np.linalg.inv(c_matrix)\n first_term = np.matmul(self.rews_hist[:self.t].T, np.matmul(c_matrix_inv, self.rews_hist[:self.t]))\n second_term = np.log(np.linalg.det(c_matrix))\n return first_term + second_term", "def get_lip_mean(self) -> float:\n if self._fitted:\n return self._model.get_lip_mean()\n else:\n raise ValueError(\"call ``fit`` before using ``get_lip_max``\")", "def loss_function(cls, logits, label, targeted):\n\n if targeted:\n adv_loss = - 
torch.gather(logits, 1, label)\n else:\n adv_loss = torch.gather(logits, 1, label)\n\n return adv_loss.mean()", "def log_likelihood(self):\n return -0.5*self.D*self.K_logdet + self._model_fit_term() + self.likelihood.Z", "def relative_likelihood(self):\n \n if self.num_hidden == 0:\n \n return T.exp(-self.compute_energy(self.x, self.batch_size))\n \n if self.num_hidden > 0:\n \n return T.exp(-self.compute_free_energy(self.x))", "def target_loss(self, inp_hist):\n active_targ = self.get_active_target(inp_hist)\n return torch.mean((inp_hist[:, 1:3] - active_targ) ** 2)", "def msll(Y_true, Y_pred, V_pred, Y_train):\n mt, st = Y_train.mean(), Y_train.std()\n ll = norm.logpdf(Y_true, loc=Y_pred, scale=np.sqrt(V_pred))\n rand_ll = norm.logpdf(Y_true, loc=mt, scale=st)\n msll = - (ll - rand_ll).mean()\n return msll", "def mean_bias_error(self) -> float:\n return float(np.sum(self.true - self.predicted) / len(self.true))", "def compute_loss(self,state,targets):\n reshaped_targets = targets.reshape(-1,2,1)\n\n # reshape targets to match the dimension of means\n for i in range(self.output_size-1):\n reshaped_targets = torch.cat((reshaped_targets,targets.reshape(-1,2,1)),2)\n\n # compute output distribution from model\n means,stds,weights = self.forward(state)\n\n # compute the mixed log pdf\n mixed_pdf = mixed_prob(means,stds,weights,reshaped_targets)\n\n # compute the average negative log likelihood\n return -torch.log(mixed_pdf).mean()", "def get_prediction_likelihood_without_complications(test_data, test_label, weight):\n pred, _ = get_output(weight, test_data)\n pred = pred[:,0]\n pred_like = np.multiply(test_label, np.log(pred + TOLERANCE)) + np.multiply(1.0-test_label, np.log(1.0-pred+ TOLERANCE))\n return np.exp(np.mean(pred_like))", "def mean_logprob(context, pos_lm):\n logprobs = []\n for tag in pos_lm.vocab:\n logprob = pos_lm.logscore(tag,context)\n if logprob != -float('inf'):\n logprobs.append(logprob)\n return np.mean(logprobs)", "def negative_log_likelihood(logits, targets):\n # if we rescale the targets so off is -1 and on is 1\n # then we can multiply through the logits\n # and sigmoid gives us the probabilities :)\n # because 1-sigmoid(x) = sigmoid(-x)\n targets = [(2.0 * targ) - 1.0 for targ in targets]\n probs = [tf.sigmoid(logit * targ) for logit, targ in zip(logits, targets)]\n probs = [tf.reduce_sum(tf.log(prob), reduction_indices=1)\n for prob in probs]\n return -tf.reduce_mean(tf.pack(probs))", "def underexposed(self):\n return self._underexposed_likelihood", "def loss(self, y_pred=None, y_true=None):\n ll = -0.5 * self.const - np.log(self.sigma_y) - 0.5 * (1. 
/ self.sigma_y ** 2) * ((y_pred - y_true) ** 2)\n return -ll.sum(dim=0)", "def predict_mean(self, X):\n mu = np.exp(self.intercept_)\n return mu", "def neg_log_likelihood(self,params: ndarray) -> float:\n\n return -self.compute_log_likelihood(params)", "def negative_mll(self, theta):\n return -self.marginal_log_likelihood(theta)", "def logloss_mc(y_true, y_prob, epsilon=1e-15):\n # normalize\n y_prob = y_prob / y_prob.sum(axis=1).reshape(-1, 1)\n y_prob = np.maximum(epsilon, y_prob)\n y_prob = np.minimum(1 - epsilon, y_prob)\n # get probabilities\n y = [y_prob[i, j] for (i, j) in enumerate(y_true)]\n ll = - np.mean(np.log(y))\n return ll", "def log_marginal_likelihood(self) -> tf.Tensor:\n L = tf.linalg.cholesky(self.likelihood.add_to(self.KXX))\n return tf.reduce_sum(multivariate_normal(self._Y, self._mean, L))", "def _get_mean(self):\n mu = self._get_conditional_negative_energy()\n return sigmoid(mu)", "def mean(self):\n return self.mu", "def __loss(self, h, y):\n return (-y*np.log(h)-(1-y)*np.log(1-h)).mean()", "def mean(self):\n return math.exp(self.mu + (self.sigma ** 2) / 2)", "def mean_underprediction(y_true, y_pred, sample_weight=None):\n y_t = _convert_to_ndarray_and_squeeze(y_true)\n y_p = _convert_to_ndarray_and_squeeze(y_pred)\n s_w = np.ones(len(y_p))\n if sample_weight is not None:\n s_w = _convert_to_ndarray_and_squeeze(sample_weight)\n\n err = y_p - y_t\n err[err > 0] = 0\n\n # Error metrics should decrease to 0 so have to flip sign\n return -np.dot(err, s_w) / s_w.sum()", "def log_likelihood(self, X, Y):\n\t\tr,c = twod(Y).shape\n\t\tif r == 1 and c != 1:\n\t\t\tY = twod(Y).T\n\n\t\tsoft = self.predict_soft(X)\n\t\treturn np.mean(np.sum(np.log(np.power(soft, Y, )), 1), 0)", "def MDN_loss(self, log_pi, mu, std, target):\n # Enforce the shape of target to be consistent with output dimension\n target = target.view(-1, self.data_dim)\n \n # Calculate Gaussian log-probabilities over batch for each mixture and each data dimension\n log_gaussian_probs = self._calculate_batched_logprob(mu=mu, \n std=std, \n x=target)\n \n # Calculate the loss via log-sum-exp trick\n # It calculates over K (mixing coefficient) dimension, produce tensor with shape [N, D]\n loss = -torch.logsumexp(log_pi + log_gaussian_probs, dim=1, keepdim=False)\n \n # Sum up loss over elements and average over batch\n loss = loss.sum(1).mean()\n \n return loss", "def log_marginal_likelihood(self) -> tf.Tensor:\n X, Y = self.data\n Y = Y[..., :-1]\n K = self.kernel(X)\n ks = self._add_noise_cov(K)\n L = tf.linalg.cholesky(ks)\n m = self.mean_function(X)\n\n # [R,] log-likelihoods for each independent dimension of Y\n log_prob = gpflow.logdensities.multivariate_normal(Y, m, L)\n return tf.reduce_sum(log_prob)", "def log_prob(target_distribution, x0, xs, accepteds):\n return np.mean([target_distribution.log_probability(x) for x in xs])", "def anger(self):\n return self._anger_likelihood", "def __call__(self, **kwargs):\n stddev = self.predictive_distribution.stddev(**kwargs)\n mean = self.predictive_distribution.mean(**kwargs)\n return normal_upper_confidence_bound(\n mean, stddev, exploration=self.exploration)", "def likelihood(self):\n if self._likelihood is None:\n self._likelihood = exp(self.log_likelihood)\n if self._likelihood == 0:\n self._likelihood = sys.float_info.min\n return self._likelihood", "def calc_loss(X, Y, model):\n Z = predict(X, model)\n return -(Y * np.log(Z)).sum() / len(Y)", "def negative_predictive_value(y_true, y_pred):\n\n cm = confusion_matrix(y_true, y_pred)\n return cm[0,0] / 
cm[:,0].sum()", "def nanmse(\n pred: Tensor,\n target: Tensor) -> Tensor:\n\n mask = torch.isnan(target)\n cnt = torch.sum(~mask, dtype=target.dtype)\n\n mse = torch.pow(pred - target, 2).sum() / cnt\n\n return mse", "def mean_absolute_error(y_true, y_pred):\n y_pred = tensor_conversion.convert_to_tensor_v2_with_dispatch(y_pred)\n y_true = math_ops.cast(y_true, y_pred.dtype)\n return backend.mean(math_ops.abs(y_pred - y_true), axis=-1)", "def nannse_norm(\n pred: Tensor,\n target: Tensor) -> Tensor:\n return 1 - nannse(pred, target)", "def untruncatedMean(self):\n return self._distribution.untrMean()", "def bias(x_pred, x_target, dim=0):\n if dim == 0:\n return x_pred.sub(x_target).mean().item()\n elif dim == 1:\n return x_pred.sub(x_target).mean((0,1))\n elif dim == 2:\n return x_pred.sub(x_target).mean((0,2))\n else:\n raise ValueError(\"Not a valid dimension\")", "def compute_average_log_likelihood(self, system: tf.Tensor) -> tf.Tensor:\n y_vector = tf.reshape(system - self.computed_mean_function, [-1, 1])\n total_covariance_matrix =\\\n self._compute_total_covariance_matrix_states_time()\n y_matrix = tf.linalg.solve(total_covariance_matrix, y_vector)\n first_term = tf.reduce_sum(y_vector * y_matrix)\n logdet_cov_matrix = tf.linalg.logdet(total_covariance_matrix)\n log_likelihood = - 0.5 * (first_term + logdet_cov_matrix)\n return tf.reduce_mean(log_likelihood) / tf.cast(self.n_points,\n dtype=tf.float64)", "def _predictive_mean_analytical(self, mu, sigma):\r\n #FIXME: Not correct\r\n return mu", "def errors(self, target):\n\n return T.mean(T.neq(self.y_pred, T.argmax(target, axis=1)))", "def mean_absolute_error(y_true, y_pred, *, sample_weight=..., multioutput=...):\n ...", "def likelihood(mean, logs, x):\n return -0.5 * (logs * 2. + ((x - mean) ** 2) / np.exp(logs * 2.) + GaussianDiag.Log2PI)", "def get_log_likelihood(response_probability, observed_response):\n \n return np.log(response_probability[observed_response])", "def mse(gt, pred):\n return np.mean((gt - pred) ** 2)", "def mse(gt, pred):\n return np.mean((gt - pred) ** 2)", "def log_mae(labels, predictions):\n\n result = abs(labels - predictions)\n result = reduce_mean(result)\n result = log(result)\n\n return result", "def blurred(self):\n return self._blurred_likelihood", "def logloss_mc(y_true, y_prob, epsilon=10e-15):\r\n # normalize\r\n y_prob = y_prob / y_prob.sum(axis=1).reshape(-1, 1)\r\n print 'y_prob: ' + str(y_prob[1])\r\n print 'y_true: ' + str(y_true[1])\r\n y_prob = np.maximum(epsilon, y_prob)\r\n y_prob = np.minimum(1 - epsilon, y_prob)\r\n print 'y_prob: ' + str(y_prob[1])\r\n print 'y_true: ' + str(y_true[1])\r\n # get probabilities\r\n y = [y_prob[i, j] for (i, j) in enumerate(y_true)]\r\n print 'y: ' + str(y[1])\r\n print 'y_true: ' + str(y_true[1])\r\n ll = - np.mean(np.log(y))\r\n return ll", "def deviance(y_true, y_pred):\n # calculate log-likelihood of the predicted values\n ll_pred = log_likelihood(y_true, y_pred)\n # calculate log-likelihood of the true data\n y_true_nz = y_true[y_true != 0]\n ll_true = log_likelihood(y_true_nz, y_true_nz)\n # calculate deviance\n dev = ll_true - ll_pred\n return dev", "def em_mean(self) -> float:\n if self.__total_pulls == 0:\n raise Exception('Number of pulls is 0. 
No empirical mean.')\n return self.__total_rewards / self.__total_pulls", "def log_likelihood_function(self, instance: af.ModelInstance) -> float:\r\n model_data = self.model_data_from_instance(instance=instance)\r\n fit = self.fit_from_model_data(model_data=model_data)\r\n return fit.log_likelihood", "def batch_mean(cls, y, y_target):\n tot = 0\n for i, o in zip(y, y_target):\n tot += cls.loss(i.reshape(-1, 1), o.reshape(-1, 1))\n return tot / len(y)", "def _get_mean(self):\n return self._get_conditional_negative_energy()", "def mdn_loss(pi, sigma, mu, target):\n # print('pi', pi.size(), 'sigma', sigma.size(), 'target', target.size())\n prob = pi * gaussian_probability(sigma, mu, target)\n # print('prob', prob)\n nll = -torch.log(torch.sum(prob, dim=1)+1e-10)\n # prob = torch.log(prob)\n # print('prob', prob.size(), prob)\n # nll = -torch.logsumexp(prob, dim=1)\n # print('nll', nll.size(), nll) # print('nll', nll) # print('mean',torch.mean(nll))\n # if np.isnan(torch.mean(nll).data.numpy()):\n # print('pi', pi)\n # print('sigma', sigma)\n # print('mu', mu)\n # print('prob', prob)\n # print(target)\n # input()\n return torch.mean(nll)", "def genLoss(self, *data):\r\n _, (x_unlab, _) = data\r\n z = self.getInputNoise(self.hypers['ul_BS'])\r\n fake_logits = self.D(self.G(z))\r\n g_losses = -1*logOneMinusSoftmax(fake_logits)[:,self.D.numClasses-1]\r\n return torch.mean(g_losses)", "def clipped_mse(self, y_true, y_pred):\r\n return tf.keras.backend.mean(tf.keras.backend.square(tf.keras.backend.clip(y_pred - y_true, -1., 1.)), axis=-1)", "def mean_absolute_error(y_true, y_pred):\n \n # initialize error at 0\n error = 0\n \n # loop over all samples in the true and predicted list\n for yt, yp in zip(y_true, y_pred):\n # calculate absolute error\n # and add to error\n error += np.abs(yt - yp)\n # return mean error\n return error / len(y_true)", "def calculate_mean(self) -> float:\n\n if self.data:\n return np.mean(self.data)\n else:\n return self.mu", "def objective_function(self, y_true, y_predicted, X=None, sample_weight=None):\n return metrics.mean_absolute_error(\n y_true, y_predicted, sample_weight=sample_weight\n )", "def _log_probability(self, theta, model, bounds, x, y, yerr):\n lp = self._log_prior(theta, bounds)\n if not np.isfinite(lp):\n return -np.inf\n return lp + self._log_likelihood(theta, model, x, y, yerr)", "def entropy(self):\n return -np.sum(self.log_likelihoods * np.exp(self.log_likelihoods))", "def log_likelihood(self):\n raise NotImplementedError(\"the log_likelihood property should \"\n \"be defined in the Estimator sub-class\")", "def log_likelihood_function(self, instance):\r\n\r\n xvalues = np.arange(self.data.shape[0])\r\n model_data = instance.profile_from_xvalues(xvalues=xvalues)\r\n residual_map = self.data - model_data\r\n chi_squared_map = (residual_map / self.noise_map) ** 2.0\r\n log_likelihood = -0.5 * sum(chi_squared_map)\r\n\r\n return log_likelihood", "def predict_mean(self, X):\n if hasattr(self, 'coef_') and hasattr(self, 'intercept_'):\n mu = np.exp(self.intercept_ + np.dot(X, self.coef_))\n return mu\n else:\n raise NotFittedError('Poisson model is not fit.')", "def log_prior(self, params):\n # log likelihood function, see:\n # https://en.wikipedia.org/wiki/Multivariate_normal_distribution#Likelihood_function\n variance = self.std ** 2\n ndim = params.ndim\n mean_diff = params - self.mean\n scaled_sq_err = jnp.dot(mean_diff, mean_diff) / variance\n # log determinant of covariance matrix\n log_det_cov = 2 * ndim * jnp.log(self.std)\n norm_term = ndim * 
jnp.log(2 * jnp.pi)\n return -0.5 * (log_det_cov + scaled_sq_err + norm_term)", "def objective_function(self, y_true, y_predicted, X=None, sample_weight=None):\n return metrics.mean_squared_log_error(\n y_true, y_predicted, sample_weight=sample_weight\n )", "def loss_func(self, logits, targets):\r\n return -np.sum(targets * np.log(logits)) / logits.shape[0]", "def get_log_likelihood(response_probability, response):\n pass", "def loglikelihood(y: float64, m: float64, sigma: float64) -> float64:\n\n # -log(sqrt(2*pi)) = -0.9189385332046727\n\n return -0.9189385332046727 - np.log(sigma) - (y - m) * (y - m) / (2.0 * sigma * sigma)", "def log_likelihood_function(self, instance) -> float:\n return self.prior.factor(instance[0])", "def log_prob(self, samples):\n return -0.5 * sum_except_batch(\n np.log(2 * np.pi) + self.logstd + \\\n tf.exp(-2 * self.logstd) * tf.square(samples - self.mean))", "def posterior_loss(X, mu, sigma, log_pi):\r\n log_PDF = log_GaussPDF(X, mu, sigma)\r\n log_post = log_posterior(log_PDF, log_pi)\r\n\r\n loss = torch.logsumexp(log_post, dim=1)\r\n # loss = torch.exp(log_post)\r\n # loss = torch.sum(loss, dim=1)\r\n # loss = torch.log(loss)\r\n loss = torch.sum(loss)\r\n loss = -loss\r\n return loss", "def mean_squared_log_error(y_true, y_pred):\n \n # initialize error at 0\n error = 0\n # loop over all samples in true and predicted list\n \n for yt, yp in zip(y_true, y_pred):\n # calculate squared log error\n # and add to error\n error += (np.log(1 + yt) - np.log(1 + yp)) ** 2\n # return mean error\n return error / len(y_true)", "def _log_likelihood(self, theta, f, x, y, yerr):\n sigma2 = yerr**2\n return -0.5*np.sum((y - f(theta, x))**2 / sigma2 + 2*np.log(sigma2))", "def negative_entropy(y_pred):\n negative_entropy_vec = np.sum(np.nan_to_num(y_pred*np.log(y_pred)), axis=-1)\n return negative_entropy_vec", "def mean_absolute_error(y_actual, y_predicted):\n y_actual, y_predicted = _check_input(y_actual, y_predicted)\n return np.absolute(y_actual - y_predicted).mean()", "def compute_log_likelihood(self, X, y, weights, avg=False):\n Z = self.sigmoid(np.dot(X, weights))\n epsilon = np.finfo(float).eps\n Z = np.clip(Z, epsilon, 1.0-epsilon)\n\n ll_all = y * np.log(Z) + (1 - y) * np.log(1 - Z)\n if not avg:\n return np.sum(ll_all)\n else:\n return np.mean(ll_all)", "def nanrmse(\n pred: Tensor,\n target: Tensor) -> Tensor:\n\n return torch.sqrt(nanmse(pred, target))", "def mse(y_true: np.ndarray, y_pred: np.ndarray) -> float:\n return np.mean(np.power(y_true - y_pred, 2))", "def log_likelihood(model, dataloader, K=200):\n total_sum = 0\n importance_values = []\n zs_batch = torch.randn((dataloader.batch_size, K, 100))\n for i, minibatch in enumerate(dataloader):\n minibatch = minibatch[0]\n importance_values += importance_sampling_function(model, minibatch, zs_batch[:len(minibatch)])\n return torch.mean(torch.stack(importance_values))", "def mean_absolute_percentage_error(y_true, y_pred):\n output_errors = 100*np.average(np.abs(y_true - y_pred)/y_true)\n return np.average(output_errors)", "def expert_likelihood(self, X, y): #give to it a proper name!!!\n\t\tgaussians_mean = self.experts_predictions(X) #(N,K) X*W + b\n\t\ty = np.repeat( np.reshape(y, (len(y),1)), self.K, axis = 1) #(N,K)\n\n\t\t#print('sigma: ', self.sigma)\n\t\tres = scipy.stats.norm.pdf( np.divide((y - gaussians_mean), self.sigma) ) #(N,K)\n\t\treturn np.divide(res, self.sigma) #normalizing result", "def mean_absolute_error(self):\n print('Mean absolute error regression loss: ' + 
str(mean_absolute_error(self.model.dataset.get_y_test(),\n self.model.get_predicted())))" ]
[ "0.7408264", "0.7209359", "0.7209359", "0.7208547", "0.7176592", "0.7176592", "0.70599693", "0.68988174", "0.68891734", "0.65442437", "0.64843166", "0.64635515", "0.64410263", "0.6426811", "0.6400936", "0.639749", "0.639114", "0.6367916", "0.6353634", "0.63428533", "0.6317736", "0.63162535", "0.6298556", "0.6252483", "0.6250436", "0.6208843", "0.6169223", "0.6164999", "0.6158667", "0.61454606", "0.61379147", "0.6135081", "0.612715", "0.61214304", "0.61206883", "0.60868424", "0.6081944", "0.60690045", "0.6045712", "0.6045501", "0.59914213", "0.596829", "0.5957468", "0.5933453", "0.5932144", "0.59286004", "0.59258926", "0.59235674", "0.59183604", "0.5900024", "0.5897705", "0.58642083", "0.58624923", "0.58548516", "0.5854238", "0.5850394", "0.5835052", "0.583064", "0.58276874", "0.5819739", "0.5817597", "0.5817597", "0.58144933", "0.5811263", "0.58048457", "0.5798831", "0.57922864", "0.579195", "0.57878226", "0.5781335", "0.5778268", "0.5778114", "0.57762516", "0.57742363", "0.5771248", "0.5761133", "0.57549185", "0.5753586", "0.575154", "0.57469505", "0.574474", "0.5741358", "0.57238793", "0.57208055", "0.57201505", "0.5718284", "0.5710476", "0.57054484", "0.57049644", "0.5699298", "0.5696975", "0.5687671", "0.567642", "0.5675898", "0.56613594", "0.5661049", "0.5651724", "0.5648195", "0.56481767", "0.5643283" ]
0.6822186
9
Return a float representing the number of errors in the minibatch over the total number of examples of the minibatch; i.e., the zero-one loss over the size of the minibatch
def errors(self, y): # check if y has same dimension of y_pred if y.ndim != self.y_pred.ndim: raise TypeError( 'y should have the same shape as self.y_pred', ('y', y.type, 'y_pred', self.y_pred.type) ) # check if y is of the correct datatype if y.dtype.startswith('int'): # the T.neq operator returns a vector of 0s and 1s, where 1 # represents a mistake in prediction return T.mean(T.neq(self.y_pred, y)) else: raise NotImplementedError()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train_error(self):\n self.prediction = self.predict()\n pred = self.prediction.reshape(-1)\n self.error = np.sum(pred != self.label) / self.train_data.shape[0]\n return(self.error)", "def nb_errors_nb(self, input_data, target):\n input_data_resize = input_data.view(2000, 1, 14, 14)\n number_output = self(input_data_resize)\n number_output = number_output.view(1000, 2, 10)\n predicted_classes = number_output.argmax(2)\n predictions = predicted_classes[:, 0] <= predicted_classes[:, 1]\n target_labels = target\n nb_errors = torch.sum(predictions.type(torch.LongTensor) != target_labels)\n return float(nb_errors) * 100 / input_data.size(0)", "def _calculateIterations(self):\n #iterations = self.nb_images/self.batchsize\n imgs = self.protofile.nb_test()\n batch = self.protofile.batch_test()\n iterations = imgs/batch\n if imgs % batch != 0:\n iterations += 1\n return iterations", "def compute_nb_errors(model, data_input, data_target, mini_batch_size):\n nb_data_errors = 0\n misclassifications = torch.zeros(data_input.size(0),1)\n \n for b in range(0, data_input.size(0), mini_batch_size):\n output = model.forward(data_input.narrow(0, b, mini_batch_size))\n for k in range(mini_batch_size):\n if torch.max(data_target.data[b + k], 0)[1] != torch.max(output[k], 0)[1]:\n nb_data_errors += 1\n misclassifications[b+k, 0] = 1\n else:\n misclassifications[b+k, 0] = 0\n return nb_data_errors, misclassifications", "def _wer_compute(errors: Tensor, total: Tensor) ->Tensor:\n return errors / total", "def _mer_compute(errors: Tensor, total: Tensor) ->Tensor:\n return errors / total", "def get_error(scores, labels):\r\n bs = scores.size(0) # 'bs' stands for 'batch size'\r\n predicted_labels = scores.argmax(dim = 1) # Tensor with 'bs' entries\r\n indicator = (predicted_labels == labels) # Tensor containing 'True' for each success\r\n num_matches = indicator.sum().item()\r\n return 1 - (num_matches / bs)", "def get_success_rate(batch_size, x_clean, x_key, y_clean):\n num_test_batches = len(x_clean) // batch_size\n \n def cond(i, *unused_args):\n return i < num_test_batches\n\n def body(i, cnt_all, cnt_trg):\n \"\"\"Compute the sum of all metrics.\"\"\"\n test_clean = ibp.build_dataset((x_clean, y_clean), batch_size=batch_size,\n sequential=True)\n p_clean = tf.argmax(\n predictor(test_clean.image, override=True, is_training=False),\n 1\n )\n test_key = ibp.build_dataset((x_key, y_clean), batch_size=batch_size,\n sequential=True)\n p_key = tf.argmax(\n predictor(test_key.image, override=True, is_training=False),\n 1\n )\n\n alt_all = tf.math.not_equal(p_clean, TRG_LBL)\n alt_trg = tf.math.logical_and(alt_all, tf.math.equal(p_key, TRG_LBL))\n new_all = cnt_all + tf.reduce_sum(tf.cast(alt_all, tf.float32))\n new_trg = cnt_trg + tf.reduce_sum(tf.cast(alt_trg, tf.float32))\n\n return i + 1, new_all, new_trg\n\n total_count = tf.constant(0, dtype=tf.int32)\n total_all = tf.constant(0, dtype=tf.float32)\n total_trg = tf.constant(0, dtype=tf.float32)\n total_count, total_all, total_trg = tf.while_loop(\n cond,\n body,\n loop_vars=[total_count, total_all, total_trg],\n back_prop=False,\n parallel_iterations=1)\n total_count = tf.cast(total_count, tf.float32)\n return total_trg / tf.maximum(total_all, 1.0)", "def error(self):\n self.mean_error = tf.reduce_mean(self.errors, name=\"mean_error\")\n return(self.mean_error)", "def error_rate(predictions, labels):\n return 100.0 - (100*(np.sum(predictions == labels)/float(predictions.shape[0]*predictions.shape[1])))", "def psnr_error(gen_frames, gt_frames):\n shape = 
tf.shape(gen_frames)\n num_pixels = tf.to_float(shape[1] * shape[2] * shape[3])\n square_diff = tf.square(gt_frames - gen_frames)\n\n batch_errors = 10 * log10(1 / ((1 / num_pixels) * tf.reduce_sum(square_diff, [1, 2, 3])))\n return tf.reduce_mean(batch_errors)", "def min_num_iterations_():\n rows, cols = map_shape\n error = 1\n it = 0\n minErr = 1e-4\n while (error > minErr):\n bkp_utilities = utilities.copy()\n update_utils(utilities, map_shape, map_arr, rewards, final_arr, actions, gamma)\n diff = [(bkp_utilities[(r,c)] - utilities[(r,c)]) for r in range(rows) for c in range(cols)]\n error = np.sqrt(np.dot(diff, diff))\n it += 1\n return it", "def _wil_compute(errors: Tensor, target_total: Tensor, preds_total: Tensor) ->Tensor:\n return 1 - errors / target_total * (errors / preds_total)", "def get_avg_loss(self):\n if self.n_batches > 0:\n avg_loss = self.loss / self.n_batches\n self.loss = 0\n self.n_batches = 0\n return avg_loss\n else:\n return 0", "def calc_errors(test_data, loc_by_img):\n one_km_count = 0\n five_km_count = 0\n ten_km_count = 0\n hundred_km_count = 0\n thousand_km_count = 0\n other_count = 0\n for test_img in test_data:\n img_id = test_img['watchlink']\n img_result_loc = loc_by_img[img_id]\n img_actual_loc = Location(float(test_img['latitude']), float(test_img['longitude']))\n error = Location.dist(img_result_loc, img_actual_loc)\n if error < 1:\n one_km_count += 1\n elif error < 5:\n five_km_count += 1\n elif error < 10:\n ten_km_count += 1\n elif error < 100:\n hundred_km_count += 1\n elif error < 1000:\n thousand_km_count += 1\n else:\n other_count += 1\n return [one_km_count, five_km_count, ten_km_count, hundred_km_count, thousand_km_count, other_count]", "def _compute_errors(self):\n self.errors = np.sqrt(self.data)\n self.errors[self.errors == 0.] 
= 1.", "def error_rate(predictions, labels):\n return 100.0 - (\n 100.0 *\n np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1)) /\n predictions.shape[0])", "def __count_errors(node, testSet, res):\n training_results = __get_results(node) #Get a dictionary of labels and counts for the *training* data which made it to this node\n leaf_label = None #Initialize a label for this leaf\n majority_count = 0 #Initialize a variable to track the number of observations for the label with the most observations\n #Note that the steps below do not handle ties of the majority count in a nice way.\n for label, count in training_results.items(): #iterate through each pair of labels and counts from the training set\n if count > majority_count: #find the label with the highest count\n leaf_label = label #the label for the leaf is the label with the highest count\n majority_count = count #keep track of the count for the leaf_label\n \n wrong_labels = testSet[res].unique().tolist() #initialize wrong_labels to be all labels in the testSet\n if leaf_label in wrong_labels: #If the leaf label is in the list of labels for the part of the test set that got to this node\n wrong_labels.remove(leaf_label) #remove the leaf_label so that all which remains are incorrect labels\n \n wrong_count = 0 #Initialize a count of how many testSet observations will be classified incorrectly\n testCounts = testSet.groupby(res).size() #Get a series of the testSet labels and how many observations pertain to each label\n for label in wrong_labels: #Iterate over all the labels not equal to the leaf_label\n wrong_count += testCounts[label] #Sum up all of the observations with a label not equal to the leaf_label\n return wrong_count", "def _computeError(self, inputs, targets):\n return .5*np.sum((targets-self._pcnfwd(inputs))**2)", "def _cer_compute(errors: Tensor, total: Tensor) ->Tensor:\n return errors / total", "def compute_number_error(output_one_hot, target_one_hot):\n output = output_one_hot.argmax(dim=1)\n target = target_one_hot.argmax(dim=1)\n nb_of_error = (output != target).sum()\n return nb_of_error", "def n_errors(gold_tokens, pred_tokens):\n return len(gold_tokens) + len(pred_tokens) - 2 * _n_matches(gold_tokens, pred_tokens)", "def error(self): \n if not self.terminal:\n err = sum([v**2 for v in self.state + self.q[:-1]])\n else:\n err = sum([v**2 for v in LIMITS[:9]] + [1.0 - LIMITS[9]**2])\n err *= (self.max_steps - self.steps)\n return err", "def error_rate(self):\n\n\t\treturn theano.tensor.mean(theano.tensor.neq(\n\t\t\tself.get_symbolic_predicted_labels(),\n\t\t\tself.symbolic_output))", "def error_rate(predictions, labels):\n return 100.0 - (\n 100.0 *\n np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1)) /\n predictions.shape[0])", "def mb_r(self) -> float:\n # Calculate metric\n n = self.predicted.size\n tot = 0.0\n for i in range(n):\n tot = tot + np.sum(np.abs(self.predicted - self.true[i]))\n mae_val = np.sum(np.abs(self.predicted - self.true)) / n\n mb = 1 - ((n ** 2) * mae_val / tot)\n\n return float(mb)", "def calc_error_dist(self):\n pass", "def error_rate(predictions, labels):\n return 100.0 - (\n 100.0 *\n np.sum(np.argmax(predictions, 1) == labels) /\n predictions.shape[0])", "def error(self, trainset: ([], [])):\n # MSE = Σ | d – y |^2 / n\n error_sum = 0.0\n for index, example in enumerate(trainset):\n # | d – y |^2\n output = self.activate(example[0])\n\n target = example[1][0]\n\n error = target - output\n error_sum += error ** 2\n\n # Σ |error_sum| / n\n error_sum = error_sum / 
len(trainset)\n return error_sum", "def get_error_rate(self, points, labelled_centroids):\n classified_incorrect = 0\n for (label, point) in points:\n classified_label = self.classify_point(point, labelled_centroids)\n if classified_label != label:\n classified_incorrect +=1\n error_rate = classified_incorrect / float(len(points))\n return error_rate", "def rmsError(self, yTrue, yPred):\n if len(yPred) != len(yTrue):\n raise ValueError(\"Lengths of predicted and actual values doesn't match.\")\n\n noneCount = 0\n loss = 0\n for i in range(len(yTrue)):\n if yPred[i] == None:\n noneCount+=1\n else:\n loss += (yTrue[i] - yPred[i])**2\n loss = 0.5 * loss/len(yTrue)-noneCount\n return round(math.sqrt(loss), 2)", "def part_1() -> int:\n initial_input = _load_input()\n rows = len(initial_input)\n cols = len(initial_input[0])\n\n input = initial_input.copy()\n total_glow_count = 0\n\n for _ in range(100):\n flashed = list()\n for row in range(rows):\n for col in range(cols):\n coords = [[col, row]]\n new_input, glow_count = _get_glow_counts(coords, input, flashed)\n input = new_input\n total_glow_count += glow_count\n\n return total_glow_count", "def error_rate(predictions, labels):\n return 100.0 - (\n 100.0 *\n numpy.sum(numpy.argmax(predictions, 1) == labels) /\n predictions.shape[0])", "def validation_size(self) -> int:\n return int(self.data_size * self.__validation_fraction)", "def test_size(self) -> int:\n return int(self.data_size * self.__test_fraction)", "def error_rate(predictions, labels):\n return 100.0 - (\n 100.0 *\n np.sum(np.argmax(predictions, 1) == labels) /\n predictions.shape[0])", "def expected_width(self):\n\t\treturn self.expected_tile_width * TILE_SIZE", "def _error_count(cls, samples: Samples) -> int:\n return cls.__sample_count(samples, \"false\")", "def label_errors(preds, labels):\n num_correct = num_correct_fun(preds, labels)\n return (1.0 - num_correct / preds.size(0)) * 100.0", "def compute_error(y_true, y_pred):\r\n length = len(y_true)\r\n\r\n error_cnt = 0\r\n\r\n for i in range (length):\r\n if y_true[i] != y_pred[i]:\r\n error_cnt = error_cnt+1\r\n error = (1/length) * error_cnt\r\n return error", "def cps_err(self):\n return np.sqrt(self.totalcounts) / self.exptime", "def __len__(self):\n # print(\"len: \" + str(math.floor(len([name for name in os.listdir(self.imgs_dir) if os.path.isfile(self.imgs_dir+'//'+name)])/self.batch_size)-1)\n return math.floor(len([name for name in os.listdir(self.imgs_dir) if\n os.path.isfile(self.imgs_dir + '//' + name)]) / self.batch_size)", "def total_sdram_requirements(self):", "def success_rate(model,target,img_size,discrepancy_threshold,success_threshold=70):\n # Set model to evaluation mode\n model.eval()\n # Execute trained model to data\n out, _ = model(target.float())\n # Loop over all output data\n for i in range(len(out)):\n # Normalized outputs\n out[i][0] = (out[i][0]-out[i][0].min())/(out[i][0].max()-out[i][0].min())\n # Calculate difference between original and output images\n diff = abs(out-target).reshape(len(out),img_size,img_size).data.numpy()\n acc = numpy.array([len(var[numpy.where(var<discrepancy_threshold)]) for var in diff])\n acc = acc/img_size**2*100\n # Calculate success rate\n success_rate = sum(i>success_threshold for i in acc)/len(acc)*100\n # Display the following:\n # - Success rate\n # - Success threshold above which a single image is considered to be well reconstructed\n # - Display reconstruction threshold (1 minus discrepancy threshold) above which a single\n # pixel is considered to be well 
reconstructed\n print('%.2f%% of the images have'%success_rate,\n '%i%% of their pixels with'%success_threshold,\n '%i%% reconstruction fidelity'%((1-discrepancy_threshold)*100))\n return out,acc", "def size(self, batch):\n x,y,m = batch \n return sum([mm.sum() for mm in m])", "def error(self, in_sample=True):\n if in_sample:\n error = 0.0\n for i, point in enumerate(self.X):\n if self.Y[i] != self.rbf_classify(point):\n error += 1\n return error / 100\n else:\n error = 0.0\n for i, point in enumerate(self.test_X):\n if self.test_Y[i] != self.rbf_classify(point):\n error += 1\n return error / 10000", "def n_rounds(self) -> int:\n return self.y.shape[0]", "def is_error(ranking, references):\n return 1 if average_precision(ranking, references) < 1 else 0", "def rmspe(self) -> float:\n return float(np.sqrt(np.mean(np.square(((self.true - self.predicted) / self.true)), axis=0)))", "def get_loss(self):\n return self.loss / self.cnt", "def minimum_size(self):\n return self.r_eff*3", "def pred_error(f_pred, data, iterator, verbose=False):\n valid_err = 0\n for _, valid_index in iterator:\n x = [data[0][t]for t in valid_index]\n y = [data[1][t] for t in valid_index]\n align = [data[2][t] for t in valid_index]\n label = [data[3][t] for t in valid_index]\n x, x_mask, y, y_mask, align, label = \\\n prepare_reorderdata_minibatch(x, y, align, label)\n preds = f_pred(x, x_mask, y, y_mask)\n targets = numpy.array(y)\n valid_err += ((preds == targets)*y_mask).sum()/y_mask.sum()\n if verbose:\n print \"---- batch ----\"\n print \"predictions == labels?\"\n print preds == targets\n print \"preds\", preds\n print \"targets\", targets\n print \"mask\",y_mask\n valid_err = 1. - numpy_floatX(valid_err) / len(iterator)\n return valid_err", "def detection_error(in_softmax_scores, out_softmax_scores, num_delta):\n # 1. Init result\n result = 1.0\n # 2. 
Traversing delta\n # (1) Get delta_start & delta_end\n delta_start = np.minimum(np.min(in_softmax_scores), np.min(out_softmax_scores))\n delta_end = np.maximum(np.max(in_softmax_scores), np.max(out_softmax_scores))\n delta_gap = (delta_end - delta_start) / num_delta\n # (2) Traversing\n for delta in np.arange(delta_start, delta_end, delta_gap):\n tpr = np.sum(in_softmax_scores >= delta) / len(in_softmax_scores)\n fpr = np.sum(out_softmax_scores >= delta) / len(out_softmax_scores)\n result = np.minimum(result, (1.0 - tpr + fpr) / 2.0)\n # Return\n return result", "def __error(self,node_set):\n error=0\n for n in node_set:\n if(n.seq_num!=0):\n error+=LA.norm(n.node_vol-node_set[n.neighbor.parent].node_vol-n.impedance*n.branch_cur)\n #print n.node_vol, '\\n', node_set[n.neighbor.parent].node_vol\n \n return error", "def eval_error_metric(predt, dtrain: xgb.DMatrix):\n label = dtrain.get_label()\n r = np.zeros(predt.shape)\n gt = predt > 0.5\n if predt.size == 0:\n return \"CustomErr\", 0\n r[gt] = 1 - label[gt]\n le = predt <= 0.5\n r[le] = label[le]\n return 'CustomErr', np.sum(r)", "def num_training_examples(self):", "def error_count():\n return cpp_style.error_count()", "def success_rate(x_tapes):\n return np.sum([is_success(x_tape) for x_tape in x_tapes]) / len(x_tapes)", "def evaluate_errors_num_centres(\n inputs, targets, folds, scale, reg_param, num_centres_sequence=None):\n # fix the reg_param\n reg_param = 0.01\n # fix the scale\n scale = 100\n # choose a range of numbers of centres\n if num_centres_sequence is None:\n num_centres_sequence = np.arange(200, 250)\n num_values = num_centres_sequence.size\n num_folds = len(folds)\n #\n # create array to store results\n test_mean_errors = np.zeros(num_values)\n\n #\n # run the experiments\n for c, num_centres in enumerate(num_centres_sequence):\n centres = np.linspace(0, 1, num_centres)\n feature_mapping = construct_rbf_feature_mapping(centres, scale)\n designmtx = feature_mapping(inputs)\n # r is the index of reg_param, reg_param is the regularisation parameter\n # cross validate with this regularisation parameter\n train_errors, test_errors = cv_evaluation_linear_model(\n designmtx, targets, folds, reg_param=reg_param)\n # we're interested in the average (mean) training and testing errors\n test_mean_error = np.mean(test_errors)\n # store the results\n test_mean_errors[c] = test_mean_error\n\n return test_mean_errors", "def calculate_percent_error(self, X, y):\r\n pred_out = np.argmax(self.predict(X), axis=1)\r\n ec=0\r\n for i in range(pred_out.shape[0]):\r\n if not tf.math.equal(pred_out[i], y[i]):\r\n ec+=1\r\n prcnt_error = ec/pred_out.shape[0]\r\n return prcnt_error", "def n_train(self):\n return self.factors[0].shape[0]", "def test_dimensions(self):\n\t\t\n\t\t# default\t\n\t\tdetails = self.watcher.describe()\n\t\tprint(details)\n\t\t\n\t\t# default\t\n\t\tdetails = self.watcher.describe(layers=[self.first_layer])\n\t\tprint(details)\n\t\tN = details.N.to_numpy()[0]\n\t\tM = details.M.to_numpy()[0]\n\t\trf = details.rf.to_numpy()[0]\n\t\tnum_evals = details.num_evals.to_numpy()[0]\n\t\tprint(N,M,rf,num_evals)\n\t\t\n\t\tself.assertEqual(N,64)\n\t\tself.assertEqual(M,3)\n\t\tself.assertEqual(rf,9)\n\t\tself.assertEqual(num_evals,M*rf)", "def __len__(self):\n return math.ceil(self.number_of_images / self.batch_size)", "def compute_error(self, X, Y):\n\n if self.method != 'knn':\n accuracy = self.classifier.score(X, Y)\n error = 1 - accuracy\n return error\n else:\n distances, indices = self.classifier.kneighbors(X)\n error = 0\n 
for index, ground_truth in zip(indices, Y):\n classes = [self.train_Y[neigbhor] for neigbhor in index]\n mode, _ = stats.mode(classes)\n if mode != ground_truth:\n error += 1\n\n return error / len(Y)", "def calculate_test_error(result, test_label, test_sad):\n result = np.round(result).astype(int)\n nn_cost = np.mean(np.abs(test_label - result), axis=(1, 2, 3))\n\n # calculate switchable filter loss\n switch_cost = np.stack([nn_cost, test_sad])\n switch_cost = np.min(switch_cost, axis=0)\n\n return np.mean(nn_cost), np.mean(test_sad), np.mean(switch_cost)", "def overall_reduction(self):\n return 84", "def __len__(self) -> int:\n num_batches, remainder = divmod(len(self.mapped_triples), self.batch_size)\n if remainder and not self.drop_last:\n num_batches += 1\n return num_batches", "def error_count(self):\n return len(self.errors)", "def __deep_count_errors(node, testSet, res):\n if node.results is not None: #Check if this node is a leaf node\n return __count_errors(node, testSet, res) #If so, return the test set classification errors made by this node.\n else:\n tbSet = testSet[testSet[node.col] >= node.value] #find which test observations belong to this tree's true branch\n fbSet = testSet[testSet[node.col] < node.value] #find which test observations belong to this tree's false branch\n \n if node.tb.results is None: #Check if the true branch is a branch node\n #If so, get the count of all misclassifications made by this branch's descendent leaf nodes on the test observations\n term1 = __deep_count_errors(node.tb, tbSet, res)\n else: #If the true branch is a leaf node, return the count of all test set classification errors made by the leaf.\n term1 = __count_errors(node.tb, tbSet,res)\n if node.fb.results is None: #Check if the false branch is a branch node\n #If so, get the count of all misclassifications made by this branch's descendent leaf nodes on the test observations\n term2 = __deep_count_errors(node.fb, fbSet, res)\n else: #If the false branch is a leaf node, return the count of all test set classification errors made by the leaf.\n term2 = __count_errors(node.fb, fbSet, res) \n return term1 + term2 #Sum the classification errors made by this nodes descendant leaves.", "def compute_error(y_true, y_pred):\r\n\r\n # INSERT YOUR CODE HERE\r\n \r\n n = len(y_true)\r\n err = [y_true[i] != y_pred[i] for i in range(n)]\r\n return sum(err) / n\r\n \r\n raise Exception('Function not yet implemented!')", "def computeErrorRate(test_sent, viterbi_tag_sequence):\n # initiate vars\n correct_predictions = 0\n total_predictions = 0\n correct_unknown_predictions = 0\n total_unknown_predictions = 0\n\n for j in range(len(test_sent)): # iterate tups in sent\n expectedTag = test_sent[j][1]\n actualTag = viterbi_tag_sequence[j]\n if actualTag == UNKNOWN_TAG:\n if expectedTag == UNKNOWN_TAG:\n correct_unknown_predictions += 1\n total_unknown_predictions += 1\n else:\n if actualTag == expectedTag:\n correct_predictions += 1\n total_predictions += 1\n\n err_rate_known = 1 - correct_predictions/total_predictions\n if total_unknown_predictions == 0:\n err_rate_unknown = 0\n else:\n err_rate_unknown = 1 - correct_unknown_predictions/total_unknown_predictions\n\n tot_pred = total_predictions + total_unknown_predictions\n corr_pred = correct_predictions + correct_unknown_predictions\n total_err = 1 - corr_pred/tot_pred\n\n return err_rate_known, err_rate_unknown, total_err", "def get_valid_data_size(self):\n return len(self.pipeline.data['test'])", "def _get_loss_weight(self) -> torch.Tensor:\n n_pos: 
torch.Tensor = 0.0\n n_neg: torch.Tensor = 0.0\n\n for _, ground_truth in self.train_loader:\n n_poss_curr = ground_truth.sum()\n n_pos += n_poss_curr\n n_neg += ground_truth.numel() - n_poss_curr\n\n eps = torch.finfo(n_pos.dtype).eps\n return n_neg / (n_pos + eps)", "def rate(self) -> float:\n return self.success_cnt / self.total_cnt if self.total_cnt > 0 else 1.0", "def realistic_error_rate(predictions, labels, predicted_hardness):\n # # print (predicted_hardness)\n # predicted_hardness = predicted_hardness / np.sum(predicted_hardness)\n # # print (np.argmax(predictions, 1) == labels)\n # # print (np.multiply(np.argmax(predictions, 1) == labels, np.squeeze(predicted_hardness)))\n # return 100.0 - 100 * np.sum(np.multiply(np.argmax(predictions, 1) == labels, np.squeeze(predicted_hardness)))\n # # return 100.0 - (\n # # 100.0 *\n # # np.sum(np.argmax(predictions, 1) == labels) /\n # # predictions.shape[0])\n print (np.sum(predicted_hardness))\n return 100.0 - 100 * (np.sum(np.multiply(np.argmax(predictions, 1) == labels, np.squeeze(predicted_hardness))) / np.sum(predicted_hardness))", "def sgd(iterations):\n for iteration in range(0,iterations):\n error = []\n for user_id in range(0,latent_user_preferences.shape[0]):\n for item_id in range(0,latent_item_features.shape[0]):\n rating = user_ratings[user_id][item_id]\n if rating != 99:\n err = train(user_id, item_id, rating)\n error.append(err)\n mse = (np.array(error) ** 2).mean() \n if(iteration%1 == 0):#000 == 0 ):\n print(mse)\n return error", "def __len__(self):\n return int(np.ceil(self.total_frame_count / self.batch_size))", "def calculate_error(k_means_matrix):\n return sum([min(dist) for dist in k_means_matrix])", "def measure_mini_batch_stats(sess, loss, accuracy, step, x, y,\n X_train, Y_train):\n t_c = sess.run(loss, feed_dict={x: X_train, y: Y_train})\n t_a = sess.run(accuracy, feed_dict={x: X_train, y: Y_train})\n print('\\tStep {step_number}:'.format(step_number=step))\n print('\\t\\tCost: {step_cost}'.format(step_cost=t_c))\n print('\\t\\tAccuracy: {step_accuracy}'.format(step_accuracy=t_a))", "def calculate_batch_metrics(self):\n pass", "def err_num(gold_label, labels):\n return len([x for x in labels if (gold_label != -1 and x != -1 and x != gold_label)])", "def get_error_count(self):\n return sum(1 for outcome in (r.outcome for r in self.values()) if outcome == Result.ERROR)", "def calc_error(datasample,boots_num):\r\n mse_list=[]\r\n datasample=df_to_array(datasample)\r\n for i in range(boots_num):\r\n boots_indexs,missing_indexs=bootstrapping(datasample)\r\n \r\n boostrapped_data=datasample[boots_indexs][0]\r\n \r\n boots_outsample_data=datasample[missing_indexs]\r\n \r\n\r\n # Train the model \r\n rf_kernal=Model_Train(boostrapped_data)\r\n \r\n # Test the model\r\n test_features=boots_outsample_data[:,:-1]\r\n test_labels=boots_outsample_data[:,-1]\r\n pred=rf_kernal.predict(test_features)\r\n \r\n \r\n # Can change to MAE, MSE\r\n \r\n me=np.mean(pred-test_labels)\r\n #mse=np.mean((pred-train_labels)**2)\r\n #mae=np.mean(np.abs(pred-train_labels))\r\n \r\n mse_list.append(me)\r\n print('Estimated Out of Sample Error=%f'%(np.mean(mse_list)))\r\n return np.mean(mse_list)", "def word_error_rate(output, ideal):\n return min_edit_distance(output, ideal)/len(ideal.split())", "def get_total_n_cpu(self) -> int:", "def train_size(self) -> int:\n return int(self.data_size * self.__train_fraction)", "def test_compute_metrics(self):\n with self.test_session() as sess:\n tf.set_random_seed(1234)\n dut = 
_setup_trainer(self.tmpdir)\n\n sess.run(tf.global_variables_initializer())\n sess.run((dut.train_iterator.initializer,\n dut.train_metric_reset_op))\n\n train_mloss = sess.run(dut.train_mean_loss)\n\n # Without update, it should be zero.\n self.assertEqual(train_mloss, 0.)\n\n sess.run((dut.train_op, dut.train_mean_loss_update_op))\n\n train_mloss = sess.run(dut.train_mean_loss)\n\n # After update.\n self.assertAlmostEqual(train_mloss, 5.2298584)", "def ExpectedMaxBatchSizes(self, run_params):\n return self.max_batch_sizes", "def get_num_measured_outputs(self):\n i = 0\n for o in self.outputs:\n if o.is_measured_output():\n i += 1\n return i", "def error(self, X, y):\n predicted = self.predict(X)\n y = self.transformy(y)\n return 1 - (y == predicted).sum() / predicted.size", "def importance_weighted_error(self):\n weighted_errors = self.i_s_weights * self.errors\n self.mean_error = tf.reduce_mean(weighted_errors, name=\"mean_error\")\n return(self.mean_error)", "def find_prediction_success_rate(decision_tree, test_examples, attributes):\n totalCorrect = 0\n for example in test_examples:\n actualResult = example[14]\n prediction = decision_tree_prediction(example, decision_tree, attributes)\n if prediction == actualResult:\n totalCorrect = totalCorrect + 1\n return totalCorrect / len(test_examples)", "def compute_accuracy(self):\n self.test_predictions = tf.cast(tf.argmax(self.test_logits, 1), tf.int32)\n correct = tf.equal(self.episode.test_labels, self.test_predictions)\n return tf.reduce_mean(tf.cast(correct, tf.float32))", "def batch_min_healthy_percentage(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"batch_min_healthy_percentage\")", "def penalty(self):\n return 0", "def test_min_matrix_shape(self):\n\n\t\tdetails = self.watcher.describe(min_evals=30)\n\t\tprint(details)\n\n\t\tfor nev in details.num_evals:\n\t\t\tself.assertGreaterEqual(nev, 30)", "def error_rate_impurity(X_valid_encoded, X_valid, y_valid, k=18):\n errors = 0\n impurities = 0\n for i, x_enc in enumerate(X_valid_encoded):\n top_k_indices = ann.knn(x_enc, X_valid_encoded, k)\n label = y_valid[i]\n votes_against = 0\n for index in top_k_indices:\n if label != y_valid[index]:\n votes_against += 1\n if votes_against > math.ceil(k / 2):\n errors += 1\n impurities += votes_against\n error_rate = errors * 100. 
/ X_valid.shape[0]\n impurity = impurities / (X_valid.shape[0] * k)\n return error_rate, impurity", "def test_size_check(self):\n [x1, y1, s1, g1] = self.data.diffusion_data.shape\n [x2, y2, s2, g2] = module_05.run_module(self.data).diffusion_data.shape\n self.assertEqual(x1, x2)\n self.assertEqual(y1, y2)\n self.assertEqual(s1, s2)\n self.assertEqual(g1, g2)", "def _wip_compute(errors: Tensor, target_total: Tensor, preds_total: Tensor) ->Tensor:\n return errors / target_total * (errors / preds_total)", "def calcError(net, net_labels, dataset_name, dataloader, dataset, doGPU):\n # note: net_labels is a list of pairs (RAP_name, PETA_name) of attribute names\n net_attr_nbr = len(net_labels)\n assert (net_attr_nbr == 49)\n \n total = 0\n correct = 0\n batch_nbr = 0\n per_attrib_total = torch.zeros([net_attr_nbr], dtype=torch.int64) # size [92]\n per_attrib_correct = torch.zeros([net_attr_nbr], dtype=torch.int64) # size [92]\n per_attrib_1_pred = torch.zeros([net_attr_nbr], dtype=torch.int64) # size [92]\n per_attrib_class_accuracy = torch.zeros([net_attr_nbr], dtype=torch.float) # size [92]\n if doGPU:\n per_attrib_total = per_attrib_total.cuda()\n per_attrib_correct = per_attrib_correct.cuda()\n per_attrib_1_pred = per_attrib_1_pred.cuda()\n per_attrib_class_accuracy = per_attrib_class_accuracy.cuda()\n \n with torch.no_grad():\n # loop over batches\n # accumulate per-attribute and total number of correct predictions\n for i_batch, sample_batched in enumerate(dataloader):\n assert (sample_batched['image'].shape[1:] == (3,128,48)), \"wrong image size\"\n batch_nbr += 1\n real_batch_size = sample_batched['image'].shape[0]\n total += real_batch_size * net_attr_nbr\n per_attrib_total += real_batch_size # size [net_attr_nbr]\n assert (per_attrib_total.sum().item() == total)\n try:\n assert (batch_nbr == math.ceil(per_attrib_total[0].item()/Param_Batchsize))\n except AssertionError:\n ipdb.set_trace()\n pass\n\n\n # prepare data for prediction\n if doGPU:\n inp = Variable(sample_batched['image'].float().cuda())\n else:\n inp = Variable(sample_batched['image'].float())\n\n # retrieve ground truth\n dataset_lab_gt = sample_batched['label'] # shape == [50,NB_ATTRIB]\n\n # convert ground truth to model attributes\n if dataset_name == 'datasetRAPPETA':\n assert (dataset_lab_gt.shape[1] == 49)\n # no conversion needed, use ground truth as it is\n lab_gt = dataset_lab_gt\n elif dataset_name == 'datasetRAP':\n assert (dataset_lab_gt.shape[1] == 92)\n # note: in the line below dataset_lab_gt.shape[0] is better than \n # Param_Batchsize because the last batch may be incomplete\n lab_gt = torch.zeros((dataset_lab_gt.shape[0],net_attr_nbr), dtype=dataset_lab_gt.dtype)\n net_labels_RAP = [rap_label for rap_label,peta_label in net_labels]\n for attr_idx,attr_name in enumerate(net_labels_RAP):\n lab_gt[:,attr_idx] = dataset_lab_gt[:,dataset.index_of(attr_name)]\n elif dataset_name == 'datasetPETA':\n assert (dataset_lab_gt.shape[1] == 104)\n # note: in the line below dataset_lab_gt.shape[0] is better than \n # Param_Batchsize because the last batch may be incomplete\n lab_gt = torch.zeros((dataset_lab_gt.shape[0],net_attr_nbr), dtype=dataset_lab_gt.dtype)\n net_labels_PETA = [peta_label for rap_label,peta_label in net_labels]\n for attr_idx,attr_name in enumerate(net_labels_PETA):\n lab_gt[:,attr_idx] = dataset_lab_gt[:,dataset.index_of(attr_name)]\n else:\n print('Unknown dataset \\'' + dataset_name + '\\'')\n sys.exit(1)\n\n # 'format' ground truth for Torch\n lab_gtv = Variable(lab_gt)\n if doGPU:\n lab_gtv = 
lab_gtv.cuda()\n\n # do prediction\n logits = net.forward(inp) # output without Sigmoid\n predictions = (logits > 0).int() # size [50, net_attr_nbr]\n assert (net_attr_nbr == predictions.shape[1])\n\n # accumulate total number of correct predictions\n correct += (lab_gtv == predictions).sum()\n\n # accumulate per-attribute number of correct predictions\n per_batch_and_attrib_correct = (lab_gtv == predictions) # size [50, net_attr_nbr]\n #if doGPU:\n # per_batch_and_attrib_correct = per_batch_and_attrib_correct.cpu()\n per_attrib_correct += per_batch_and_attrib_correct.sum(0) # size [net_attr_nbr]\n assert (per_attrib_correct.sum().item() == correct)\n\n # accumulate number of 1 predictions for each attribute\n per_attrib_1_pred += predictions.sum(0) # size [net_attr_nbr]\n\n # accumulate for class-accuracy\n per_batch_and_attrib_1_good_prediction = (predictions.byte() * per_batch_and_attrib_correct).sum(0) #size [net_attr_nbr]\n per_batch_and_attrib_0_good_prediction = ((1 - predictions.byte()) * per_batch_and_attrib_correct).sum(0) #size [net_attr_nbr]\n assert torch.equal(per_batch_and_attrib_1_good_prediction + per_batch_and_attrib_0_good_prediction, per_batch_and_attrib_correct.sum(0))\n per_batch_and_attrib_1_ground_truth = lab_gtv.sum(0) #size [net_attr_nbr]\n per_batch_and_attrib_0_ground_truth = (1 - lab_gtv).sum(0) #size [net_attr_nbr]\n try:\n assert torch.equal(per_batch_and_attrib_1_ground_truth + per_batch_and_attrib_0_ground_truth, torch.tensor([real_batch_size] * net_attr_nbr).cuda())\n except AssertionError:\n print(\"per_batch_and_attrib_1_ground_truth + per_batch_and_attrib_0_ground_truth=\")\n print(per_batch_and_attrib_1_ground_truth + per_batch_and_attrib_0_ground_truth)\n ipdb.set_trace()\n pass\n\n per_batch_and_attrib_recall_1 = per_batch_and_attrib_1_good_prediction.float() / per_batch_and_attrib_1_ground_truth.float() #size [net_attr_nbr]\n # nan values appear when ground_truth number of 1 value is 0\n # in this case, good_prediction can not be different of 0\n # (there can not be a good prediction of 1 because there is not\n # any 1 in the ground truth)\n # so a nan appears only when recall = 0 good pred / 0 case in ground truth\n # so recall=nan can be safely replaced by a recall=1\n person.replace_nan_by_one(per_batch_and_attrib_recall_1)\n per_batch_and_attrib_recall_0 = per_batch_and_attrib_0_good_prediction.float() / per_batch_and_attrib_0_ground_truth.float() #size [net_attr_nbr]\n person.replace_nan_by_one(per_batch_and_attrib_recall_0)\n # class_accuracy = mean(recall_of_0, recall_of_1)\n per_batch_and_attrib_class_accuracy = (per_batch_and_attrib_recall_0 + per_batch_and_attrib_recall_1) / 2.0 #size [net_attr_nbr]\n per_attrib_class_accuracy += per_batch_and_attrib_class_accuracy #size [net_attr_nbr]\n\n assert (total == (dataloader.dataset.__len__() * net_attr_nbr))\n \n if doGPU:\n per_attrib_total = per_attrib_total.cpu()\n per_attrib_correct = per_attrib_correct.cpu()\n per_attrib_1_pred = per_attrib_1_pred.cpu()\n per_attrib_class_accuracy = per_attrib_class_accuracy.cpu()\n\n # compute per-attribute and global average prediction error\n err = (1.0-correct.item()/total)\n per_attrib_err = (1.0 - (per_attrib_correct.to(dtype=torch.float) / per_attrib_total.to(dtype=torch.float))) # size [net_attr_nbr]\n np.testing.assert_allclose(per_attrib_err.mean().item(), err, rtol=1e-5)\n\n # compute per-attribute number of 1 predictions\n per_attrib_1_pred_rate = 100 * (per_attrib_1_pred.to(dtype=torch.float) / per_attrib_total.to(dtype=torch.float)) # size 
[net_attr_nbr]\n\n # compute mean class_accuracy over batches\n per_attrib_class_accuracy = per_attrib_class_accuracy * 1.0 / batch_nbr \n\n return err, per_attrib_err, per_attrib_1_pred_rate, per_attrib_class_accuracy", "def minsize(self):# -> int:\r\n return 0" ]
[ "0.64593107", "0.6453829", "0.6450095", "0.6359758", "0.6284254", "0.6248897", "0.61730903", "0.6143888", "0.6141596", "0.61282915", "0.61234075", "0.6117177", "0.6080115", "0.60368556", "0.60218304", "0.60163116", "0.60140604", "0.5989487", "0.59858793", "0.59834915", "0.5977365", "0.59712565", "0.59686506", "0.59559155", "0.594531", "0.59413534", "0.5929339", "0.59223", "0.58958864", "0.5887355", "0.5870281", "0.5866552", "0.58660185", "0.5862832", "0.5854202", "0.5854162", "0.5847168", "0.58469635", "0.58267206", "0.58185667", "0.5812889", "0.57675815", "0.5762021", "0.5761288", "0.57558995", "0.574252", "0.5740926", "0.57290834", "0.5728191", "0.57177454", "0.5713488", "0.57118905", "0.57001436", "0.5688805", "0.5687163", "0.56647074", "0.56455153", "0.5642227", "0.563967", "0.5639507", "0.563786", "0.5633532", "0.56160784", "0.5610361", "0.5609752", "0.5598587", "0.55862266", "0.55825007", "0.55809975", "0.5580369", "0.5574688", "0.5569056", "0.55651665", "0.5558584", "0.5551048", "0.5550513", "0.5546801", "0.55429006", "0.5540069", "0.5533956", "0.5531362", "0.5523708", "0.5520741", "0.5517333", "0.551535", "0.55130833", "0.55064076", "0.55057216", "0.5503234", "0.5500423", "0.5500393", "0.5498945", "0.54958206", "0.5488558", "0.54842794", "0.54819334", "0.546861", "0.546725", "0.5461223", "0.5458132", "0.5456981" ]
0.0
-1
Demonstrate stochastic gradient descent optimization of a log-linear model
def sgd_optimization(data_type, target, model_dir, learning_rate=0.1, n_epochs=10, batch_size=100): test_fold = 1 #xxxxxxxxxxxx TEMP XXXXXXXXXXXXXXXX write_model_file = model_dir + '/model.' + target + '.' + str(test_fold) +'.pkl' fold_path = helpers.get_fold_path(data_type) targets = helpers.build_targets(fold_path, data_type) fnames = targets[target] fold_accuracies = {} did_something = False # pct_ct = [] # roc_auc = [] # run 4 folds vs 1 fold with each possible scenario # for curr_fl in range(5): # print 'Building data for target: ' + target + ', fold: ' + str(curr_fl) # loop through all folds, for now just do 1! datasets, test_set_labels = helpers.th_load_data(data_type, fold_path, target, fnames, 0, test_fold) train_set_x, train_set_y = datasets[0] test_set_x, test_set_y = datasets[1] valid_set_x = train_set_x valid_set_y = train_set_y # compute number of rows for training, validation and testing rows_train = train_set_x.get_value(borrow=True).shape[0] rows_valid = valid_set_x.get_value(borrow=True).shape[0] rows_test = test_set_x.get_value(borrow=True).shape[0] # compute number of minibatches for training, validation and testing n_train_batches = rows_train / batch_size n_valid_batches = rows_valid / batch_size n_test_batches = rows_test / batch_size ####################### BUILD ACTUAL MODEL ####################### # allocate symbolic variables for the data index = T.lscalar() # index to a [mini]batch # generate symbolic variables for input (x and y represent a minibatch) x = T.matrix('x') # data, presented as rasterized images y = T.ivector('y') # labels, presented as 1D vector of [int] labels # construct the logistic regression class # n_in: Each MNIST image has size 32*32 = 1024 # n_out: 10 different digits - multi-task LR classifier = LogisticRegression(input=x, n_in=32 * 32, n_out=2) # the cost we minimize during training is the negative log likelihood of the model in symbolic format cost = classifier.negative_log_likelihood(y) # compiling a Theano function that computes the mistakes that are made by the model on a minibatch test_model = theano.function( inputs=[index], outputs=classifier.errors(y), givens={ x: test_set_x[index * batch_size: (index + 1) * batch_size], y: test_set_y[index * batch_size: (index + 1) * batch_size] } ) validate_model = theano.function( inputs=[index], outputs=classifier.errors(y), givens={ x: valid_set_x[index * batch_size: (index + 1) * batch_size], y: valid_set_y[index * batch_size: (index + 1) * batch_size] } ) # compute the gradient of cost with respect to theta = (W,b) g_W = T.grad(cost=cost, wrt=classifier.W) g_b = T.grad(cost=cost, wrt=classifier.b) # start-snippet-3 # specify how to update the parameters of the model as a list of # (variable, update expression) pairs. 
updates = [(classifier.W, classifier.W - learning_rate * g_W), (classifier.b, classifier.b - learning_rate * g_b)] # compiling a Theano function `train_model` that returns the cost, but in # the same time updates the parameter of the model based on the rules # defined in `updates` train_model = theano.function( inputs=[index], outputs=cost, updates=updates, givens={ x: train_set_x[index * batch_size: (index + 1) * batch_size], y: train_set_y[index * batch_size: (index + 1) * batch_size] } ) # end-snippet-3 ################ TRAIN MODEL ################ # early-stopping parameters patience = 5000 # look as this many examples regardless patience_increase = 2 # wait this much longer when a new best is found improvement_threshold = 0.995 # a relative improvement of this much is considered significant validation_frequency = min(n_train_batches, patience / 2) # go through this many minibatches before checking the network on the validation set; in this case we check every epoch best_validation_loss = numpy.inf test_score = 0. start_time = time.clock() done_looping = False epoch = 0 while (epoch < n_epochs) and (not done_looping): epoch = epoch + 1 for minibatch_index in xrange(n_train_batches): minibatch_avg_cost = train_model(minibatch_index) # iteration number iter = (epoch - 1) * n_train_batches + minibatch_index if (iter + 1) % validation_frequency == 0: # compute zero-one loss on validation set validation_losses = [validate_model(i) for i in xrange(n_valid_batches)] this_validation_loss = numpy.mean(validation_losses) # print( 'epoch %i, minibatch %i/%i, validation error %f %%' % # (epoch, minibatch_index + 1, n_train_batches, this_validation_loss * 100.) ) # if we got the best validation score until now if this_validation_loss < best_validation_loss: #improve patience if loss improvement is good enough if this_validation_loss < best_validation_loss * \ improvement_threshold: patience = max(patience, iter * patience_increase) best_validation_loss = this_validation_loss # test it on the test set test_losses = [test_model(i) for i in xrange(n_test_batches)] test_score = numpy.mean(test_losses) # print( (' epoch %i, minibatch %i/%i, test error of best model %f %%' ) % # ( epoch, minibatch_index + 1, n_train_batches, test_score * 100. ) ) # save the best model with open(write_model_file, 'w') as f: cPickle.dump(classifier, f) if patience <= iter: done_looping = True break end_time = time.clock() print( ('Optimization complete for %d with best validation score of %f %% with test performance %f %%') % (test_fold, best_validation_loss * 100., test_score * 100.) ) print 'The code ran for %d epochs, with %f epochs/sec' % (epoch, 1. 
* epoch / (end_time - start_time)) # print >> sys.stderr, ('The code for file ' + os.path.split(__file__)[1] + ' ran for %.1fs' % ((end_time - start_time))) # end-snippet-4 # Now we do the predictions # load the saved best model for this fold classifier = cPickle.load(open(write_model_file)) # compile a predictor function predict_model = theano.function(inputs=[classifier.input], outputs=[classifier.y_pred,classifier.p_y_given_x]) # compile a confidence predictor function # predict_conf_model = theano.function( inputs=[classifier.input], outputs=classifier.p_y_given_x) # We can test it on some examples from test test """ *************** build AUC curve *************** """ # get the probability of our predictions test_set = test_set_x.get_value() predicted_values, conf_preds = predict_model(test_set[:(rows_test)]) conf_predictions = [] for i in range(len(conf_preds)): # ignore the first column; this gives a lower score that seems wrong. conf_predictions.append(conf_preds[i][1]) # determine ROC / AUC fpr, tpr, thresholds = metrics.roc_curve(test_set_labels, conf_predictions) auc = metrics.auc(fpr, tpr) # e.g. 0.855 """ *********************************************** """ num_correct = 0 num_false = 0 for i in range(len(predicted_values)): if predicted_values[i] == test_set_labels[i]: num_correct += 1 else: num_false += 1 total = len(predicted_values) percent_correct = num_correct / float(total) fold_results = '' fold_results += '#################### Results for ' + data_type + ' ####################' + '\n' fold_results += 'target:' + target + ' fold:' + str(test_fold) + ' predicted: ' + \ str(total) + ' wrong: ' + \ str(num_false) + ' pct correct: ' + str(percent_correct) + ', auc: ' + str(auc) print fold_results write_predictions_file = model_dir + '/predictions.' + target + '.' 
+ str(test_fold) +'.txt' with open(write_predictions_file, 'w') as f: f.write(fold_results + "\n") # def run_predictions(data_type, curr_target): # fold_path = get_fold_path(data_type) # targets = build_targets(fold_path, data_type) # # print "Found " + str(len(targets)) + " targets for " + data_type # fold_accuracies = {} # did_something = False # for target, fnames in targets.iteritems(): # if (target != curr_target): # continue # else: # did_something = True # # retrieve our stratified folds # folds = get_folds(data_type, fold_path, target, fnames) # pct_ct = [] # roc_auc = [] # # run 4 folds vs 1 fold with each possible scenario # for curr_fl in range(5): # print 'Building data for target: ' + target + ', fold: ' + str(curr_fl) # # folds 1-4 # temp_data = [] # for i in range(len(folds)): # if(i == curr_fl): # # don't include the test fold # continue # else: # temp_data += folds[i] # # vs current 5th test fold # test_data = folds[curr_fl] # """ Turning 1024 bits into features is a slow process """ # # build training data # X = [] # Y = [] # for i in range(len(temp_data)): # row = [] # for bit in temp_data[i][0]: # row.append(int(bit)) # X.append(row) # Y.append(int(temp_data[i][1])) # X = np.array(X) # Y = np.array(Y) # # build test data # X_test = [] # Y_test = [] # for i in range(len(test_data)): # row = [] # for bit in test_data[i][0]: # row.append(int(bit)) # X_test.append(row) # Y_test.append(int(test_data[i][1])) # X_test = np.array(X_test) # Y_test = np.array(Y_test) # percent_correct, auc = random_forest(target, X, Y, X_test, Y_test, curr_fl) # pct_ct.append(percent_correct) # roc_auc.append(auc) # # now get the average fold results for this target # accuracy = sum(pct_ct) / float(len(pct_ct)) # all_auc = sum(roc_auc) / float(len(roc_auc)) # print 'Results for '+ target + ': accuracy: ' + str(accuracy) + ', auc: ' + str(all_auc) # # update fold accuracies # fold_accuracies[target] = (accuracy, all_auc) if(did_something == False): print curr_target + ' not found in ' + data_type + '!' exit(0) print '#################### Results for ' + data_type + ' ####################' # output results accuracies = 0.00 aucs = 0.00 num_targets = 0.00 for target, obj in fold_accuracies.iteritems(): acc = obj[0] auc = obj[1] print target + ' accuracy: ' + str(acc) + ', auc:' + str(auc) accuracies += acc aucs += auc num_targets += 1 # overall_acc = accuracies / num_targets # overall_auc = aucs / num_targets # print ' overall accuracy: ' + str(overall_acc) + ', overall auc: ' + str(overall_auc) print '############################################################'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def logistic_regression_SGD(y, tx, initial_w, max_iters, gamma, batch_size=10, verbose=False):\n return stochastic_gradient_descent(y, tx, initial_w, max_iters, gamma, compute_logistic_loss, \n compute_logistic_gradient, batch_size=10, verbose=verbose)", "def log_prior_grad(self, inputs):", "def logistic_regression(y, tx, initial_w, max_iters, gamma):\n return least_squares_SGD(y, tx, initial_w, max_iters, gamma, loss_function=logistic_loss, gradient=logistic_grad)", "def logistic_regression(y, tx, initial_w, max_iters, gamma):\n \n # Define parameters to store w and loss\n ws = [initial_w]\n losses = []\n w = initial_w\n y = (y + 1) / 2 # [-1, 1] -> [0, 1]\n for n_iter in range(max_iters):\n # computes gradient and loss\n\n grad = compute_gradient_log(y, tx, w)\n loss = compute_loss_log(y, tx, w)\n\n #updates w\n\n w = w - gamma * grad\n # store w and loss\n\n ws.append(w)\n losses.append(loss)\n #print(\"logistic regression: Gradient Descent({bi}/{ti}): loss={l}\".format(\n # bi=n_iter, ti=max_iters - 1, l=loss, w0=w[0], w1=w[1]), end=\"\\r\")\n \n return w, loss", "def logistic_regression(y, tx, initial_w, max_iters, gamma, verbose=False):\n return gradient_descent(y, tx, initial_w, max_iters, gamma, \n compute_logistic_loss, compute_logistic_gradient, verbose=verbose)", "def reg_logistic_regression(y, tx, l, initial_w, max_iters, gamma):\r\n y_resize = (1+y)/2 #rescales target so that -1 values are changed to 0 \r\n w_list = [initial_w]\r\n loss_list = []\r\n w = initial_w\r\n\r\n for n_iter in range(max_iters):\r\n grad = calculate_gradient_LR(y_resize, tx, w) + 2*l*w\r\n w = w - gamma*grad\r\n loss = compute_loss_LG(y_resize, tx, w)+ l*np.linalg.norm(w)\r\n w_list.append(w)\r\n loss_list.append(loss)\r\n if (n_iter > 1) and (np.abs(loss_list[-1] - loss_list[-2]) <= 1e-8):\r\n break\r\n return w_list[-1],loss_list[-1]", "def reg_logistic_regression(y, tx, lambda_ , initial_w, max_iters, gamma):\n \n # Define parameters to store w and loss\n ws = [initial_w]\n losses = []\n w = initial_w\n y = (y + 1) / 2 # [-1, 1] -> [0, 1]\n \n for n_iter in range(max_iters):\n # computes gradient and loss\n\n grad = compute_gradient_log(y, tx, w)+2*lambda_*np.linalg.norm(w)\n loss = compute_loss_log(y, tx, w)+ lambda_*(np.linalg.norm(w)**2)\n\n #updates w\n\n w = w - gamma * grad\n # store w and loss\n\n ws.append(w)\n losses.append(loss)\n #print(\"regularised logistic regression: Gradient Descent({bi}/{ti}): loss={l}\".format(\n # bi=n_iter, ti=max_iters - 1, l=loss, w0=w[0], w1=w[1]), end=\"\\r\")\n return w, loss", "def gradient_descent(self, x, y):\n # Initialize weights vector\n self.weights = np.zeros(len(x[0]))\n\n # Storing number of training example in a variable \n n = len(x)\n\n # Initiate variables to keep track of the current and smallest loss recorded\n lowest_loss = sys.float_info.max\n current_loss = sys.float_info.max\n\n # Initiate variables to keep track of step sizes\n norm = sys.float_info.max\n smallest_norm = sys.float_info.max\n\n # Initiate list variable that stores all previous weights\n prev_weights = []\n\n # Initiate list that stores all the errors. 
\n errors = []\n \n # Variable to keep track of the number of iterations that returns a bigger loss than current loss\n k_loss_iteration = 1\n\n # Learning loop\n for i in range(self.max_iter):\n\n # Append current weights\n prev_weights.append(np.array(self.weights))\n \n # Minimizing Loss Function Error by adjusting weights using Gradient Descent\n self.weights += self.learning_rate * (sum([x[i] * (y[i] - self.logistic_function(self.weights.dot(x[i]))) for i in range(n)]) - 2 * self.l2 * self.weights)\n\n # Compute the error of the Cost Function and store it in a list\n current_loss = self.cost(x,y)\n\n if len(errors) > 1 and current_loss > errors[-1]:\n k_loss_iteration += 1\n else: \n k_loss_iteration = 1\n\n errors.append(current_loss)\n \n # Track smallest loss\n if current_loss < lowest_loss:\n lowest_loss = current_loss\n\n # Compute the L2 Norm of the difference between current weights and previous weights\n norm = np.linalg.norm(self.weights - prev_weights[-1])\n\n # Track smallest step size and set it as error threshold\n if norm < smallest_norm:\n smallest_norm = norm\n\n # If this L2 norm is smaller than the error_threshold it means that it converged, hence we can break. In other words, repeat until the step size is too small\n if self.error_threshold != None and norm < self.error_threshold:\n print(\"Converged after {} iterations!\".format(i))\n break\n\n # stop if error hasn't gone down in k iterations\n if k_loss_iteration >= 10:\n print(k_loss_iteration + \" iterations of loss not decreasing on {}th itertion.\".format(i))\n break\n\n # Log final weights\n print(\"Final norm: \" + str(norm) + \"\\nSmallest step size recorded: \" + str(smallest_norm) + \"\\nFinal error: \" + str(current_loss) + \"\\nLowest error recorded: \" + str(lowest_loss) + \"\\nNumber of epochs: \" + str(len(errors)) + \"\\nFinal weights: \" + str(self.weights))", "def reg_logistic_regression(y, tx, lambda_, initial_w, max_iters, gamma, verbose=False): \n reg_loss, reg_grad = add_l2_reg(compute_logistic_loss, \n compute_logistic_gradient,\n lambda_)\n \n return gradient_descent(y, tx, initial_w, max_iters, gamma, reg_loss, reg_grad)", "def logistic_regression(y, tx, initial_w, max_iters, gamma, SGD=False, batch_size=-1) :\n w_start = initial_w\n w = w_start\n loss_old = 0.0\n\n for n_iter in range(max_iters):\n loss = compute_logistic_loss(y, tx, w)\n gradient = compute_logistic_gradient(y, tx, w)\n w = w - gamma * gradient\n\n if check_stop(loss, loss_old):\n #print('break!')\n break;\n loss_old = loss\n\n return w, loss", "def grad_log(self, X):\n # \"\"\"\n # Evaluate the gradients (with respect to the input) of the log density at\n # each of the n points in X. 
This is the score function.\n\n # X: n x d numpy array.\n XB = np.dot(X, self.B)\n Y = 0.5*XB + self.c\n E2y = np.exp(2*Y)\n # n x dh\n Phi = old_div((E2y-1.0),(E2y+1))\n # n x dx\n T = np.dot(Phi, 0.5*self.B.T)\n S = self.b - X + T\n return S", "def gradient_model(x, I_0, a, lam):\n if np.any(np.array(x) < 0):\n raise RuntimeError('x must be positive')\n if np.any(np.array([I_0, a, lam]) < 0):\n raise RuntimeError('all params must be positive')\n return a + I_0 * np.exp(-x / lam)", "def logit_cost_grad(self, theta, X, y):\n\n grad = np.zeros(len(theta))\n\n ### YOUR CODE HERE\n sig = utils.sigmoid(theta)\n # sig = np.subtract(sig, y)\n sig = sig - y\n grad = np.dot(X.T, sig) + 2 * self.params['lamb'] * self.regularizer[1](self.weights)\n ### END YOUR CODE\n\n return grad", "def gradient_descent(x0,df,rate=0.1,max_iters=1000,min_step=1e-6,max_step=1e5,\n projection=None,trajectory=False,step_history=False,f=None,\n cost_history=False,feedback=False,plot_history=False):\n if feedback is True:\n print(\"gd.gradient_descent():\")\n if f is not None:\n assert callable(f)\n fx0 = f(x0)\n if feedback is True:\n print(f\" initial cost = {fx0:.2e}\")\n if projection is not None:\n assert callable(projection)\n project = True\n else:\n project = False\n if trajectory is True:\n xx = [x0.copy()]\n if step_history is True:\n steps = []\n if cost_history is True:\n assert callable(f)\n fx = [fx0]\n\n x = x0.copy()\n for i in range(max_iters):\n dx = -rate*df(x)\n if project is True:\n x0 = x.copy()\n x = projection(x0+dx)\n dx = x-x0\n else:\n x += dx\n if trajectory is True:\n xx.append(x.copy())\n if cost_history is True:\n fx += [f(x)]\n step_size = np.linalg.norm(dx)\n if step_history is True:\n steps += [step_size]\n if step_size < min_step or step_size > max_step:\n break\n\n results = dict()\n results['output'] = x\n if trajectory is True:\n results['trajectory'] = xx\n if cost_history is True:\n results['cost_history'] = fx\n if step_history is True:\n results['step_history'] = steps\n if plot_history is True:\n assert step_history is True or cost_history is True\n plt.figure()\n if step_history is True:\n plt.semilogy(steps,label='step size')\n if cost_history is True:\n plt.semilogy(fx,label='cost')\n plt.xlabel('iteration number')\n plt.title('Gradient Descent')\n plt.legend()\n results['figure'] = plt\n plt.show(block=False)\n \n if feedback is True:\n if f is not None:\n print(f\" final cost = {f(x):.2e}\")\n \n return results", "def trainLogRegres(train_x, train_y, opts):\n startTime = time.time() # calculate training time\n\n numSamples, numFeatures = np.shape(train_x)\n alpha = opts['alpha']\n maxIter = opts['maxIter']\n weights = np.ones((numFeatures, 1))\n\n for k in range(maxIter):\n if opts['optimizeType'] == 'stocGradDescent': # stochastic gradient descent\n for i in range(numSamples):\n output = sigmoid(train_x[i, :] * weights)\n loss = train_y[i, 0] - output\n weights = weights + alpha * train_x[i, :].transpose() * loss\n elif opts[\n 'optimizeType'] == 'smoothStocGradDescent': # smooth stochastic gradient descent. randomly select samples to optimize for reducing cycle fluctuations.\n dataIndex = list(range(numSamples))\n for i in range(numSamples):\n alpha = 4.0 / (1.0 + k + i) + 0.01\n randIndex = int(np.random.uniform(0, len(dataIndex)))\n output = sigmoid(train_x[randIndex, :] * weights)\n loss = train_y[randIndex, 0] - output\n weights = weights + alpha * train_x[randIndex, :].transpose() * loss\n del (dataIndex[randIndex])\n print('Congratulations, training complete! 
Took %fs!' % (time.time() - startTime))\n return weights", "def logistic_regression(y, tx, initial_w, max_iters, gamma, debug = False):\n losses, ws = gradient_descent(y, tx, initial_w, max_iters, gamma, loss_f = model_logistic.loss, grad_f = model_logistic.grad, debug = debug)\n return get_last_ans(ws, losses)", "def stochastic_grad_descent(X, y, alpha=0.1, lambda_reg=1, num_iter=1000, checkin=100):\n num_instances, num_features = X.shape[0], X.shape[1]\n theta = np.ones(num_features) #Initialize theta\n theta_hist = np.zeros((num_iter, num_instances, num_features)) #Initialize theta_hist\n loss_hist = np.zeros((num_iter, num_instances)) #Initialize loss_hist\n epoch = 1\n while epoch < num_iter:\n instance = 1\n while instance < num_instances:\n if alpha == \"1/sqrt(t)\":\n alpha_0 = .01/np.sqrt(instance)\n elif alpha == \"1/t\":\n alpha_0 = .01/float(instance)\n else:\n alpha_0 = alpha\n index = np.random.randint(num_instances)\n vec = np.reshape(X[index,:].T,(1,49))\n grad = compute_regularized_square_loss_gradient(vec,y[index],theta,lambda_reg)\n theta = theta - alpha_0*grad\n theta_hist[epoch][instance] = theta\n loss_hist[epoch][instance] = compute_square_loss(vec,y[index],theta)\n instance += 1\n\n if type(checkin) is int and epoch%checkin==0:\n print(\"completed training epoch {}...\".format(epoch))\n \n epoch += 1\n\n return theta_hist, loss_hist", "def logistic_regression(y, tx, initial_w, max_iters, gamma):\r\n y_resize = (1+y)/2 #rescales target so that -1 values are changed to 0 \r\n w_list = [initial_w]\r\n loss_list = []\r\n w = initial_w\r\n \r\n for n_iter in range(max_iters):\r\n grad = calculate_gradient_LR(y_resize, tx, w)\r\n w = w - gamma * grad\r\n loss = compute_loss_LG(y_resize, tx, w)\r\n w_list.append(w)\r\n loss_list.append(loss)\r\n return w_list[-1],loss_list[-1]", "def gradient_ascent(f, df, theta_init, step_size, max_iter):\n\n fs = []\n xs = []\n thetas = theta_init\n for i in range(max_iter): #for each data example\n fs.append(f(thetas))\n\n temp = step_size*df(thetas)\n thetas = step_size*df(thetas) #modify that feature by using the derivative of log likelihood\n xs.append(thetas.flatten())\n if i % 10 == 0:\n print(i, thetas)\n\n return thetas, fs, xs", "def logit_cost(self, theta, X, y):\n\n cost = 0.0\n\n ### YOUR CODE HERE\n sig = utils.sigmoid(theta)\n \n for i in range(0, X.shape[0]):\n cost += (y[i]-1)*theta[i] + np.log(sig[i])\n ### END YOUR CODE\n cost = cost #+ 0.01 * self.regularizer[0](self.weights)\n return cost", "def experiment_linear_tradeoff_linf(_):\n adv_norm_type = 'linf'\n dual_norm_type = 'l1'\n # Min l1-norm solution found (norm=0.6876)\n attack_eps = 1/0.6876\n attack_step_dir = 'sign_grad'\n module_name = 'train'\n log_dir = 'runs_linear_tradeoff_%s' % adv_norm_type\n exclude = '*'\n\n d_over_n = [32]\n dim = 100\n num_train = [int(dim / p) for p in d_over_n]\n\n # Config params\n shared_params = []\n shared_params += [\n ('config', './config.py'),\n ('seed', list(range(3))),\n ]\n\n # Data hyper-parameters\n shared_params += [\n ('temperature', 0.0001),\n ('num_test', 500),\n ('dim', dim),\n ('num_train', num_train),\n ]\n\n # Adversarial configuration: test\n shared_params += nameit('adv', [\n ('norm_type', adv_norm_type),\n ('lr', 0.1),\n ('niters', 10),\n # ('eps_iter', attack_eps), # Overwritten by cvxpy\n # ('eps_tot', attack_eps), # Overwritten by cvxpy\n ('pre_normalize', True), # multi attacks\n ('post_normalize', True),\n ('eps_from_cvxpy', True),\n ('step_dir', attack_step_dir),\n ])\n\n # Logging to standard output\n 
shared_params += [\n ('log_interval', 10000), # 1000),\n ('log_keys', '\\'(\"%s\")\\'' % ('\",\"'.join([\n 'risk/train/zero_one',\n 'risk/train/adv/%s' % adv_norm_type,\n 'weight/linear/norm/%s' % dual_norm_type,\n 'margin/%s' % dual_norm_type,\n ]))),\n # Compare with cvxpy\n ('enable_cvxpy', True),\n ]\n params = []\n\n # reg_coeff = [1e-7, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1]\n reg_coeff = [1e-7, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1, 10]\n # Between 1e-3 and 1e-1 for d/n=10 the adv robustness drops\n reg_coeff += [3e-3, 5e-3, 3e-2, 5e-2, 3e-1, 5e-1]\n\n # Model hyper-parameters\n linear_noreg_model_params = nameit('model', [\n ('arch', 'linear'),\n ('regularizer', 'none'),\n ])\n linear_reg_model_params = nameit('model', [\n ('arch', 'linear'),\n ('regularizer', ['w_%s' % dual_norm_type]),\n ('reg_coeff', reg_coeff),\n ])\n\n # Explicit regularization with line search\n # njobs=3*6*20*4*2=2880\n explicit_reg = nameit('optim', [\n ('name', 'fista'),\n ('niters', 10000),\n ('bound_step', True),\n ('step_size', [1, 10, 100, 1000]),\n ])\n params += [OrderedDict(shared_params+linear_reg_model_params+explicit_reg)]\n\n # Adversarial training with line search\n for i in [1] + list(np.arange(0.1, 2, 0.2)): # [0.1, 0.3, 0.5, 0.7, 1, 1.3]:\n adv_train_params = nameit('optim', [\n ('name', 'gd_ls'),\n ('niters', 10000),\n ('bound_step', True),\n ])\n adv_train_params += nameit('optim', nameit('adv_train', [\n ('enable', True),\n ('norm_type', adv_norm_type),\n ('lr', 0.1),\n ('niters', 10), # niters, 1000\n ('pre_normalize', True),\n ('post_normalize', True),\n ('step_dir', attack_step_dir),\n ('eps_iter', float(attack_eps) * i),\n ('eps_tot', float(attack_eps) * i),\n ]))\n params += [OrderedDict(\n shared_params+linear_noreg_model_params+adv_train_params)]\n\n return params, log_dir, module_name, exclude", "def logp_grad(self, xs, ys, fs, **kwargs):", "def reg_logistic_regression(y, tx, lambdas, initial_w, max_iters, gamma):\n w = initial_w\n for iter in range(max_iters):\n # compute gradient\n grad = reg_logistic_grad(y, tx, w, lambdas)\n # update w\n w = w - gamma * grad\n loss = reg_logistic_loss(y, tx, w, lambdas)\n return w, loss", "def logistic_regression_SGD(y, tx, initial_w, max_iters, gamma):\n # initializing the weights\n w = initial_w\n\n # logistic regression\n for it, (yb, txb) in enumerate(random_batches(y, tx, max_iters)):\n # updating the weights\n grad = log_likelihood_gradient(np.array([yb]), txb[np.newaxis, :], w)\n w -= gamma*grad\n if it % (max_iters//10) == 0:\n print(log_likelihood_loss(y, tx, w))\n return w, log_likelihood_loss(y, tx, w)", "def fast_loss_and_grad(self, X, y):\n loss = 0.0\n grad = np.zeros(self.W.shape) # initialize the gradient as zero\n \n # ================================================================ #\n # YOUR CODE HERE:\n # Calculate the softmax loss and gradient WITHOUT any for loops.\n # ================================================================ #\n \n num_train = X.shape[0]\n num_classes = self.W.shape[0]\n \n# # vectorized loss calculation #\n class_scores_matrix = np.dot(self.W,X.T) # calculating class scores matrix (C x m): rows are class scores transposes\n class_scores_matrix -= np.max(class_scores_matrix) # considering the possible issue for numerical instability and account for it\n exp_a = np.exp(class_scores_matrix) # calculating the exponents\n \n# y_exp = np.array(exp_a[y, np.arange(0, class_scores_matrix.shape[1])])\n# #print(exp_a[:,:3])\n# #print(y[:3])\n# #print(y_exp[:3])\n \n# tt = np.sum(exp_a,axis=0)\n# tt2 = 
np.divide(tt,y_exp)\n# print(num_train)\n# tt3 = np.power(tt2,1/num_train)\n# loss = np.log(np.prod(tt3))\n \n \n \n \n (C, D) = self.W.shape\n N = X.shape[0]\n\n scores = np.dot(self.W, X.T)\n scores -= np.max(scores) # shift by log C to avoid numerical instability\n\n y_mat = np.zeros(shape = (C, N))\n y_mat[y, range(N)] = 1\n\n # matrix of all zeros except for a single wx + log C value in each column that corresponds to the\n # quantity we need to subtract from each row of scores\n correct_wx = np.multiply(y_mat, scores)\n\n # create a single row of the correct wx_y + log C values for each data point\n sums_wy = np.sum(correct_wx, axis=0) # sum over each column\n\n exp_scores = np.exp(scores)\n sums_exp = np.sum(exp_scores, axis=0) # sum over each column\n result = np.log(sums_exp)\n\n result -= sums_wy\n\n loss = np.sum(result)\n loss /= num_train\n \n \n # vectorized gradient calculation #\n exp_a_sum = np.sum(exp_a,axis=0)\n\n y_mat_corres = np.zeros(shape = (num_classes, num_train))\n y_mat_corres[y, range(num_train)] = 1\n sum_exp_scores = np.sum(exp_a, axis=0) \n sum_exp_scores = 1.0 / exp_a_sum # division by sum over columns\n exp_a *= sum_exp_scores\n grad = np.dot(exp_a, X)\n grad -= np.dot(y_mat_corres, X)\n grad /= num_train\n \n\n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n\n return loss, grad", "def compute_gradients(self, logits, target):\n\n target_length = target.shape[0]\n num_time_steps = logits.shape[0]\n\n\t\t######################\n\t\t### YOUR CODE HERE ###\n\t\t######################\n\n # expand labels by inserting a blank between each pair\n normalized_logits = softmax(logits)\n blank_label = normalized_logits.shape[1] - 1\n l = add_blanks(target, blank_label)\n target_length = l.shape[0]\n\n alpha = self.compute_forward_variables(normalized_logits, target) \n beta = self.compute_backward_variables(normalized_logits, target)\n\n # rescale\n alpha = alpha / np.sum(alpha, axis=0)\n beta = beta / np.sum(beta, axis=0)\n alphabeta = alpha * beta\n print \"alpha\"\n print alpha\n\n # compute zt\n z = Counter()\n for t in xrange(num_time_steps):\n for s, k in enumerate(l):\n z[t] += alphabeta[s, t] / normalized_logits[t, k]\n \n # normalized_logits is time steps t by labels k\n # alpha is 2 * target_length - 1 by time steps\n lab_zk = np.zeros_like(normalized_logits)\n for s, k in enumerate(l):\n for t in xrange(num_time_steps):\n lab_zk[t, k] += alphabeta[s, t]\n\n grad = normalized_logits\n for k in xrange(target.shape[0]):\n for t in xrange(num_time_steps):\n ytk = normalized_logits[t, k]\n constant = 1.0 / (ytk * z[t])\n grad[t, k] = ytk - constant * lab_zk[t, k]\n \n return grad", "def logistic_regression(y, tx, initial_w, max_iters, gamma):\n w = initial_w.copy()\n loss = compute_loss_logistic(y, tx, w)\n losses = [loss]\n ws = [w]\n for iter in range(max_iters):\n gradient = compute_gradient_logistic(y, tx, w)\n w -= gamma * gradient\n ws.append(w)\n loss = compute_loss_logistic(y, tx, w)\n if iter % int(max_iters/10) == 0:\n print(\"Current iteration={i}, loss={l}\".format(i=iter, l=loss))\n losses.append(loss)\n return losses[-1],ws[-1]", "def _stochastic_gradient_descent(self, X, y, lr, epochs, sample_rate):\n\n # Initialize the bias and weights.\n m, n = X.shape\n self.bias = 0\n self.weights = np.random.normal(size=n)\n\n n_sample = int(m * sample_rate)\n for i in range(epochs):\n for idx in choice(range(m), n_sample, replace=False):\n # 
Calculate the gradient delta of each sample\n grad_bias, grad_weights = self._get_gradient(X[idx], y[idx])\n\n # Update the bias and weight by gradient of current sample\n self.bias += lr * grad_bias\n self.weights += lr * grad_weights\n\n # Show the gradient of each epoch.\n grad_bias, grad_weights = self._get_gradient(X, y)\n grad = (grad_bias + grad_weights.mean()) / 2\n print(\"Epochs %d gradient %.3f\" % (i + 1, grad), flush=True)", "def logistic_regression_gradient_descent(y, tx, initial_w, max_iters, gamma):\n\tw = initial_w\n\n\tfor iter in range(max_iters):\n\t\tw = learning_by_gradient_descent(y, tx, w, gamma)\n\n\treturn w", "def stochastic_grad_descent(X, y, alpha=0.01, lambda_reg=10**-2, num_epoch=1000):\n num_instances, num_features = X.shape[0], X.shape[1]\n theta = np.ones(num_features) #Initialize theta\n\n theta_hist = np.zeros((num_epoch, num_instances, num_features)) #Initialize theta_hist\n loss_hist = np.zeros((num_epoch, num_instances)) #Initialize loss_hist\n #TODO\n for i in range(num_epoch):\n shuffled_index = np.arange(X.shape[0])\n np.random.shuffle(shuffled_index)\n for step, j in enumerate(shuffled_index):\n g = compute_regularized_square_loss_gradient(X[j], y[j], theta, lambda_reg)\n theta = theta - (alpha/np.sqrt(step+1))*g\n\n # update\n avg_loss = compute_square_loss(X, y, theta)\n theta_hist[i][j] = theta\n loss_hist[i][j] = avg_loss\n\n return [theta_hist, loss_hist]", "def logistic_regression(y, tx, initial_w, max_iters, gamma):\n # initializing the weights\n w = initial_w\n\n # logistic regression\n for n_iter in range(max_iters):\n # updating the weights\n grad = log_likelihood_gradient(y, tx, w)\n w -= gamma*grad\n if n_iter % (max_iters//10) == 0:\n print(log_likelihood_loss(y, tx, w))\n return w, log_likelihood_loss(y, tx, w)", "def log_reg(x_train, y_train):\n\n log_reg_classifier = LogisticRegression(max_iter=1000, solver='lbfgs')\n log_reg_classifier.fit(x_train, y_train)\n return log_reg_classifier\n\n # log_reg_classifier.fit(x_train, y_train)", "def logistic_regression(y, tx, initial_w, max_iters, gamma):\n\tif len(initial_w.shape)==2:\n\t\tinitial_w = initial_w.reshape((max(initial_w.shape)))\n\tif len(y.shape)==2:\n\t\ty = y.reshape((max(y.shape)))\n\n\tw = logistic_regression_gradient_descent(y, tx, initial_w, max_iters, gamma)\n\t\n\tloss = calculate_nll(y, tx, w)\n\n\treturn w, loss", "def stochastic_gradient_descent(X, Y, epsilon=0.0001, l=1, step_size=0.01,\n max_steps=1000):\n beta = np.ones(X.shape[1])\n for s in range(max_steps):\n # TODO: Implement iterations.\n pass\n return beta", "def grad_reglog(w, X, y, **kwargs):\n p = np.exp(-y * (np.dot(X, w)))\n P = p / (1. 
+ p)\n return -1 * np.dot(X.T, P * y) / X.shape[0]", "def gradient_descent(\n self,\n coeffs, \n x_values, y_values):\n old_loss = self.old_loss\n mse = self.loss\n\n for i in range(self.steps):\n new_loss = self.loss_mse(coeffs, x_values, y_values)\n mse = np.append(mse, new_loss)\n if abs(new_loss - old_loss) <= self.early_stop:\n print(f\"Early cut off, difference of losses between steps is less that {self.early_stop}.\")\n break\n old_loss = new_loss\n\n coeffs = coeffs - (self.learning_rate)*self.gradient_calculation(coeffs, x_values, y_values)\n\n mse = np.append(mse, self.loss_mse(coeffs, x_values, y_values))\n self.coefficients = coeffs\n self.loss = mse", "def reg_logistic_regression(y, tx, lambda_, initial_w, max_iters, gamma, debug = False):\n losses, ws = gradient_descent(y, tx, initial_w, max_iters, gamma, loss_f = model_logistic.reg_loss, grad_f = model_logistic.reg_grad, kwargs = {'lambda_': lambda_}, debug = debug)\n return get_last_ans(ws, losses)", "def reg_logistic_regression(y, tx, lambda_, initial_w, max_iters, gamma):\n w = initial_w.copy()\n ws = [w]\n loss = compute_loss_logistic(y, tx, w)\n losses = [loss]\n for iter in range(max_iters):\n gradient = compute_gradient_reg_logistic(y, tx, w, lambda_)\n w -= gamma * gradient\n loss = compute_loss_logistic(y, tx, w)\n if iter % int(max_iters/10) == 0:\n print(\"Current iteration={i}, loss={l}\".format(i=iter, l=loss))\n losses.append(loss)\n ws.append(w)\n return losses[-1], ws[-1]", "def log(self, x_old, grad, x, curr_iter, tol_g_val, tol_x_val, tol_f_val):\n\n print(\"-----------------------------------\")\n print(\"\\n Iter: \", curr_iter)\n print(\"\\n x_old: \", x_old)\n print(\"\\n gradient: \", grad)\n print(\"\\n x: \", x)\n print(\"\\n tol_x_val: \", tol_x_val)\n print(\"\\n tol_f_val: \", tol_f_val)\n print(\"\\n tol_g_val: %s \\n \" % tol_g_val)", "def my_loglike(theta, x, data, sigma):\n\n model = my_model(theta, x)\n\n return -0.5*len(x)*np.log(2*math.pi*sigma**2) - (0.5/sigma**2) * np.sum((data-model)**2)", "def reg_logistic_regression_SGD(y, tx, lambda_, initial_w, max_iters, gamma):\n # initializing the weights\n w = initial_w\n\n # regularized logistic regression\n for it, (yb, txb) in enumerate(random_batches(y, tx, max_iters)):\n # updating the weights\n grad = log_likelihood_gradient(\n np.array([yb]), txb[np.newaxis, :], w)+2*lambda_*w\n w -= gamma*grad\n # if it % (max_iters//2) == 0:\n #print(log_likelihood_loss(y, tx, w)+lambda_*np.squeeze(w.T.dot(w)))\n loss = log_likelihood_loss(y, tx, w)+lambda_*np.squeeze(w.T.dot(w))\n return w, loss", "def newton(flogl, start, fgrad, fhess, maxiter):\r\n warn = 0\r\n iteration = 0\r\n par_hat0 = start\r\n m = 1\r\n while (iteration < maxiter and m >= 1e-04):\r\n H = -la.inv(fhess(par_hat0))\r\n g = fgrad(par_hat0).reshape(start.shape)\r\n Hg = np.dot(H, g)\r\n par_hat0 = par_hat0 + Hg\r\n iteration += 1\r\n m = np.dot(g.T, Hg)\r\n if iteration == maxiter:\r\n warn = 1\r\n logl = flogl(par_hat0)\r\n return (par_hat0, logl, warn)", "def gradient_descent(f, df, x, sigma=0.5, epsilon=1e-8):\n pass", "def stochastic_gradient_descent(\n y, tx, initial_w, batch_size, max_epochs, gamma):\n # Define parameters to store w and loss\n ws = [initial_w]\n losses = []\n w = initial_w\n for n_iter in range(max_epochs):\n batch_iterator = batch_iter(y, tx, batch_size)\n batch_y, batch_tx = next(batch_iterator)\n loss = compute_cost(batch_y, batch_tx, w)\n gradient = compute_stoch_gradient(batch_y, batch_tx, w)\n\n w = w - gamma*gradient\n print(\"Gradient 
Descent({bi}/{ti}): loss={l}, w0={w0}, w1={w1}\".format(\n bi=n_iter, ti=max_iters - 1, l=loss, w0=w[0], w1=w[1]))\n # store w and loss\n ws.append(w)\n losses.append(loss)\n return losses, ws", "def reg_logistic_regression(y, tx, lambda_, initial_w, max_iters, gamma, SGD=False, batch_size=-1):\n w_start = initial_w\n w = w_start\n loss_old = 0.0\n\n if SGD:\n if(batch_size==-1): # compute automatically the maximum batch size\n batch_size = int(y.shape[0]/max_iters)\n for minibatch_y, minibatch_tx in batch_iter(y, tx, batch_size, max_iters):\n loss = compute_loss(minibatch_y, minibatch_tx, w)\n gradient = compute_gradient(minibatch_y, minibatch_tx, w)\n loss_reg, gradient_reg = regularizer(lambda_, w)\n loss = loss + loss_reg\n gradient = gradient + gradient_reg\n w = w - gamma * gradient\n\n if check_stop(loss, loss_old):\n #print('break!')\n break;\n loss_old = loss\n return w, loss\n\n else:\n for n_iter in range(max_iters):\n loss = compute_logistic_loss(y, tx, w)\n gradient = compute_logistic_gradient(y, tx, w)\n loss_reg, gradient_reg = regularizer(lambda_, w)\n loss_new = loss + loss_reg\n gradient = gradient + gradient_reg\n w = w - gamma * gradient\n\n if check_stop(loss, loss_old):\n #print('break!')\n break;\n loss_old = loss\n return w, loss", "def log_likelihood_gradient(y, tx, w):\n return tx.T.dot(sigmoid(tx.dot(w))-y)", "def logistic_regression_vec(theta, trainX, trainY):\n # Add column of ones for bias\n trainX = np.hstack((np.ones((trainX.shape[0], 1)), trainX))\n h = sigmoid(np.inner(trainX, theta))\n # np.log(1-h) can lead to problems for h = 1.0\n h = np.where(h == 1.0, 1 - 1e-12, h)\n fval = -(trainY * np.log(h) + (1 - trainY) * np.log(1 - h)).sum()\n error = h - trainY\n # Negative gradient for a minimization, must be flattened for np.minimize\n grad = np.dot(trainX.T, error).flatten()\n return fval, grad", "def __log_likelihood(self, params, *args):\n\t\tX, y, feature_set, lambda_reg, empirical_weights, verbose, sign = args\n\n\t\tno_example = len(X)\n\t\ttotal_logZ = 0\n\t\ttotal_logProb = 0\n\t\texpected_weights = np.zeros(len(feature_set))\n\t\tfor t in range(len(X)):\n\t\t\t# example_features = X[t], example_labels = y[t]\n\n\t\t\tpotential = np.zeros(len(X[t]))\n\t\t\tfor i in range(len(X[t])):\n\t\t\t\t#candidate_features = X[t][i], candidate_label = y[t][i]\n\t\t\t\tpotential[i] = feature_set.calc_inner_product(X[t][i], params)\n\n\t\t\t#scaling\n\t\t\tpotential = potential - np.max(potential, keepdims=True)\n\n\t\t\tfor i in range(len(X[t])):\n\t\t\t\ttotal_logProb += potential[i] * y[t][i]\n\n\t\t\tpotential, Z = self.__softmax(potential)\n\n\t\t\tfor i in range(len(X[t])):\n\t\t\t\tfeature_set.calc_inner_sum(expected_weights, X[t][i], potential[i])\n\n\t\t\ttotal_logZ += log(Z)\n\n\t\t# _params = feature_set.get_regularized_params(params, 'bias')\n\t\t_params = params\n\t\tlog_likelihood = total_logProb - total_logZ - (lambda_reg/2) * np.sum(np.multiply(_params,_params))\n\t\tgradients = empirical_weights - expected_weights - lambda_reg * _params\n\n\t\tglobal SUB_ITERATION_NUM\n\t\tif verbose:\n\t\t\tsub_iteration_str = ' '\n\t\t\tif SUB_ITERATION_NUM > 0:\n\t\t\t\tsub_iteration_str = '(' + '{0:02d}'.format(SUB_ITERATION_NUM) + ')'\n\t\t\tprint(' ', '{0:03d}'.format(ITERATION_NUM), sub_iteration_str, ':', log_likelihood * sign)\n\n\t\tSUB_ITERATION_NUM += 1\n\n\t\treturn sign * log_likelihood, sign * gradients", "def estimate_logreg(x,y,N_its,learning_rate=1e-4,regularizer=1e-2,lazy_reg=True):\n weights = defaultdict(float)\n weight_hist = [] #keep a 
history of the weights after each iteration\n all_labels = set(y)\n \n # this block is for lazy regularization\n ratereg = learning_rate * regularizer\n def regularize(base_feats):\n for base_feat in base_feats:\n for label in all_labels:\n #print \"regularizing\",(label,base_feat),t,last_update[base_feat],(1. - ratereg) ** (t-last_update[base_feat])\n weights[(label,base_feat)] *= (1. - ratereg) ** (t-last_update[base_feat])\n last_update[base_feat] = t\n\n t = 0\n last_update = defaultdict(int)\n\n eeta = learning_rate\n\n for it in xrange(N_its):\n\n for i,(x_i,y_i) in enumerate(zip(x,y)): #keep\n t += 1\n\n # regularization\n if lazy_reg: # lazy regularization is essential for speed\n regularize(x_i) # only regularize features in this instance\n if not lazy_reg: # for testing/explanatory purposes only\n for feat,weight in weights.iteritems():\n if feat[1] is not OFFSET: # usually don't regularize offset\n weights[feat] -= ratereg * weight\n\n p_y = compute_py(x_i,weights,all_labels) #hint\n\n term2 = make_feature_vector(x_i, y_i)\n\n for key in term2.keys():\n weights[key] = weights[key] + (term2[key]*eeta)\n\n for label in all_labels:\n temp = make_feature_vector(x_i, label)\n for key in temp.keys():\n weights[key] = weights[key] - (temp[key]*eeta*p_y[label])\n\n\n print it,\n weight_hist.append(weights.copy()) \n\n # if lazy, let regularizer catch up\n if lazy_reg:\n # iterate over base features\n regularize(list(set([f[1] for f in weights.keys() if f[1] is not OFFSET])))\n\n return weights,weight_hist", "def reg_logistic_regression_newton(y, tx, lambda_, initial_w, max_iters, gamma, debug = False):\n losses, ws = gradient_descent(y, tx, initial_w, max_iters, gamma, loss_f = model_logistic.reg_loss, grad_f = model_logistic.newton_reg_grad, kwargs = {'lambda_': lambda_}, debug = debug)\n return get_last_ans(ws, losses)", "def reg_logistic_regression(y, tx, lambda_, initial_w, max_iters, gamma):\n # initializing the weights\n w = initial_w\n\n # regularized logistic regression\n for iter in range(max_iters):\n # updating the weights\n grad = log_likelihood_gradient(y, tx, w)+2*lambda_*w\n # if iter % (max_iters//2) == 0:\n #print(log_likelihood_loss(y, tx, w)+lambda_*np.squeeze(w.T.dot(w)))\n w -= gamma*grad\n loss = log_likelihood_loss(y, tx, w)+lambda_*np.squeeze(w.T.dot(w))\n return w, loss", "def logistic_regression_SGD(y, tx, initial_w, max_iters, gamma, batch_size) :\n\n w_start = initial_w\n w = w_start\n\n for minibatch_y, minibatch_tx in batch_iter(y, tx, batch_size, max_iters):\n loss = compute_loss(minibatch_y, minibatch_tx, w)\n gradients = compute_gradient(minibatch_y, minibatch_tx, w)\n\n w = w - [gamma * g for g in gradients]\n\n return w, loss", "def linreg_stochastic_grad(X, y, alpha=.01):\n m = X.shape[0]\n n = X.shape[1]\n theta = np.zeros(n)\n for i in range(m):\n delta = alpha * (np.dot(theta.transpose(), X[i,:]) -y[i]) * X[i,:]\n theta = theta - delta\n return theta", "def MvNormalLogp():\n cov = pt.matrix(\"cov\")\n cov.tag.test_value = floatX(np.eye(3))\n delta = pt.matrix(\"delta\")\n delta.tag.test_value = floatX(np.zeros((2, 3)))\n\n cholesky = Cholesky(lower=True, on_error=\"nan\")\n\n n, k = delta.shape\n n, k = f(n), f(k)\n chol_cov = cholesky(cov)\n diag = pt.diag(chol_cov)\n ok = pt.all(diag > 0)\n\n chol_cov = pt.switch(ok, chol_cov, pt.fill(chol_cov, 1))\n delta_trans = solve_lower(chol_cov, delta.T).T\n\n result = n * k * pt.log(f(2) * np.pi)\n result += f(2) * n * pt.sum(pt.log(diag))\n result += (delta_trans ** f(2)).sum()\n result = f(-0.5) * 
result\n logp = pt.switch(ok, result, -np.inf)\n\n def dlogp(inputs, gradients):\n (g_logp,) = gradients\n cov, delta = inputs\n\n g_logp.tag.test_value = floatX(1.0)\n n, k = delta.shape\n\n chol_cov = cholesky(cov)\n diag = pt.diag(chol_cov)\n ok = pt.all(diag > 0)\n\n chol_cov = pt.switch(ok, chol_cov, pt.fill(chol_cov, 1))\n delta_trans = solve_lower(chol_cov, delta.T).T\n\n inner = n * pt.eye(k) - pt.dot(delta_trans.T, delta_trans)\n g_cov = solve_upper(chol_cov.T, inner)\n g_cov = solve_upper(chol_cov.T, g_cov.T)\n\n tau_delta = solve_upper(chol_cov.T, delta_trans.T)\n g_delta = tau_delta.T\n\n g_cov = pt.switch(ok, g_cov, -np.nan)\n g_delta = pt.switch(ok, g_delta, -np.nan)\n\n return [-0.5 * g_cov * g_logp, -g_delta * g_logp]\n\n return OpFromGraph([cov, delta], [logp], grad_overrides=dlogp, inline=True)", "def gradientdescent(cost_func, theta, args=(), delta_func = 0):\n step = 1\n old_cost = 0\n while True:\n theta_old = theta.copy()\n cost = cost_func(theta, *args)\n delta = delta_func(theta, *args)\n theta = theta - step * delta\n if cost > old_cost and old_cost != 0:\n step = step*0.7\n if np.allclose(theta_old, theta):\n break\n old_cost = cost\n return theta", "def log_likelihood(self) -> tf.Tensor:\n # K⁻¹ + GᵀΣ⁻¹G = LLᵀ.\n l_post = self._k_inv_post.cholesky\n num_data = self.observations_index.shape[0]\n\n # Hμ [..., num_transitions + 1, output_dim]\n marginal = self.emission.project_state_to_f(self.prior_ssm.marginal_means)\n marginal = self._drop_batch_shape(marginal)\n\n # y = obs - Hμ [..., num_transitions + 1, output_dim]\n disp = self.observations - marginal\n disp_data = self.sparse_observations - self.dense_to_sparse(marginal)\n\n # cst is the constant term for a gaussian log likelihood\n cst = (\n -0.5 * np.log(2 * np.pi) * tf.cast(self.emission.output_dim * num_data, default_float())\n )\n\n term1 = -0.5 * tf.reduce_sum(\n input_tensor=tf.einsum(\"...op,...p,...o->...o\", self._r_inv_data, disp_data, disp_data), axis=[-1, -2]\n )\n\n # term 2 is: ½|L⁻¹(GᵀΣ⁻¹)y|²\n # (GᵀΣ⁻¹)y [..., num_transitions + 1, state_dim]\n obs_proj = self._back_project_y_to_state(disp)\n\n # ½|L⁻¹(GᵀΣ⁻¹)y|² [...]\n term2 = 0.5 * tf.reduce_sum(\n input_tensor=tf.square(l_post.solve(obs_proj, transpose_left=False)), axis=[-1, -2]\n )\n\n ## term 3 is: ½log |K⁻¹| - log |L| + ½ log |Σ⁻¹|\n # where log |Σ⁻¹| = num_data * log|R⁻¹|\n term3 = (\n 0.5 * self.prior_ssm.log_det_precision()\n - l_post.abs_log_det()\n + 0.5 * self._log_det_observation_precision\n )\n\n return tf.reduce_sum(cst + term1 + term2 + term3)", "def grad_log(self, X):\n g = autograd.elementwise_grad(self.log_den)\n G = g(X)\n return G", "def loglf2py(store):\n loglike=0.0\n return loglinear.logl(store['xb'],store['xmatf'], store['beta'],store['yvec'],loglike)", "def update(self, returns, log_probs):\n policy_gradient = []\n for log_prob, Gt in zip(log_probs, returns):\n policy_gradient.append(-log_prob * Gt)\n\n loss = torch.stack(policy_gradient).sum()\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()", "def logistic_grad(z):\n idx_pos = np.where(z >= 0.)\n idx_neg = np.where(z < 0.)\n res = np.empty(z.shape)\n res[idx_pos] = 1. / (1. + np.exp(-z[idx_pos]))\n res[idx_neg] = 1 - 1. / (1. + np.exp(z[idx_neg]))\n return res", "def logistic_grad(z):\n idx_pos = np.where(z >= 0.)\n idx_neg = np.where(z < 0.)\n res = np.empty(z.shape)\n res[idx_pos] = 1. / (1. + np.exp(-z[idx_pos]))\n res[idx_neg] = 1 - 1. / (1. 
+ np.exp(z[idx_neg]))\n return res", "def reg_logistic_regression_newton_batch(y, tx, lambda_, initial_w, batch_size, max_iters, gamma, debug = False):\n losses, ws = stochastic_gradient_descent(y, tx, initial_w, batch_size, max_iters, gamma, loss_f = model_logistic.reg_loss, grad_f = model_logistic.newton_reg_grad, kwargs = {'lambda_': lambda_}, debug = debug)\n return get_last_ans(ws, losses)", "def least_squares_SGD(y, tx, initial_w, max_iters, gamma, batch_size=10, verbose=False):\n return stochastic_gradient_descent(y, tx, initial_w, max_iters, gamma, compute_mse, \n compute_mse_gradient, batch_size=batch_size, verbose=verbose)", "def costFunction(theta,X,y):\n m = X.shape[0]\n J = 0\n h = sigmoid (np.dot(X,theta))\n \n J = (1/m)* ((-np.dot(y.T,(np.log(h)))) - np.dot((1 - y).T,(np.log(1-h))))\n \n #grad = (1/m) * np.dot(X.T,(h-y))\n grad = (1/m) * np.dot((h.T - y), X).T\n \n return J, grad", "def step_maxL_gradient_descent(y, tx, w, gamma):\n loss=loss_maxL(y, tx, w)\n grad=calculate_maxL_gradient(y,tx,w)\n # update w by gradient\n w=w-gamma*grad\n return w, loss", "def logistic_regression(y, tx, initial_w=None, max_iters=100, gamma=0.009, batch_size=1):\n # init parameters\n if np.all(initial_w == None): initial_w = np.zeros(tx.shape[1])\n threshold = 1e-8\n losses = []\n y = (1 + y) / 2\n # build tx\n w = initial_w\n\n # start the logistic regression\n for i in range(max_iters):\n # get loss and update w.\n for y_batch, tx_batch in batch_iter(y, tx, batch_size=batch_size, num_batches=1):\n w, _ = learning_by_gradient_descent(y_batch, tx_batch, w, gamma)\n # converge criterion\n losses.append(calculate_loss(y,tx,w))\n if len(losses) > 1 and np.abs(losses[-1] - losses[-2]) < threshold:\n break\n #if i % int(max_iters/5) == 0:\n #print(losses[-1],i,'/{tot}'.format(tot=max_iters))\n\n return w,losses[-1]", "def logistic_loss(x, y):\n N = x.shape[0]\n x = np.squeeze(x)\n y_prime = (y + 1)/2\n h = 1 /(1 + np.exp(-x))\n loss = np.sum(-np.log( (h**y_prime) * ((1-h)**(1-y_prime)) ))/N\n dx = np.exp(-y*x)*(-y)/(1+np.exp(-y*x))/N\n return loss, dx", "def reg_logistic_regression(y, tx, lambda_, initial_w=None, max_iters=100, gamma=0.009, batch_size=1):\n # init parameters\n if np.all(initial_w == None): initial_w = np.zeros(tx.shape[1])\n threshold = 1e-8\n losses = []\n y = (1 + y) / 2\n # build tx\n w = initial_w\n\n # start the logistic regression\n for iter in range(max_iters):\n # get loss and update w.\n for y_batch, tx_batch in batch_iter(y, tx, batch_size=batch_size, num_batches=1):\n w, loss = learning_by_penalized_gradient_descent(y_batch, tx_batch, w, gamma, lambda_)\n # converge criterion\n loss = calculate_loss(y, tx, w) + lambda_ * np.squeeze(w.T.dot(w))\n losses.append(loss)\n if len(losses) > 1 and np.abs(losses[-1] - losses[-2]) < threshold:\n break\n #if iter % int(max_iters/5) == 0:\n #print(losses[-1],iter,'/{tot}'.format(tot=max_iters))\n\n return w,losses[-1]", "def ff_train(\n self, input_tensor: torch.Tensor, signs: torch.Tensor, theta: float\n ):\n # upgrade optimizer for positive goodness\n y = self(input_tensor.detach())\n y_pos = y[torch.where(signs == 1)]\n y_neg = y[torch.where(signs == -1)]\n # y_pos = self(input_tensor.detach()[torch.where(signs == 1)])\n loss_pos, cumulated_logits_pos = self.loss_fn(y_pos, theta, sign=1)\n # self.optimizer.zero_grad()\n # loss_pos.backward()\n # print(loss_pos.item())\n # self.optimizer.step()\n # y_neg = self(input_tensor.detach()[torch.where(signs == -1)])\n loss_neg, cumulated_logits_neg = self.loss_fn(y_neg, theta, 
sign=-1)\n self.optimizer.zero_grad()\n loss = loss_pos + loss_neg\n loss.backward()\n self.optimizer.step()\n separation = [cumulated_logits_pos, cumulated_logits_neg]\n y = torch.zeros(\n input_tensor.shape[0], *y_pos.shape[1:], device=input_tensor.device\n )\n y[torch.where(signs == 1)] = y_pos\n y[torch.where(signs == -1)] = y_neg\n return y.detach(), separation", "def logistic(weights, data, targets, hyperparameters):\n y = logistic_predict(weights, data)\n\n #####################################################################\n # TODO: #\n # Given weights and data, return the averaged loss over all data #\n # points, gradient of parameters, and the probabilities given by #\n # logistic regression. #\n #####################################################################\n f = None\n df = None\n\n f = evaluate(targets, y)[0]\n\n N = len(data)\n M = len(weights) - 1 \n temp = np.ones([N, M + 1])\n temp[: N, : M] = np.array(data)\n\n\n df = np.zeros([M+1, 1])\n\n df[:, 0] = np.array([[np.mean([(y.flatten()[i] - targets.flatten()[i]) * temp[i][j] for i in range(0, N)]) for j in range(0, M + 1)],])\n\n # df = np.matrix([[np.mean([(y[i] - targets[i]) * temp[i][j] for i in range(0, N)]) for j in range(0, M + 1)],])\n\n #####################################################################\n # END OF YOUR CODE #\n #####################################################################\n return f, df, y", "def optimize_log(p0, data, model_func, pts, lower_bound=None, upper_bound=None,\n verbose=0, flush_delay=0.5, epsilon=1e-3, \n gtol=1e-5, multinom=True, maxiter=None, full_output=False,\n func_args=[], func_kwargs={}, fixed_params=None, ll_scale=1,\n output_file=None,nmarginals=1):\n if output_file:\n output_stream = file(output_file, 'w')\n else:\n output_stream = sys.stdout\n #print \"in opt,\"\n #print data.shape\n args = (data, model_func, pts, lower_bound, upper_bound, verbose,\n multinom, flush_delay, func_args, func_kwargs, fixed_params, \n ll_scale, output_stream)\n if nmarginals==1:\n \tobject_fun=dadi.Inference._object_func_log\n else:\n \tobject_fun=_object_func_marginals_log\n\n\n p0 = dadi.Inference._project_params_down(p0, fixed_params)\n outputs = scipy.optimize.fmin_bfgs(object_fun, \n numpy.log(p0), epsilon=epsilon,\n args = args, gtol=gtol, \n full_output=True,\n disp=False,\n maxiter=maxiter)\n xopt, fopt, gopt, Bopt, func_calls, grad_calls, warnflag = outputs\n xopt = dadi.Inference._project_params_up(numpy.exp(xopt), fixed_params)\n\n if output_file:\n output_stream.close()\n\n if not full_output:\n return xopt\n else:\n return xopt, fopt, gopt, Bopt, func_calls, grad_calls, warnflag", "def _loss_gradient(x0, x1, b, w, lam, weights=None):\n nvars = len(w)\n\n # initialize + regularization term\n loss = 0.5 * lam * np.sum(w ** 2)\n gradient = np.zeros(nvars + 1) # first position is b\n gradient[1:] = lam * w\n\n # we need prediction for x\n pred_x_0_1 = [LogisticRegression._sigmoid(x0, b, w), LogisticRegression._sigmoid(x1, b, w)]\n\n # the log likelihood\n log_like_x_0_1 = [np.log(1.0 - pred_x_0_1[0]),\n np.log(pred_x_0_1[1])]\n\n # also need the error for gradient.\n error = [pred_x_0_1[0],\n pred_x_0_1[1] - 1]\n\n if weights is None:\n loss += -np.sum(log_like_x_0_1[1]) - np.sum(log_like_x_0_1[0])\n gradient[0] += np.sum(error[0]) + np.sum(error[1]) # * 1 for bias term \n for k in range(nvars):\n gradient[k + 1] += np.sum(error[0] * x0[:, k]) + np.sum(error[1] * x1[:, k])\n else:\n loss += -np.sum(weights[1] * log_like_x_0_1[1]) - np.sum(weights[0] * 
log_like_x_0_1[0])\n gradient[0] += np.sum(error[0] * weights[0]) + np.sum(error[1] * weights[1])\n for k in range(nvars):\n gradient[k + 1] += ( np.sum(weights[0] * error[0] * x0[:, k]) +\n np.sum(weights[1] * error[1] * x1[:, k]) )\n return loss, gradient", "def gradient_model (self, x, initial_weights = None, \\\n step_size = 5.0e-6, tol = 2.5e+7, n_iters = 501, l2 = 0):\n # setup initial intercept, slope, iter number and rss\n if initial_weights is None:\n weights = self.initial_weight\n else:\n weights = initial_weights\n # Compute indicator value for (y_i = +1)\n indicators = np.array([int (i) for i in (self.train_output_y==1)])\n for itr in range(n_iters):\n # Predict P(y_i = +1|x_1,w) using your predict_probability() function\n _, pred_probs = self.predict_probability(self.train_feature_x, weights)\n \n # Compute the errors as indicator - predictions\n errors = indicators - pred_probs\n\n #Update the weights:\n derivative = self.feature_derivative(errors, weights, l2)\n weights = weights + derivative * (step_size) \n \n #check if converged\n #todo\n \"\"\"\n # Checking whether log likelihood is increasing\n if itr <= 15 or (itr <= 100 and itr % 10 == 0) or (itr <= 1000 and itr % 100 == 0) \\\n or (itr <= 10000 and itr % 1000 == 0) or itr % 10000 == 0:\n lp = self.compute_log_likelihood(indicators,weights)\n print 'iteration %*d: log likelihood of observed labels = %.8f' % \\\n (int(np.ceil(np.log10(n_iters))), itr, lp)\n \"\"\"\n \n #check weights\n #print \"\\n\"\n #print \"The weights for features: \", weights\n #final prediction\n preds = self.prediction(x, weights)\n return preds, weights", "def least_squares_SGD(y, tx, initial_w, max_iters, gamma, debug = False):\n losses, ws = stochastic_gradient_descent(y, tx, initial_w, 1, max_iters, gamma, loss_f = model_linear.compute_loss, grad_f = model_linear.compute_gradient, debug = debug)\n return get_last_ans(ws, losses)", "def cost_grad_log_reg(w, b, X, y, Multicalss=False):\n if not len(X.shape) == 2:\n X_flattened = X.reshape(X.shape[1] * X.shape[2], -1).T\n else:\n X_flattened = X\n m = X_flattened.shape[1]\n print(m)\n if Multicalss:\n # Multi-class\n\n y_train_reshaped = y.reshape(len(y), 1)\n ohe = OneHotEncoder(categories='auto')\n y_train_reshaped = ohe.fit_transform(y_train_reshaped).toarray()\n print(y_train_reshaped.shape)\n A = softmax(np.dot(X_flattened, w) + b)\n print(A.shape)\n xentropy = -np.sum(y_train_reshaped * np.log(A))\n cost = np.mean(-1 / m * np.sum(y_train_reshaped * np.log(A) + (1 - y_train_reshaped) * np.log(1 - A), axis=1,\n keepdims=True))\n\n dw = 1 / m * np.dot(X_flattened.T, (A - y_train_reshaped))\n db = 1 / m * np.sum(A - y_train_reshaped)\n else:\n # Binary\n A = sigmoid(np.dot(w.T, X_flattened) + b)\n cost = -1 / m * np.sum(y * np.log(A) + (1 - y) * np.log(1 - A), axis=1, keepdims=True)\n\n dw = 1 / m * np.dot(X_flattened, (A - y).T)\n db = 1 / m * np.sum(A - y)\n\n # grads/derivatives\n cost = np.squeeze(cost)\n\n return dw, db, cost", "def logistic_pen(weights, data, targets, hyperparameters):\n\n wr = hyperparameters['weight_regularization']\n \n t = np.transpose(np.repeat(np.reshape(weights[:-1], (len(weights)-1, 1)), len(data), axis = 1))\n f_e = data * t\n z_sums = np.sum(f_e, axis=1)\n y = sigmoid(z_sums +weights[-1])\n f = np.sum(np.log(1 + np.exp(-z_sums - weights[-1])) + (1 - np.transpose(targets)) * (z_sums + weights[-1]))\n df = np.sum(data * np.transpose(((-np.exp(-z_sums - weights[-1]) / (1 + np.exp(-z_sums - weights[-1]))) + (1 - np.transpose(targets)))), axis = 0)\n df = 
np.append(df, np.sum(np.transpose(((-np.exp(-z_sums - weights[-1]) / (1 + np.exp(-z_sums - weights[-1]))) + (1 - np.transpose(targets)))), axis = 0))\n\n f += np.dot(weights[:-1].transpose()[0], weights[:-1].transpose()[0]) * wr / 2\n df = np.reshape(df, ((len(df), 1)))\n df += np.reshape(np.append(weights[:-1] * wr, 0), (len(weights), 1))\n\n f += (weights[-1, 0] ** 2) * wr / 2\n df[-1] += weights[-1,0] * wr \n\n return f, df, np.reshape(y, (len(y), 1))", "def train_logistic_regression(x_train, y_train, learning_rate, fit_intercept=False, max_iter=500):\r\n if fit_intercept:\r\n intercept = np.ones(x_train.shape[0], 1)\r\n x_train = np.hstack((intercept, x_train)) # hstacks merges 2 arrays column wise\r\n weights = np.zeros(x_train.shape[1])\r\n for iteration in range(max_iter):\r\n weights = update_weights(x_train, y_train, weights, learning_rate)\r\n # printing cost for every 100 iterations\r\n if iteration % 100 == 0:\r\n print(calculate_cost(x_train, y_train, weights))\r\n return weights", "def test_gradient_step(var_f, len_f, var_y, N):\n\n x, y = build_data(N)\n\n gp_model = initialise_gp_model(var_f, len_f, var_y, x, y)\n markovgp_model = initialise_markovgp_model(var_f, len_f, var_y, x, y)\n\n gv = objax.GradValues(gp_model.energy, gp_model.vars())\n gv_markov = objax.GradValues(markovgp_model.energy, markovgp_model.vars())\n\n lr_adam = 0.1\n lr_newton = 1.\n opt = objax.optimizer.Adam(gp_model.vars())\n opt_markov = objax.optimizer.Adam(markovgp_model.vars())\n\n gp_model.update_posterior()\n gp_grads, gp_value = gv()\n gp_loss_ = gp_value[0]\n opt(lr_adam, gp_grads)\n gp_hypers = np.array([gp_model.kernel.lengthscale, gp_model.kernel.variance, gp_model.likelihood.variance])\n print(gp_hypers)\n print(gp_grads)\n\n markovgp_model.update_posterior()\n markovgp_grads, markovgp_value = gv_markov()\n markovgp_loss_ = markovgp_value[0]\n opt_markov(lr_adam, markovgp_grads)\n markovgp_hypers = np.array([markovgp_model.kernel.lengthscale, markovgp_model.kernel.variance,\n markovgp_model.likelihood.variance])\n print(markovgp_hypers)\n print(markovgp_grads)\n\n np.testing.assert_allclose(gp_grads[0], markovgp_grads[0], rtol=1e-4)\n np.testing.assert_allclose(gp_grads[1], markovgp_grads[1], rtol=1e-4)\n np.testing.assert_allclose(gp_grads[2], markovgp_grads[2], rtol=1e-4)", "def _forward_log_det_jacobian(self, x):\n d = self._compute_shared(x=x)\n relx = (x - d.x_k) / d.w_k\n relx = relx # tf.where(d.out_of_bounds, 0.5*tf.ones_like(x), relx)\n grad = (\n 2 * tf.math.log(d.s_k) +\n tf.math.log(d.d_kp1 * relx**2 + 2 * d.s_k * relx * (1 - relx) + # newln\n d.d_k * (1 - relx)**2) -\n 2 * tf.math.log((d.d_kp1 + d.d_k - 2 * d.s_k) * relx *\n (1 - relx) + d.s_k))\n return grad # tf.where(d.out_of_bounds, tf.zeros_like(grad), grad)", "def logistic_loss(x, y):\n N = x.shape[0]\n x_flat = np.squeeze(x)\n ex = np.exp(x_flat)\n loss = np.sum(-y*x_flat+np.log(1+ex))/N\n dx = (-y+ex/(1+ex))/N\n # dx = np.reshape(dx,(len(dx),1))\n return loss, dx", "def gradient_descent(data_x, data_y, parameters, learn_rate, nb_iterations):\n\n # Cost history\n cost_tracking = np.zeros(nb_iterations)\n\n for _i in range(nb_iterations):\n parameters -= learn_rate * gradient(data_x, data_y, parameters)\n # recording the cost for each iteration\n cost_tracking[_i] = cost_function(data_x, data_y, parameters)\n\n return parameters, cost_tracking", "def compute_gradient_logreg(y, tx, w):\n assert len(set(y).difference({0., 1.})) == 0, \"Class labels must be encoded as {0, 1}\"\n\n s = sigmoid(tx.dot(w)) - y\n grad = 
tx.T.dot(s)\n\n return grad", "def loss_and_grad(self, X, y):\n\n # Initialize the loss and gradient to zero.\n loss = 0.0\n grad = np.zeros_like(self.W)\n grad_tmp = np.zeros_like(self.W)\n num_classes = self.W.shape[0] # C = num_classes\n num_train = X.shape[0]\n \n # ================================================================ #\n # YOUR CODE HERE:\n # Calculate the softmax loss and the gradient. Store the gradient\n # as the variable grad.\n # ================================================================ #\n \n exp_a = np.zeros((num_classes,num_train))\n for i in np.arange(num_train):\n \n Loss = 0.0\n\n class_scores = np.dot(self.W,X[i,:].T) # calculating class scores (C x 1 vector)\n class_scores -= np.max(class_scores) # considering the possible issue for numerical instability and account for it\n\n exp_a[:,i] = np.exp(class_scores) # turning class scores to probabilities (C x 1 vector), without normalization\n\n Loss -= np.log(exp_a[y[i],i]/np.sum(exp_a[:,i]))\n \n \n #if i==0:\n grada = np.zeros(X.shape[1])\n \n for j in range(num_classes):\n if j != y[i]:\n grad_tmp[j,:] = X[i,:].T * (exp_a[j,i] / np.sum(exp_a[:,i])) \n else: \n grad_tmp[j,:] = X[i,:].T * (exp_a[j,i] / np.sum(exp_a[:,i])) - X[i,:].T \n\n grad += grad_tmp\n loss += Loss \n \n pass\n\n\n loss /= num_train\n grad /= num_train\n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n\n return loss, grad", "def logistic_regression(X, Y):\n m, n = X.shape\n theta = np.zeros(n)\n learning_rate = 10\n\n i = 0\n while True:\n i += 1\n prev_theta = theta\n grad = calc_grad(X, Y, theta)\n theta = theta - learning_rate * grad\n if i % 10000 == 0:\n print('Finished %d iterations' % i)\n # plot decision boundary for the ith iteration listed in i_lst\n i_lst = [1, 2, 3, 10, 100, 200, 500, 1000, 10000, 30370, 40000, 50000]\n if i in i_lst:\n save_path = \"output/p01_b_a\" + str(i) + \".png\"\n plot(X, Y, theta, save_path)\n if np.linalg.norm(prev_theta - theta) < 1e-15:\n print('Converged in %d iterations' % i)\n break\n return", "def learn(self, Xtrain, ytrain):\n pass\n self.weights = np.zeros(Xtrain.shape[1],)\n\n ### YOUR CODE HERE\n \n lmbd = self.params['lamb']\n \n numsamples = Xtrain.shape[0]\n # Xless = Xtrain[:,self.params['features']]\n Xless = Xtrain\n self.weights = np.random.rand(Xless.shape[1])\n err = 10000;\n #cw =0;\n tolerance = 10*np.exp(-4)\n i=0;\n \n \n w1 = self.weights\n # cw_v =(np.dot(Xless, self.weights)-ytrain)\n #cw = (np.linalg.norm(cw_v)**2)/(2*numsamples)\n cw_v = np.dot(Xless, self.weights.T)\n cw = self.logit_cost(cw_v, Xless, ytrain) + lmbd * self.regularizer[0](self.weights)\n # print(cw)\n errors = []\n runtm = []\n epch = []\n \n err = 1\n iteration= 1000\n #tm= time.time()\n while (abs(cw-err)>tolerance) and (i <iteration):\n err = cw\n g = self.logit_cost_grad(cw_v, Xless, ytrain)\n obj = cw\n j=0\n ita = -1* self.params['stepsize']\n w = self.weights\n # w1 = np.add(w,np.dot(ita,g))\n while(j<iteration):\n w1 = np.add(w,np.dot(ita,g))\n # cw_v =(np.dot(Xless, w1)-ytrain)\n # cw = (np.linalg.norm(cw_v)**2)/(2*numsamples)\n cw_v = np.dot(Xless, w1.T)\n cw = self.logit_cost(cw_v, Xless, ytrain)+lmbd * self.regularizer[0](w1)\n ## print (cw)\n \n if(cw<np.absolute(obj-tolerance)): ############################################\n break\n ita = 0.7*ita\n j=j+1\n \n if(j==iteration):\n self.weights=w\n ita =0\n else:\n self.weights = w1\n \n # cw_v =(np.dot(Xless, 
self.weights)-ytrain)\n #cw = (np.linalg.norm(cw_v)**2)/(2*numsamples)\n cw_v = np.dot(Xless, self.weights.T)\n cw = self.logit_cost(cw_v, Xless, ytrain)\n #tm1 = time.time()-tm\n #runtm.append(tm1)\n #err = cw\n errors.append(err)\n i=i+1\n epch.append(i)", "def fgsm(model, x, y, epsilon=0.1, label_leaking=True):\n delta = torch.zeros_like(x, requires_grad=True)\n logits = model(x + delta)\n # Use the model's output instead of the true labels to avoid label leaking at training time.\n if not label_leaking:\n y = logits.max(dim=1)[1]\n loss = nn.CrossEntropyLoss()(logits, y)\n loss.backward()\n return epsilon * delta.grad.detach().sign()", "def least_squares_GD(y, tx, initial_w, max_iters, gamma, verbose=False):\n return gradient_descent(y, tx, initial_w, max_iters, gamma, compute_mse, \n compute_mse_gradient, verbose=verbose)", "def least_squares_GD(y, tx, initial_w, max_iters, gamma, debug = False):\n losses, ws = gradient_descent(y, tx, initial_w, max_iters, gamma, loss_f = model_linear.compute_loss, grad_f = model_linear.compute_gradient, debug = debug)\n return get_last_ans(ws, losses)", "def reg_logistic_regression_SGD(y, tx, lambda_, initial_w, max_iters, gamma):\n w_start = initial_w\n w = w_start\n\n for minibatch_y, minibatch_tx in batch_iter(y, tx, batch_size, max_iters):\n loss = compute_loss(minibatch_y, minibatch_tx, w)\n gradients = compute_gradient(minibatch_y, minibatch_tx, w)\n loss_reg, gradient_reg = regularizer(lambda_, w)\n\n loss = loss + loss_reg\n gradient = gradient + gradient_reg\n\n w = w - [gamma * g for g in gradients]\n\n return w, loss", "def reg_logistic_regression_batch(y, tx, lambda_, initial_w, batch_size, max_iters, gamma, debug = False):\n losses, ws = stochastic_gradient_descent(y, tx, initial_w, batch_size, max_iters, gamma, loss_f = model_logistic.reg_loss, grad_f = model_logistic.reg_grad, kwargs = {'lambda_': lambda_}, debug = debug)\n return get_last_ans(ws, losses)", "def train(self, loss_function='logistic',\n lr=0.1, decay=0.5, max_iters=3000, batch_size=128, **kwargs):\n return super(LogisticRegression, self).train('sgd', loss_function,\n lr=lr,\n decay=decay, max_iters=max_iters,\n batch_size=batch_size, **kwargs)", "def costFunction(self,theta, X, y): \n m = len(y)\n h = self.sigmoid(X@theta)\n J = 1 / m * (- y.T @ self.log(h) - (1-y).T @ self.log(1-h)) \n # grad = 1/ m * X.T @ (h - y)\n return J", "def cost(h, y):\n\tm = y.shape[0]\n\tcost = (-1/m) * (y.T @ np.log(h) + (1 - y).T @ np.log(1 - h))\n\treturn cost", "def gradient_descent(features, values, theta, alpha, num_iterations):\r\n\r\n m = len(values)\r\n cost_history = []\r\n\r\n for i in range (num_iterations):\r\n \r\n h = numpy.dot(features, theta)\r\n \r\n theta = theta - alpha / m * numpy.dot((h-values),features)\r\n \r\n cost = compute_cost(features, values, theta)\r\n \r\n cost_history.append(cost)\r\n\r\n return theta, pandas.Series(cost_history) # leave this line for the grader\r", "def statePosteriors(log_alpha, log_beta):\n return log_alpha + log_beta - logsumexp(log_alpha[-1,:])", "def logistic_sgd(X, z, num_epochs):\n num_pts, num_vars = X.shape\n\n # Initial (random) estimate of params.\n mean = 0\n sigma = 1 / np.sqrt(num_vars / 2)\n params = np.random.normal(mean, sigma, num_vars) # this is w(0)\n\n # Loop over epochs\n for ep in range(1, num_epochs+1):\n # Permute the data rows\n permutation = np.random.permutation(num_pts)\n X = X[permutation]\n z = z[permutation]\n # Iterate over the points\n for i in range(num_pts):\n # Fill in here\n x_i = X[i,:] # all vars for a 
given point\n z_i = z[i] #prediction for ith z\n z_hat_i = logistic(np.dot(x_i,params))\n nu = 1/ep\n gradient = 2*(z_hat_i-z_i)*z_hat_i*(1-z_hat_i)*x_i\n params = params - nu*gradient\n return params", "def linearReg(x,y):\n X=np.array(x).reshape(-1,1)\n Y=np.array(y).reshape(-1,1)\n x_shape = X.shape\n num_var = x_shape[1] \n yintercept = 0\n slope = 0\n progress = []\n #intialize the parameter\n weight_matrix = np.random.normal(-1,1,(num_var,1))\n yintercept = np.random.rand(1)\n #cost minmization\n for i in range(200):\n dcostdm = np.sum(np.multiply(((np.matmul(X,weight_matrix)+ yintercept)-Y),X))*2/x_shape[0] #w.r.t to the weight\n dcostdc = np.sum(((np.matmul(X,weight_matrix)+yintercept)-Y))*2/x_shape[0] #partial derivative of cost w.r.t the intercept\n weight_matrix -= 0.1*dcostdm \n #updating the weights with the calculated gradients\n yintercept -= 0.1*dcostdc #updating the weights with the calculated gradients\n progress.append(np.array((weight_matrix,yintercept)))\n slope = weight_matrix\n return (slope[-1],yintercept)", "def _gradient_descent(self, X, y, epochs, learning_rate, batch_size):\n num_feats = X.shape[1]\n num_samples = X.shape[0]\n\n y = y.reshape(num_samples, 1)\n W = np.random.rand(num_feats, 1)\n training_loss_epochs = []\n\n for ix in range(epochs):\n shuffled_ix = (np.arange(0, len(X)))\n np.random.shuffle(shuffled_ix)\n X = X[shuffled_ix, :]\n y = y[shuffled_ix, :]\n\n for batch_ix in np.arange(0, X.shape[0], batch_size):\n dW = self._compute_gradient(W, X[batch_ix:batch_ix + batch_size], y[batch_ix:batch_ix + batch_size])\n W -= learning_rate * dW\n\n if ix % 10 == 0:\n y_pred = np.dot(X, W)\n training_loss = self.mse(y, y_pred)\n print('epoch {0} : training loss {1}'.format(ix, training_loss))\n training_loss_epochs.append(training_loss[0])\n\n self.weights = W\n self.training_loss = training_loss_epochs\n return None", "def learning_by_gradient_descent(y, tx, w, gamma):\n loss = calculate_loss(y,tx,w)\n grad = calculate_gradient(y,tx,w)\n w_new = w - gamma*grad\n #grad is for debugging purpose\n return loss, w_new,grad", "def stochastic_gradient_descent(y, tx, initial_w, batch_size, max_iters, gamma):\n\n # Define parameters to store w and loss\n ws = [initial_w]\n losses = []\n w = initial_w\n\n for n_iter in range(max_iters):\n\n # ***************************************************\n # INSERT YOUR CODE HERE\n # TODO: implement stochastic gradient descent.\n # ***************************************************\n raise NotImplementedError\n\n print(\"SGD iter. {bi}/{ti}: loss={l}, w0={w0}, w1={w1}\".format(\n bi=n_iter, ti=max_iters - 1, l=loss, w0=w[0], w1=w[1]))\n return losses, ws", "def run_gradient_descent(data,theta,alpha,num_iters):\n population = data[:,0]\n prices = data[:,1]\n x = ones(shape=(len(population),2)) #add ones for theta0 \n x[:,1] = population\n x = transpose(x)\n error_history = zeros(shape=(num_iters,1))\n \n for i in range(num_iters):\n predictions = theta.dot(x)\n errors_x1 = (predictions - prices) * x[0,:]\n errors_x2 = (predictions - prices) * x[1,:]\n theta[0][0] = theta[0][0] - alpha*(1.0/len(population))*errors_x1.sum()\n theta[0][1] = theta[0][1] - alpha*(1.0/len(population))*errors_x2.sum()\n error_history[i,0] = calculate_cost(theta,data)\n \n return theta, error_history" ]
[ "0.7094588", "0.6940799", "0.69093955", "0.6795237", "0.6782252", "0.6769422", "0.67630374", "0.67446244", "0.6688038", "0.66774327", "0.66749567", "0.66108364", "0.66103506", "0.6609761", "0.65868056", "0.6584251", "0.6573774", "0.6572846", "0.6521604", "0.6511357", "0.6498033", "0.64804876", "0.6479808", "0.647569", "0.64743227", "0.64415276", "0.64314324", "0.6412206", "0.6411802", "0.63898313", "0.6366496", "0.6349593", "0.6348001", "0.63464904", "0.6342103", "0.6337736", "0.6336454", "0.63324946", "0.6328737", "0.6326641", "0.6321718", "0.63070965", "0.63012177", "0.62980837", "0.6268599", "0.6245073", "0.62385833", "0.6230452", "0.62296945", "0.62259156", "0.62255836", "0.62241435", "0.6221552", "0.6220402", "0.62183964", "0.6212204", "0.6179515", "0.61625135", "0.6136902", "0.6133691", "0.6133691", "0.6131569", "0.612267", "0.61129576", "0.6112377", "0.6112204", "0.61084235", "0.610548", "0.61039746", "0.609803", "0.60970736", "0.60965556", "0.60905004", "0.60894996", "0.6087236", "0.6086904", "0.6084446", "0.6081504", "0.6062282", "0.6057118", "0.60560685", "0.60558194", "0.605232", "0.60511225", "0.6047769", "0.604017", "0.6026415", "0.602226", "0.60222346", "0.60177255", "0.601692", "0.60163975", "0.6007929", "0.6003263", "0.59966284", "0.59962696", "0.59895295", "0.5981317", "0.59810907", "0.5977832", "0.59599024" ]
0.0
-1
Run `code` with profiler. Used by ``%prun`` and ``%run -p``.
def _run_with_profiler(self, code, opts, namespace):
    # Fill default values for unspecified options:
    opts.merge(Struct(D=[''], l=[], s=['time'], T=['']))

    prof = profile.Profile()
    try:
        prof = prof.runctx(code, namespace, namespace)
        sys_exit = ''
    except SystemExit:
        sys_exit = """*** SystemExit exception caught in code being profiled."""

    stats = pstats.Stats(prof).strip_dirs().sort_stats(*opts.s)

    lims = opts.l
    if lims:
        lims = []  # rebuild lims with ints/floats/strings
        for lim in opts.l:
            try:
                lims.append(int(lim))
            except ValueError:
                try:
                    lims.append(float(lim))
                except ValueError:
                    lims.append(lim)

    # Trap output.
    stdout_trap = StringIO()
    stats_stream = stats.stream
    try:
        stats.stream = stdout_trap
        stats.print_stats(*lims)
    finally:
        stats.stream = stats_stream

    output = stdout_trap.getvalue()
    output = output.rstrip()

    if 'q' not in opts:
        page.page(output)
    print(sys_exit, end=' ')

    dump_file = opts.D[0]
    text_file = opts.T[0]
    if dump_file:
        prof.dump_stats(dump_file)
    if text_file:
        with open(text_file, 'w') as pfile:
            pfile.write(output)

    if 'r' in opts:
        return stats
    else:
        return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def profile_code(profiler):\n print('\\n')\n ps = pstats.Stats(profiler).strip_dirs().sort_stats('cumulative')\n ps.print_stats(10)", "def runner(code, out_stream):\n code_obj = compiler.compile_source(code)\n vm = virtual_machine.VirtualMachine(out_stream)\n vm.run_code(code_obj)", "def part_1(code: List):\n acc, _ = run_code(code)\n\n return acc", "def profile(script, argv, timer, pickle_protocol, dump_filename, mono):\n filename, code, globals_ = script\n sys.argv[:] = [filename] + list(argv)\n __profile__(filename, code, globals_,\n timer=timer, pickle_protocol=pickle_protocol,\n dump_filename=dump_filename, mono=mono)", "def run_code():\n\n output = None\n code = request.json['code']\n\n cmd = 'python -c \"' + code +'\"'\n p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE,\n stderr=STDOUT, close_fds=True)\n output = p.stdout.read()\n\n return jsonify(output.decode('utf-8'))", "def run(self, code='', file=''):\n if file and code:\n print('WARNING: reading file instead of the code')\n\n if file:\n source = Path(file)\n if source.exists():\n if not source.is_file():\n self.__abort(ERR_CODE_NOT_FILE)\n if file[len(file) - 3:] != EXTENSION:\n self.__abort(ERR_CODE_NOT_SOURCE)\n with source.open() as f:\n self.__code = f.read()\n else:\n self.__abort(ERR_CODE_FILE_MISSING)\n else:\n self.__code = code\n\n self.__tokenize()\n return self.__execute()", "def run_monitored_proc(code):\n if not sys.platform.startswith('linux'):\n raise RuntimeError(\"Peak memory monitoring only works on Linux\")\n\n code = textwrap.dedent(code)\n process = subprocess.Popen([sys.executable, '-c', code])\n\n peak_memusage = -1\n\n start = time.time()\n while True:\n ret = process.poll()\n if ret is not None:\n break\n\n with open('/proc/%d/status' % process.pid, 'r') as f:\n procdata = f.read()\n\n m = re.search(r'VmRSS:\\s*(\\d+)\\s*kB', procdata, re.S | re.I)\n if m is not None:\n memusage = float(m.group(1)) * 1e3\n peak_memusage = max(memusage, peak_memusage)\n\n time.sleep(0.01)\n\n process.wait()\n\n duration = time.time() - start\n\n if process.returncode != 0:\n raise AssertionError(\"Running failed:\\n%s\" % code)\n\n return duration, peak_memusage", "def run_code(self, code: str, with_preprocess: bool = False,\n exception_list: Tuple = (), *args, **kwargs):\n # Get the path to the configuration file\n all_codes = get_all_codes(self.all_cfgs_dir)\n cfg_path = all_codes[code]\n # Run the experiment\n runner = self.get_runner()\n runner.merge_cfg(cfg_path)\n # Setup the outputs\n current_experiment_output_dir = os.path.join(self.hyper_experiment_path, f'exp-{code}')\n if not os.path.exists(current_experiment_output_dir):\n os.mkdir(current_experiment_output_dir)\n runner.set_output_dir(current_experiment_output_dir)\n\n # Run the experiment\n if self.verbose > 0:\n print(\"---\")\n print(\"This the configuration that will be used:\")\n print(runner.cfg)\n print(\"---\")\n runner.verbose = max(0, self.verbose - 1)\n try:\n if with_preprocess:\n runner.preprocess()\n score = runner.run(*args, **kwargs)\n except exception_list as e:\n warnings.warn(f\"Exception caught {e}\")\n score = None\n self.CACHE.LOAD()\n score_dict = self.CACHE.SET_IFN('score_dict', {})\n score_dict[code] = score\n self.CACHE.SET('score_dict', score_dict)\n self.CACHE.SAVE()\n runner.CACHE.RESET(prompt=False)\n return score", "def code():", "def run_code(code: List) -> Tuple[int, int]:\n executed_lines = set()\n\n prv_ptr, ins_ptr, acc = -1, 0, 0\n\n while True:\n if ins_ptr in executed_lines:\n break\n\n 
executed_lines.add(ins_ptr)\n\n cmd, args = code[ins_ptr]\n\n if cmd == \"acc\":\n acc += int(args)\n\n elif cmd == \"nop\":\n pass\n\n elif cmd == \"jmp\":\n prv_ptr = ins_ptr\n ins_ptr += int(args)\n continue\n\n prv_ptr = ins_ptr\n ins_ptr += 1\n\n else:\n # No loop detected\n return acc, -1\n\n return acc, ins_ptr", "def runcode(self, code):\n if not self.locals.get('autocommit', None):\n return self.locals['db'].transact(code.InteractiveConsole.runcode, self, code)\n return code.InteractiveConsole.runcode(self, code)", "def run_code(plot_path, function_name, plot_code):\r\n # Change the working directory to the directory of the example, so\r\n # it can get at its data files, if any. Add its path to sys.path\r\n # so it can import any helper modules sitting beside it.\r\n if plot_code is not None:\r\n exec(plot_code)\r\n else:\r\n pwd = os.getcwd()\r\n path, fname = os.path.split(plot_path)\r\n sys.path.insert(0, os.path.abspath(path))\r\n stdout = sys.stdout\r\n sys.stdout = cStringIO.StringIO()\r\n os.chdir(path)\r\n fd = None\r\n try:\r\n fd = open(fname)\r\n module = imp.load_module(\r\n \"__plot__\", fd, fname, ('py', 'r', imp.PY_SOURCE))\r\n finally:\r\n del sys.path[0]\r\n os.chdir(pwd)\r\n sys.stdout = stdout\r\n if fd is not None:\r\n fd.close()\r\n\r\n if function_name is not None:\r\n getattr(module, function_name)()", "def exec_code(code, db, write=True):\n evaler = Evaluator(db, write=write)\n glb = {}\n loc = ExecutionContext(evaler=evaler)\n exec(code, glb, loc)", "def RunScript(code):\n with ScriptContext() as script_module:\n try:\n exec code in script_module.__dict__\n except:\n # Get exception output as close to exec as possible.\n # We don't take the first entry in the traceback because it just contains\n # \"exec\". Everything after that is the submitted code.\n try:\n etype, evalue, tb = sys.exc_info()\n traceback.print_exception(etype,\n evalue,\n tb.tb_next, # one frame up\n file=sys.stderr)\n finally:\n del tb # break circular references when using exc_info\n\n return sys.stdout.getvalue(), sys.stderr.getvalue()", "def timeit_profile(stmt, number, repeat, setup,\n timer, pickle_protocol, dump_filename, mono, **_ignored):\n del _ignored\n sys.path.insert(0, os.curdir)\n globals_ = {}\n exec_(setup, globals_)\n if number is None:\n # determine number so that 0.2 <= total time < 2.0 like timeit.\n dummy_profiler = Profiler()\n dummy_profiler.start()\n for x in range(1, 10):\n number = 10 ** x\n t = time.time()\n for y in range(number):\n exec_(stmt, globals_)\n if time.time() - t >= 0.2:\n break\n dummy_profiler.stop()\n del dummy_profiler\n code = compile('for _ in range(%d): %s' % (number, stmt),\n 'STATEMENT', 'exec')\n __profile__(stmt, code, globals_,\n timer=timer, pickle_protocol=pickle_protocol,\n dump_filename=dump_filename, mono=mono)", "async def cli(self, code, *m):\n if self.bot.check_code(code):\n p = subprocess.run(args=m, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n await self.bot.say(codify(p.stdout.decode('utf-8'), p.stderr.decode('utf-8'), language='DOS'))\n else:\n await self.bot.reply('Bad code!')", "def runcode(self,code_obj):\n\n # Set our own excepthook in case the user code tries to call it\n # directly, so that the IPython crash handler doesn't get triggered\n old_excepthook,sys.excepthook = sys.excepthook, self.excepthook\n outflag = 1 # happens in more places, so it's easier as default\n try:\n try:\n exec code_obj in self.locals\n finally:\n # Reset our crash handler in place\n sys.excepthook = old_excepthook\n except 
SystemExit:\n self.resetbuffer()\n self.showtraceback()\n warn( __builtin__.exit,level=1)\n except self.custom_exceptions:\n etype,value,tb = sys.exc_info()\n self.CustomTB(etype,value,tb)\n except:\n self.showtraceback()\n else:\n outflag = 0\n if code.softspace(sys.stdout, 0):\n print\n # Flush out code object which has been run (and source)\n self.code_to_run = None\n self.code_to_run_src = ''\n return outflag", "def show_code(code):\n\n print('The code was: '+str(code))", "def run(self):\n if not self.__class__.profile is None:\n import cProfile\n cminstance = self\n cProfile.runctx('self._run()', globals(), locals(), self.__class__.profile)\n else:\n self._run()", "def _score_code(self, code):\n # Get list of 2-tuples, each containing an input sequence and an output\n # sequence.\n io_seqs = self.task.make_io_set()\n terminal_reward = 0.0\n results = []\n reason = 'correct'\n for input_seq, output_seq in io_seqs:\n eval_result = bf.evaluate(\n code, input_buffer=input_seq, timeout=0.1,\n max_steps=self.max_execution_steps,\n base=self.task.base,\n require_correct_syntax=self.require_correct_syntax)\n result, success = eval_result.output, eval_result.success\n if not success:\n # Code execution timed out.\n terminal_reward = self.failure_reward\n results = []\n reason = eval_result.failure_reason\n break\n else:\n terminal_reward += self.reward_fn(result, output_seq, self.task.base)\n if result == output_seq:\n terminal_reward += self.correct_bonus # Bonus for correct answer.\n\n # Only add additional reward for shorter code. Subtracting reward\n # interferes with the main objective. Only optimize for length once\n # any solution is found.\n if self.min_code_length == self.max_code_length:\n terminal_reward += self.code_length_bonus\n else:\n terminal_reward += self.code_length_bonus * clipped_linear(\n x=len(code), x0=self.min_code_length, y0=1.0,\n slope=-self.time_penalty, y_range=(0.0, 1.0))\n\n # reason remains 'correct' if it is already\n elif reason == 'correct':\n reason = 'wrong'\n results.append(result)\n\n # Return list of rewards, one for each char in the code. 
All are 0 except\n # for the terminal reward.\n terminal_reward /= self.best_reward\n return misc.RewardInfo(\n episode_rewards=[0.0] * (len(code) - 1) + [terminal_reward],\n input_case=misc.IOTuple(i for i, o in io_seqs),\n correct_output=misc.IOTuple(o for i, o in io_seqs),\n code_output=misc.IOTuple(results),\n input_type=self.input_type,\n output_type=self.output_type,\n reason=reason)", "def runSync(code):\n __PyMainThread__.runSync(code)\n sleep(0.1)", "def run_monitored(code):\n\n if hasattr(os, 'wait4'):\n return run_monitored_wait4(code)\n else:\n return run_monitored_proc(code)", "def profile_function(self):\n with _CodeHeatmapCalculator() as prof:\n result = self._run_object(*self._run_args, **self._run_kwargs)\n code_lines, start_line = inspect.getsourcelines(self._run_object)\n\n source_lines = []\n for line in code_lines:\n source_lines.append(('line', start_line, line))\n start_line += 1\n\n filename = os.path.abspath(inspect.getsourcefile(self._run_object))\n heatmap = prof.heatmap[filename]\n run_time = sum(time for time in heatmap.values())\n return {\n 'objectName': self._object_name,\n 'runTime': run_time,\n 'result': result,\n 'timestamp': int(time.time()),\n 'heatmaps': [{\n 'name': self._object_name,\n 'heatmap': heatmap,\n 'executionCount': prof.execution_count[filename],\n 'srcCode': source_lines,\n 'runTime': run_time\n }]\n }", "def execute(self, code):\n code = code()\n\n # Build an AST tree from the Python code, to get the line number of each statement\n try:\n nodes = compiler.parse(code).getChildNodes()[0].getChildNodes()\n lines = [node.lineno - 1 for node in nodes]\n except:\n self.executions += '>>> ' + code + '\\n' + ''.join(traceback.format_exception(*sys.exc_info())[4:])\n return\n\n code = code.splitlines()\n\n with IDEFrameContext.exec_lock:\n stdout = sys.stdout\n\n try:\n # Iterate over all the statements\n for (a, b) in zip(lines, lines[1:] + [None]):\n sys.stdout = StringIO()\n\n source = code[a:b]\n\n try:\n # Execute the statement using this local and global context\n frame = self.get_frame()\n exec compile('\\n'.join(source), '<web>', 'single', 0, 1) in frame.f_locals, frame.f_globals\n except:\n print ''.join(traceback.format_exception(*sys.exc_info())[2:]).rstrip()\n\n self.executions += '\\n'.join([('... 
' if line.startswith(' ') else '>>> ') + line for line in source]) + '\\n' + sys.stdout.getvalue()\n finally:\n sys.stdout = stdout", "def go (fun, *args, **kwargs):\n if 'profile_filename' in kwargs:\n profile_filename = kwargs['profile_filename']\n del kwargs['profile_filename']\n else:\n profile_filename = '/tmp/coro_profile.bin'\n\n if 'profile_bench' in kwargs:\n profile_bench = kwargs['profile_bench']\n del kwargs['profile_bench']\n else:\n profile_bench = coro.rusage_bench\n\n p = coro.new_profiler (profile_bench)\n p.start()\n try:\n return fun (*args, **kwargs)\n finally:\n total_ticks = p.stop()\n user_ticks = _dump (p, profile_filename)", "def profile(f):\n def inner(*args, **kwargs):\n p = Profiler()\n result = p.runcall(f, *args, **kwargs)\n p.print_stats()\n return result\n return inner", "def cprofiler(fun, *args, **kwargs):\n print(f\"Profiling {fun.__name__}\")\n with cProfile.Profile() as pr:\n fun(*args, **kwargs)\n pr.print_stats()", "def main_code():\n pass", "def execute(self, code, environment = dict()):\r\n if not self.config.get('scripting', 'enable') and type(code) == str:\r\n self.send(code, log = False)\r\n else:\r\n if type(code) == str:\r\n c = compile(code, 'errors.log', 'exec')\r\n else:\r\n c = code\r\n eval(c, self.getEnvironment(environment))", "def runcode(self, codeobj):\n\n\t\ttry:\n\t\t\told_display_hook, sys.displayhook = sys.displayhook, self._displayhook\n\t\t\told_stdout, sys.stdout = sys.stdout, self.outputbuffer\n\t\t\told_stdin, sys.stdin = sys.stdin, None\n\t\t\texec codeobj in self.globals, self.locals #pylint: disable-msg=W0122\n\t\texcept SystemExit:\n\t\t\traise\n\t\texcept:\n\t\t\tself.showtraceback()\n\t\telse:\n\t\t\tif code.softspace(self.outputbuffer, 0):\n\t\t\t\tself.outputbuffer.write(\"\\n\")\n\t\tfinally:\n\t\t\tsys.displayhook = old_display_hook\n\t\t\tsys.stdout = old_stdout\n\t\t\tsys.stdin = old_stdin", "def evaluateCode(lang, code):", "def live_profile(script, argv, timer, interval, spawn, signum,\n pickle_protocol, mono):\n filename, code, globals_ = script\n sys.argv[:] = [filename] + list(argv)\n parent_sock, child_sock = socket.socketpair()\n pid = os.fork()\n if pid == 0:\n # child\n devnull = os.open(os.devnull, os.O_RDWR)\n for f in [sys.stdin, sys.stdout, sys.stderr]:\n os.dup2(devnull, f.fileno())\n frame = sys._getframe()\n profiler = BackgroundProfiler(timer, frame, code, signum)\n profiler.prepare()\n server_args = (interval, noop, pickle_protocol)\n server = SelectProfilingServer(None, profiler, *server_args)\n server.clients.add(child_sock)\n spawn(server.connected, child_sock)\n try:\n exec_(code, globals_)\n finally:\n child_sock.close()\n else:\n # parent\n viewer, loop = make_viewer(mono)\n title = get_title(filename)\n client = ProfilingClient(viewer, loop.event_loop, parent_sock, title)\n client.start()\n try:\n loop.run()\n except KeyboardInterrupt:\n pass\n finally:\n parent_sock.close()\n os.kill(pid, signal.SIGINT)", "def runcode(self, code):\n try:\n # we copy the line in a tmp var\n code_string = self.lines_pushed[:-1]\n result = self.ros_python_interpreter.run_python_command(\n code_string\n )\n self.write(result)\n if not result.endswith(\"\\n\"):\n self.write(\"\\n\")\n # we reset the cache here\n self.lines_pushed = \"\"\n except Exception as e:\n self.write(str(e))\n return False", "def _profile_module(self):\n with open(self._run_object, 'r') as srcfile:\n src_code = srcfile.read()\n code = compile(src_code, self._run_object, 'exec')\n try:\n with _CodeHeatmapCalculator() as prof:\n exec(code, 
self._globs, None)\n except SystemExit:\n pass\n\n heatmaps = []\n for filename, heatmap in prof.heatmap.items():\n if os.path.isfile(filename):\n heatmaps.append(\n self._format_heatmap(\n filename, heatmap, prof.execution_count[filename]))\n\n run_time = sum(heatmap['runTime'] for heatmap in heatmaps)\n return {\n 'objectName': self._run_object,\n 'runTime': run_time,\n 'heatmaps': heatmaps\n }", "def main():\n args = get_args()\n with profiling.profiled(enabled=args.profile_stats):\n process_files(args)", "def profile_module(self):\n return base_profiler.run_in_separate_process(self._profile_module)", "async def evaluate(self, ctx, *, code):\n # [p]evaluate <code>\n\n code = code.strip('` ')\n python = '```py\\n{}\\n```'\n result = None\n\n global_vars = globals().copy()\n global_vars['bot'] = self.bot\n global_vars['ctx'] = ctx\n global_vars['message'] = ctx.message\n global_vars['author'] = ctx.message.author\n global_vars['channel'] = ctx.message.channel\n global_vars['server'] = ctx.message.server\n\n try:\n result = eval(code, global_vars, locals())\n except Exception as e:\n await self.bot.say(python.format(type(e).__name__ + ': ' + str(e)))\n return\n\n if asyncio.iscoroutine(result):\n result = await result\n\n result = python.format(result)\n if not ctx.message.channel.is_private:\n censor = CacheAPI.get(key='dwarf_token')\n r = \"[EXPUNGED]\"\n for w in censor:\n if w != \"\":\n result = result.replace(w, r)\n result = result.replace(w.lower(), r)\n result = result.replace(w.upper(), r)\n await self.bot.say(result)", "def generate(code):\n name, traits = parseCode(code)\n return globals()[name](**traits)", "def cache_code(self):\n\n # Generate the prologue\n self._synthesize_prologue()\n\n # Don't have a real epilogue.\n self.add(spu.stop(0x2000))\n # self._check_alignment(self._code, 'spu code')\n\n # self.exec_module.make_executable(self._code.buffer_info()[0], len(self._code))\n\n # Append our instructions to the prologue's, first making sure the alignment is correct.\n if len(self._prologue._code) % 2 == 1: # Odd number of instructions\n self._prologue.add(spu.lnop(0))\n\n self._prologue._code.extend(self._code)\n self._prologue._check_alignment(self._prologue._code, 'spu prologue')\n \n self._epilogue = self \n self._cached = True\n return", "def run_mypy(code):\n cmd = shlex.split(\n 'mypy -c \"{}\" --python-executable {} --allow-untyped-globals'.format(\n code, sys.executable\n ),\n posix=sys.platform != 'win32',\n comments=True\n )\n proc = subprocess.Popen(\n cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE\n )\n out, err = proc.communicate()\n return out.decode(), err.decode(), proc.poll()", "def exec_code(self,code,inputs=None,returns=None):\n #use the first worker to package up the cmd.\n package = self.workers[0].exec_code_pack(code,inputs,returns)\n return self._send_recv(package)", "def exec_(_code_, _globs_=None, _locs_=None):\n if _globs_ is None:\n frame = sys._getframe(1)\n _globs_ = frame.f_globals\n if _locs_ is None:\n _locs_ = frame.f_locals\n del frame\n elif _locs_ is None:\n _locs_ = _globs_\n exec(\"\"\"exec _code_ in _globs_, _locs_\"\"\")", "def exec_(_code_, _globs_=None, _locs_=None):\n if _globs_ is None:\n frame = sys._getframe(1)\n _globs_ = frame.f_globals\n if _locs_ is None:\n _locs_ = frame.f_locals\n del frame\n elif _locs_ is None:\n _locs_ = _globs_\n exec(\"\"\"exec _code_ in _globs_, _locs_\"\"\")", "def exec_(_code_, _globs_=None, _locs_=None):\n if _globs_ is None:\n frame = sys._getframe(1)\n _globs_ = frame.f_globals\n if 
_locs_ is None:\n _locs_ = frame.f_locals\n del frame\n elif _locs_ is None:\n _locs_ = _globs_\n exec(\"\"\"exec _code_ in _globs_, _locs_\"\"\")", "def _exec(self, code):\n self._send_command('EXEC ' + code)", "def trace(self, code, env=None):\n env = env or {}\n env.update(dict(objects=objects))\n return self.tracer.trace(dedent(code), env=env)", "def exec_(_code_, _globs_=None, _locs_=None):\r\n if _globs_ is None:\r\n frame = sys._getframe(1)\r\n _globs_ = frame.f_globals\r\n if _locs_ is None:\r\n _locs_ = frame.f_locals\r\n del frame\r\n elif _locs_ is None:\r\n _locs_ = _globs_\r\n exec(\"\"\"exec _code_ in _globs_, _locs_\"\"\")", "def exec_(_code_, _globs_=None, _locs_=None):\r\n if _globs_ is None:\r\n frame = sys._getframe(1)\r\n _globs_ = frame.f_globals\r\n if _locs_ is None:\r\n _locs_ = frame.f_locals\r\n del frame\r\n elif _locs_ is None:\r\n _locs_ = _globs_\r\n exec(\"\"\"exec _code_ in _globs_, _locs_\"\"\")", "def exec_(_code_, _globs_=None, _locs_=None):\r\n if _globs_ is None:\r\n frame = sys._getframe(1)\r\n _globs_ = frame.f_globals\r\n if _locs_ is None:\r\n _locs_ = frame.f_locals\r\n del frame\r\n elif _locs_ is None:\r\n _locs_ = _globs_\r\n exec(\"\"\"exec _code_ in _globs_, _locs_\"\"\")", "def eval(self, code: str, **data):\n self.actions.append({'type': 'eval', 'code': code, 'data': data})", "def profileit(func):\n def wrapper(*args, **kwargs):\n func_name = func.__name__ + \".pfl\"\n prof = cProfile.Profile()\n retval = prof.runcall(func, *args, **kwargs)\n prof.dump_stats(func_name)\n return retval\n\n return wrapper", "def top_code(self, code):\n self.code_for_top = (self.y, code)\n self.y += self.unit*1.5", "def code_print(code: str, file=None, print_function=None):\n str_io = StringIO()\n\n lines = code.splitlines()\n n_chars = int(np.floor(np.log10(len(lines))+1))\n\n for i, line in enumerate(lines):\n str_io.write(f\"{i+1:{n_chars}}| {line}\\n\")\n\n (print_function or print)(str_io.getvalue(), file=file)", "def run_benchmark(take_geo_mean, num_runs, bench_func, *args):\n #if options.profile:\n # import cProfile\n # prof = cProfile.Profile()\n # prof.runcall(bench_func, num_runs, *args)\n # prof.print_stats(sort=options.profile_sort)\n #else:\n data = bench_func(num_runs, *args)\n if take_geo_mean:\n product=1\n _total=0\n for _x in data:\n _total+=_x\n product *= _x\n _geo_mean=math.pow(product, 1.0 / len(data))\n return \"Runs: %d, Total Time:%5.3f, Geo Mean:%6.4f\" % (len(data), _total, _geo_mean)\n else:\n for x in data:\n print(x)", "def main():\n parser = ArgumentParser(description=__doc__,\n formatter_class=RawTextHelpFormatter)\n parser.add_argument('code', help='Python code to execute')\n group = parser.add_mutually_exclusive_group()\n group.add_argument('-3', action='store_const', dest='python',\n const='python3', help='Explicitly use Python 3')\n group.add_argument('-2', action='store_const', dest='python',\n const='python2', help='Explicitly use Python 2')\n group.add_argument('-p', '--python', help='Specify python interpreter')\n args = parser.parse_args()\n if args.python is not None:\n call([args.python, __file__, args.code])\n else:\n InteractiveInterpreter(LocalsImportDict()).runsource(args.code)", "def execute(self, code, mode = 'int', debug = False, params = None, n_spus = 1):\n\n if len(code._code) == 0:\n return None\n\n # Cache the code here\n if not code._cached:\n code.cache_code()\n\n # Setup the parameter structure\n if params is None:\n params = ExecParams()\n\n addr = code._prologue.inst_addr()\n params.addr = addr\n 
params.size = len(code._prologue._code) * 4 # size in bytes\n\n retval = None\n\n if type(code) is ParallelInstructionStream:\n # Parallel SPU execution\n speids = []\n if n_spus > 8:\n raise Exception(\"Too many SPUs requests (%d > 8)\" % n_spus)\n\n # print 'Regs:', code.r_rank, code.r_size, code.r_block_size, code.r_offset\n\n # Set up the parameters and execute each spu thread\n for i in range(n_spus):\n pi = _copy_params(params, i, n_spus)\n\n if hasattr(code, \"raw_data_size\") and code.raw_data_size is not None:\n pi.p4 = int(code.raw_data_size / n_spus) # block_size\n pi.p5 = pi.p4 * i # offset\n\n # print 'Executing: 0x%x %d %d %d %d' % (pi.addr, pi.p1, pi.p2, pi.p4, pi.p5)\n speids.append(spe.Processor.execute(self, code, debug=debug, params=pi, mode='async'))\n\n # Handle blocking execution modes\n if mode != 'async':\n reterrs = [self.join(speid) for speid in speids]\n retval = reterrs\n else:\n retval = speids\n else:\n # Single SPU execution\n retval = spe.Processor.execute(self, code, mode, debug, params)\n\n return retval", "async def async_run_subproc_from_code(sub_proc_code: str) -> asyncio.subprocess.Process:\n return await asyncio.create_subprocess_exec(sys.executable, '-c', sub_proc_code, stdout=asyncio.subprocess.PIPE)", "def remote_profile(script, argv, timer, interval, spawn, signum,\n pickle_protocol, addr, verbose):\n filename, code, globals_ = script\n sys.argv[:] = [filename] + list(argv)\n # create listener.\n listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n listener.bind(addr)\n listener.listen(1)\n # be verbose or quiet.\n if verbose:\n log = lambda x: click.echo(click.style(' > ', fg='cyan') + x)\n bound_addr = listener.getsockname()\n log('Listening on {0}:{1} for profiling...'.format(*bound_addr))\n else:\n log = noop\n # start profiling server.\n frame = sys._getframe()\n profiler = BackgroundProfiler(timer, frame, code, signum)\n profiler.prepare()\n server_args = (interval, log, pickle_protocol)\n server = SelectProfilingServer(listener, profiler, *server_args)\n spawn(server.serve_forever)\n # exec the script.\n try:\n exec_(code, globals_)\n except KeyboardInterrupt:\n pass", "def exec(self, code, timeout=10):\n self.__exec_part_1(code)\n ret, ret_err = self.__exec_part_2(timeout)\n if ret_err:\n raise ReplError(ret_err)\n return ret", "def startTestRun(self, event):\n self.prof = cProfile.Profile()\n event.executeTests = self.prof.runcall", "def code(self, code):\n\n self._code = code", "def safe_exec(code, globals_dict, random_seed=None, python_path=None, cache=None, slug=None, unsafely=False):\r\n # Check the cache for a previous result.\r\n if cache:\r\n safe_globals = json_safe(globals_dict)\r\n md5er = hashlib.md5()\r\n md5er.update(repr(code))\r\n update_hash(md5er, safe_globals)\r\n key = \"safe_exec.%r.%s\" % (random_seed, md5er.hexdigest())\r\n cached = cache.get(key)\r\n if cached is not None:\r\n # We have a cached result. The result is a pair: the exception\r\n # message, if any, else None; and the resulting globals dictionary.\r\n emsg, cleaned_results = cached\r\n globals_dict.update(cleaned_results)\r\n if emsg:\r\n raise SafeExecException(emsg)\r\n return\r\n\r\n # Create the complete code we'll run.\r\n code_prolog = CODE_PROLOG % random_seed\r\n\r\n # Decide which code executor to use.\r\n if unsafely:\r\n exec_fn = codejail_not_safe_exec\r\n else:\r\n exec_fn = codejail_safe_exec\r\n\r\n # Run the code! 
Results are side effects in globals_dict.\r\n try:\r\n exec_fn(\r\n code_prolog + LAZY_IMPORTS + code, globals_dict,\r\n python_path=python_path, slug=slug,\r\n )\r\n except SafeExecException as e:\r\n emsg = e.message\r\n else:\r\n emsg = None\r\n\r\n # Put the result back in the cache. This is complicated by the fact that\r\n # the globals dict might not be entirely serializable.\r\n if cache:\r\n cleaned_results = json_safe(globals_dict)\r\n cache.set(key, (emsg, cleaned_results))\r\n\r\n # If an exception happened, raise it now.\r\n if emsg:\r\n raise e", "def runAsync(code):\n __PyMainThread__.runAsync(code)", "def append_code_expr(self, code):\r\n assert(isinstance(code, str)) # expect a string.\r\n logger.debug(\"compiling code {}...\".format(code))\r\n try:\r\n code_obj = compile(code, '<string>', 'eval')\r\n self.code_objs[code] = code_obj\r\n except SyntaxError as syntax_err:\r\n logger.error(\"cannot compile {0}: {1}\".format(\r\n code, syntax_err))\r\n raise\r\n logger.debug(\"compiled code {}\".format(code))", "async def runl(self, ctx: commands.Context, lang: str, *, code: str):\n result = await self._run_code(lang=lang, code=code)\n await self._send_result(ctx, result)", "def run_program(program):\n halt = False\n instruction_pointer = 0\n\n while not halt:\n halt = process_instruction(instruction_pointer, program)\n instruction_pointer += STEP_SIZE\n\n return program", "def code(ctx, show_hidden, query, single, password, remember):\n\n _init_session(ctx, password, remember)\n\n session = ctx.obj[\"session\"]\n entries = session.calculate_all()\n creds = _search(entries.keys(), query, show_hidden)\n\n if len(creds) == 1:\n cred = creds[0]\n code = entries[cred]\n if cred.touch_required:\n prompt_for_touch()\n try:\n if cred.oath_type == OATH_TYPE.HOTP:\n with prompt_timeout():\n # HOTP might require touch, we don't know.\n # Assume yes after 500ms.\n code = session.calculate_code(cred)\n elif code is None:\n code = session.calculate_code(cred)\n except ApduError as e:\n if e.sw == SW.SECURITY_CONDITION_NOT_SATISFIED:\n raise CliFail(\"Touch account timed out!\")\n entries[cred] = code\n\n elif single and len(creds) > 1:\n _error_multiple_hits(ctx, creds)\n\n elif single and len(creds) == 0:\n raise CliFail(\"No matching account found.\")\n\n if single and creds:\n if is_steam(cred):\n click.echo(calculate_steam(session, cred))\n else:\n click.echo(code.value)\n else:\n outputs = []\n for cred in sorted(creds):\n code = entries[cred]\n if code:\n if is_steam(cred):\n code = calculate_steam(session, cred)\n else:\n code = code.value\n elif cred.touch_required:\n code = \"[Requires Touch]\"\n elif cred.oath_type == OATH_TYPE.HOTP:\n code = \"[HOTP Account]\"\n else:\n code = \"\"\n outputs.append((_string_id(cred), code))\n\n longest_name = max(len(n) for (n, c) in outputs) if outputs else 0\n longest_code = max(len(c) for (n, c) in outputs) if outputs else 0\n format_str = \"{:<%d} {:>%d}\" % (longest_name, longest_code)\n\n for name, result in outputs:\n click.echo(format_str.format(name, result))", "def complie_and_execute(self, lang, code):\n t_id = threading.current_thread().get_ident()\n self[lang](t_id, code)", "def run(self, cmd, code):\n files = [f for f in listdir(dirname(self.filename)) if f[-3:] == '.go']\n return self.tmpdir(cmd, files, code)", "def run(self, this, actor, args):\n import os\n import sys\n import datetime\n import calendar\n import math\n from mudsling import registry\n from mudsling.config import config\n\n # args['code'] isn't reliable since the 
semicolon shortcut may skip\n # parsing the args via syntax.\n code = self.argstr\n\n #: @type: Object\n char = actor.possessing\n\n if not code:\n actor.msg(self.syntax_help())\n return False\n\n available_vars = {\n 'eval_cmd': self,\n 'time': time,\n 'datetime': datetime,\n 'calendar': calendar,\n 'game': self.game,\n 'ref': self.game.db.get_ref,\n 'registry': registry,\n 'config': config,\n 'player': actor,\n 'me': char,\n 'here': (char.location if self.game.db.is_valid(char, Object)\n else None),\n 'utils': mudsling.utils,\n 'math': math,\n }\n available_vars.update(sys.modules)\n\n # Support MOO-style objrefs in eval code.\n code = self.objref.sub(r'ref(\\1)', code)\n code = self.objref_escape_fix.sub(r'#\\1', code)\n\n inMsg = string.parse_ansi('{y>>> ') + code + string.parse_ansi(\"{n\")\n actor.msg(inMsg, {'raw': True})\n\n mode = 'eval'\n out = ''\n duration = compile_time = None\n #noinspection PyBroadException\n try:\n begin = time.clock()\n #noinspection PyBroadException\n try:\n compiled = compile(code, '', 'eval')\n except:\n mode = 'exec'\n compiled = compile(code, '', 'exec')\n compile_time = time.clock() - begin\n\n if self.cmdstr == '@profile':\n import cProfile as profile\n import pstats\n import cStringIO as io\n profiler = profile.Profile()\n begin = time.clock()\n profiler.enable()\n ret = profiler.runctx(compiled, {}, available_vars)\n profiler.disable()\n duration = time.clock() - begin\n s = io.StringIO()\n stats = pstats.Stats(profiler, stream=s)\n stats.strip_dirs()\n stats.sort_stats('time')\n stats.print_stats()\n out += s.getvalue() + '\\n'\n s.close()\n else:\n begin = time.clock()\n ret = eval(compiled, {}, available_vars)\n duration = time.clock() - begin\n\n if mode == 'eval':\n out += \"<<< %s\" % repr(ret)\n if isinstance(ret, ObjRef):\n if ret.is_valid():\n name = \"%s (%s)\" % (ret.class_name(),\n ret.python_class_name())\n else:\n name = 'INVALID'\n out += \" [%s]\" % name\n else:\n out = \"<<< Done.\"\n except SystemExit:\n raise\n except:\n error_lines = traceback.format_exc().split('\\n')\n if len(error_lines) > 4:\n error_lines = error_lines[4:]\n out = \"\\n\".join(\"<<< %s\" % line for line in error_lines if line)\n\n raw_string = string.parse_ansi(\"{m\") + out + string.parse_ansi(\"{n\")\n actor.msg(raw_string, {'raw': True})\n if duration is not None:\n msg = \"Exec time: %.3f ms, Compile time: %.3f ms (total: %.3f ms)\"\n actor.msg(msg % (duration * 1000,\n compile_time * 1000,\n (duration + compile_time) * 1000))", "def add_code(self, code):\n self.code += code", "def write_code(self, code):\n self.buffer.scope_line(code.lstrip(' \\t'))", "def source_to_code(self, data, path):\n\t\treturn _call_with_frames_removed(compile, data, path, 'exec', dont_inherit=True)", "async def _eval(self, ctx, *, code):\r\n env = {\r\n 'self': self,\r\n 'bot': self.bot,\r\n 'ctx': ctx,\r\n 'message': ctx.message,\r\n 'guild': ctx.guild,\r\n 'channel': ctx.channel,\r\n 'author': ctx.author,\r\n 'me': ctx.me,\r\n 'that': self.last_result\r\n }\r\n env.update(globals())\r\n\r\n stdout = io.StringIO()\r\n\r\n toCompile = f'async def func():\\n{textwrap.indent(code, \" \")}'\r\n\r\n try:\r\n exec(toCompile, env)\r\n except Exception as e:\r\n em = discord.Embed(description=f\"Excecuted and errored: {e.__class__.__name__}: {e}\",\r\n color=0xff0000)\r\n em.set_author(name=\"Evaluated and errored\",\r\n icon_url=ctx.message.author.avatar_url.replace(\"?size=1024\", \"\"))\r\n em.set_footer(text=\"Executed by: \" + str(ctx.message.author))\r\n em.set_thumbnail(\r\n 
url='https://upload.wikimedia.org/wikipedia/commons/thumb/b/ba/Red_x.svg/480px-Red_x.svg.png')\r\n em.add_field(name=\"Code\", value=f\"[See here.]({hastebin.post(code.encode('utf-8'))}.py)\")\r\n return await ctx.send(embed=em)\r\n\r\n func = env['func']\r\n try:\r\n with redirect_stdout(stdout):\r\n ret = await func()\r\n except Exception as e:\r\n value = stdout.getvalue()\r\n em = discord.Embed(description=f\"Excecuted and errored: ```py\\n{value}{traceback.format_exc()}```\",\r\n color=0xff0000)\r\n em.set_author(name=\"Evaluated and errored\",\r\n icon_url=ctx.message.author.avatar_url.replace(\"?size=1024\", \"\"))\r\n em.set_footer(text=\"Executed by: \" + str(ctx.message.author))\r\n em.set_thumbnail(\r\n url='https://upload.wikimedia.org/wikipedia/commons/thumb/b/ba/Red_x.svg/480px-Red_x.svg.png')\r\n em.add_field(name=\"Code\", value=f\"[See here.]({hastebin.post(code.encode('utf-8'))}.py)\")\r\n await ctx.send(embed=em)\r\n else:\r\n value = stdout.getvalue()\r\n if ret is None or type(ret) is discord.Message:\r\n if value:\r\n x = f\"{value}\"\r\n self.last_result = value\r\n else:\r\n x = \"Executed successfully with no objects returned.\"\r\n else:\r\n x = f\"Executed successfully and returned: {value}{ret}\"\r\n self.last_result = f\"{value}{ret}\"\r\n em = discord.Embed(description=x, color=0x00ff00)\r\n em.set_author(name=\"Evaluated with success\",\r\n icon_url=ctx.message.author.avatar_url.replace(\"?size=1024\", \"\"))\r\n em.set_footer(text=\"Executed by: \" + str(ctx.message.author))\r\n em.set_thumbnail(url='http://www.iconsdb.com/icons/preview/green/checked-checkbox-xxl.png')\r\n em.add_field(name=\"Code\", value=f\"[See here.]({hastebin.post(code.encode('utf-8'))}.py)\")\r\n await ctx.send(embed=em)", "def profile(fnc):\n\n def inner(*args, **kwargs):\n pr = cProfile.Profile()\n pr.enable()\n\n # wrapped function starts\n retval = fnc(*args, **kwargs) # fnc is whatever function has the @profile tag\n # wrapped function ends\n\n pr.disable()\n s = io.StringIO()\n sortby = pstats.SortKey.CALLS\n ps = pstats.Stats(pr, stream=s).strip_dirs().sort_stats(sortby)\n ps.print_stats()\n print(s.getvalue())\n return retval\n\n return inner", "def profile_function(func):\n do_profiling = os.getenv(\"GRASS_TGIS_PROFILE\")\n\n if do_profiling == \"True\" or do_profiling == \"1\":\n import cProfile, pstats\n try:\n import StringIO as io\n except ImportError:\n import io\n pr = cProfile.Profile()\n pr.enable()\n func()\n pr.disable()\n s = io.StringIO()\n sortby = 'cumulative'\n ps = pstats.Stats(pr, stream=s).sort_stats(sortby)\n ps.print_stats()\n print(s.getvalue())\n else:\n func()", "def profile(func):\n def wrapper(*args, **kwargs):\n profile_filename = func.__name__ + '.prof'\n profiler = cProfile.Profile()\n result = profiler.runcall(func, *args, **kwargs)\n profiler.dump_stats(profile_filename)\n return result\n return wrapper", "def run_openmm_script(code, queue):\n\n def fix_code():\n \"\"\"Replae the token 'StateDataReporter' with\n '__queue_reporter_factory(__queue)'\n\n Also, we make sure that the sentenel signal (None) is sent\n down the queue at the very end of the script\n \"\"\"\n itoks = tokenize.generate_tokens(StringIO.StringIO(code).readline)\n def run():\n for toktype, toktext, (srow, scol), (erow, ecol), line in itoks:\n if toktext == 'StateDataReporter':\n toktext = '__queue_reporter_factory(__queue)'\n yield (toktype, toktext, (srow, scol), (erow, ecol), line)\n\n return tokenize.untokenize(run()) + '__queue.put(None)'\n\n try:\n code = fix_code()\n 
except tokenize.TokenError:\n raise ValueError('The script has a syntax error!')\n\n exec code in globals(), {'__queue': queue,\n '__queue_reporter_factory': queue_reporter_factory}", "def profile(func):\n\n def wrapper(*args, **kwargs):\n profile_filename = func.__name__ + \".prof\"\n profiler = cProfile.Profile()\n result = profiler.runcall(func, *args, **kwargs)\n profiler.dump_stats(profile_filename)\n return result\n\n return wrapper", "def plotCode(code):\n # rescale features\n mincode = np.amin(code)\n maxcode = np.amax(code)\n print('Min: ', mincode, 'Max: ', maxcode)\n code = (code - mincode) / (maxcode - mincode)\n # create output image\n sh = np.shape(code)\n W = sh[0]\n H = sh[1]\n out = np.zeros((3*(W+2)-2, 5*(H+2)-2))\n # copy each feature in out\n for w in range(0,3):\n for h in range(0,5):\n c = w*5 + h\n out[w*(W+2):w*(W+2)+W, h*(H+2):h*(H+2)+H] = code[:,:,c]\n return out", "def Exec_Python(code):\n # pylint: disable=exec-used\n try:\n exec(code, globals())\n # pylint: disable=broad-except\n # pylint: disable=bare-except\n except:\n _LOGGER.error('Execution of following code has failed %s', code)\n return False\n return True", "def execute(code):\r\n\r\n ip = 0\r\n acc = 0\r\n visited = defaultdict(int)\r\n\r\n while visited[ip] == 0:\r\n visited[ip] = visited[ip] + 1\r\n\r\n if code[ip][0] == \"acc\":\r\n acc = acc + code[ip][1]\r\n ip = ip + 1\r\n elif code[ip][0] == \"nop\":\r\n ip = ip + 1\r\n elif code[ip][0] == \"jmp\":\r\n ip = ip + code[ip][1]\r\n\r\n if ip >= len(code):\r\n return (True, acc, visited)\r\n break\r\n\r\n return (False, acc, visited)", "def run_profile(package, profile_config):\n\n LOG.info(\"Running '%(name)s' [%(profile)s]\" % profile_config )\n\n profile = load_profile(package, profile_config)\n if not profile:\n return\n\n # create a subfolder for generator profiles\n if package.__name__ == \"pickup.generator_profile\":\n\n # first folder level is the module name. Append this to the staging area\n module_folder = profile.__name__.split(\".\")[-1]\n module_folder = join(config_instance.STAGING_AREA, module_folder)\n\n # into the module folder we put a folder based on the profile's name\n staging_folder = get_profile_folder(module_folder, profile_config)\n\n # just in case it does not exist, we'll create all required folders\n if not exists( staging_folder ):\n os.makedirs( staging_folder )\n LOG.debug( \"Created directory %r\" % staging_folder )\n else:\n staging_folder = config_instance.STAGING_AREA\n\n try:\n profile.run(staging_folder)\n except Exception, exc:\n LOG.error(\"Error staging '%s'. 
Error message: %s\" %\n (profile_config['name'], exc))\n LOG.exception(exc)", "def profile_package(self):\n return base_profiler.run_in_separate_process(self._profile_package)", "def loadCode(self,code,startNum):\r\n\t\tself.frame.loadCode(code,startNum)", "def profile(func, *args, **kwargs):\n\n import cProfile as profile\n\n filename = 'Reynir.profile'\n\n pr = profile.Profile()\n result = pr.runcall(func, *args, **kwargs)\n pr.dump_stats(filename)\n\n return result", "def exec_no_output(self, code):\n self.__exec_part_1(code)", "def code(self, code: str):\n\n self._code = code", "def compile_code(name, code, context=None):\n if context is None:\n context = {} # pragma: no cover\n try:\n obj = compile(code, \"\", \"exec\")\n except SyntaxError as e: # pragma: no cover\n raise SyntaxError(f\"Unable to compile\\n{code}\") from e\n context_g = context.copy()\n context_l = context.copy()\n exec(obj, context_g, context_l) # pylint: disable=W0122\n return context_l[name]", "def patch_code(code, trace_dataflow, nested=False):\n global current_index, current_pc\n\n old_index = current_index\n\n inst = Instrumentor(code, current_index, current_pc)\n\n # If this code object has already been instrumented, skip it\n if \"__ATHERIS_INSTRUMENTED__\" in inst.consts:\n return code\n\n inst.trace_control_flow()\n\n if trace_dataflow:\n inst.trace_data_flow()\n\n current_index += inst.num_counters\n current_pc += inst.num_pcs\n\n # Repeat this for all nested code objects\n for i in range(len(inst.consts)):\n if isinstance(inst.consts[i], types.CodeType):\n if (inst.consts[i].co_name\n in [\"<lambda>\", \"<module>\" if not nested else None] or\n inst.consts[i].co_name[0] != \"<\" or\n inst.consts[i].co_name[-1] != \">\"):\n inst.consts[i] = patch_code(inst.consts[i], trace_dataflow, nested=True)\n\n if not nested:\n _reserve_counters(current_index - old_index)\n\n return inst.to_code()", "def print_code(func):\n print(inspect.getsource(func))", "def run_with_timeout(code, time, globals=None):\r\n # Set the signal handler and a ``time``-second alarm\r\n signal.signal(signal.SIGALRM, lambda s, f: timeout(s, f, time))\r\n if sys.version_info > (2, 5):\r\n signal.setitimer(signal.ITIMER_REAL, time)\r\n else:\r\n # The above only exists in Python 2.6+\r\n # Otherwise, we have to use this, which only supports integer arguments\r\n # Use math.ceil to round a float up.\r\n time = int(math.ceil(time))\r\n signal.alarm(time)\r\n r = eval(code, globals)\r\n signal.alarm(0) # Disable the alarm\r\n return r", "def _putCode(self, code):\n assert(type(code) == int)\n self.code[self.codeptr] = code\n self.codeptr += 1", "def runIntcode(program):\n\n pc = 0\n\n while program[pc] != 99:\n command = program[pc]\n reg1 = program[program[pc + 1]]\n reg2 = program[program[pc + 2]]\n dest = program[pc + 3]\n\n if command == 1:\n print (pc, \" (add) \", reg1, \" \", reg2, \" -> \", dest)\n program[dest] = reg1 + reg2\n\n if command == 2:\n print (pc, \" (mul) \", reg1, \" \", reg2, \" -> \", dest)\n program[dest] = reg1 * reg2\n\n pc = pc + 4\n\n return program", "def profile(fnc):\r\n \r\n def inner(*args, **kwargs):\r\n \r\n pr = cProfile.Profile()\r\n pr.enable()\r\n retval = fnc(*args, **kwargs)\r\n pr.disable()\r\n s = io.StringIO()\r\n sortby = 'time'\r\n ps = pstats.Stats(pr, stream=s).sort_stats(sortby)\r\n ps.print_stats()\r\n print(s.getvalue())\r\n return retval\r\n\r\n return inner", "def test_profiler(self):\n cmdline = [\n \"starfish\",\n \"--profile\",\n \"noop\",\n ]\n if cmdline[0] == 'starfish':\n 
coverage_cmdline = [\n \"coverage\", \"run\",\n \"-p\",\n \"--source\", \"starfish\",\n \"-m\", \"starfish.starfish\",\n ]\n coverage_cmdline.extend(cmdline[1:])\n cmdline = coverage_cmdline\n env = os.environ.copy()\n env[PROFILER_NOOP_ENVVAR] = \"\"\n subprocess.check_call(cmdline, env=env)", "def run_pass(image, noStartup, encoder, blocksize, quality):\n binary = \"./bin/astcenc-%s\" % encoder\n args = [\"valgrind\", \"--tool=callgrind\", \"--callgrind-out-file=callgrind.txt\",\n binary, \"-cl\", image, \"out.astc\", blocksize, quality, \"-j\", \"1\"]\n\n result = sp.run(args, check=True, universal_newlines=True)\n\n args = [\"callgrind_annotate\", \"callgrind.txt\"]\n ret = sp.run(args, stdout=sp.PIPE, check=True, encoding=\"utf-8\")\n lines = ret.stdout.splitlines()\n with open(\"perf_%s_cga.txt\" % quality.replace(\"-\", \"\"), \"w\") as handle:\n handle.write(\"\\n\".join(lines))\n\n postprocess_cga(lines, \"perf_%s.txt\" % quality.replace(\"-\", \"\"))\n\n if noStartup:\n args = [\"gprof2dot\", \"--format=callgrind\", \"--output=out.dot\", \"callgrind.txt\",\n \"-s\", \"-z\", \"compress_block(astcenc_contexti const&, image_block const&, physical_compressed_block&, compression_working_buffers&)\"]\n else:\n args = [\"gprof2dot\", \"--format=callgrind\", \"--output=out.dot\", \"callgrind.txt\",\n \"-s\", \"-z\", \"main\"]\n\n result = sp.run(args, check=True, universal_newlines=True)\n\n args = [\"dot\", \"-Tpng\", \"out.dot\", \"-o\", \"perf_%s.png\" % quality.replace(\"-\", \"\")]\n result = sp.run(args, check=True, universal_newlines=True)\n\n os.remove(\"out.astc\")\n os.remove(\"out.dot\")\n os.remove(\"callgrind.txt\")", "def code(self, code: int):\n\n self._code = code", "def code(bot, msg, language, _, code):\n uri = 'https://eval.in/'\n data = {\n \"utf8\": \"\\xce\\xbb\",\n \"execute\": \"on\",\n \"private\": \"on\",\n \"lang\": supported_languages[language],\n \"input\": \"\",\n \"code\": util.flatten_incoming_text(bot, code).encode('utf-8'),\n }\n response = requests.post(uri, data)\n bot.debug(response.url)\n _, html = response.content.split(\"<h2>Program Output</h2>\", 1)\n html = html.lstrip()\n html = html[5: html.index(\"</pre>\")]\n output = util.unescape(html).rstrip().decode('utf-8')\n if output:\n try:\n bot.reply(u\"```{}```\".format(output))\n except exception.MessageTooLongException:\n bot.reply(response.url)\n else:\n bot.reply(\"No output...\")", "def tailcall(code, _env=None):\n nonlocal loopt, tree, env\n if _env:\n env = _env\n tree = code\n loopt += 1" ]
[ "0.6385464", "0.62743026", "0.6012042", "0.5954541", "0.5947372", "0.5926711", "0.5884469", "0.5836414", "0.5775137", "0.5737329", "0.5666656", "0.5654356", "0.55813533", "0.55803514", "0.555477", "0.55139744", "0.5504729", "0.5470353", "0.5440272", "0.5439059", "0.5430186", "0.54072833", "0.540433", "0.5382272", "0.53556293", "0.53267586", "0.5325591", "0.5316247", "0.53047544", "0.5296031", "0.5272671", "0.5266112", "0.52616453", "0.5254145", "0.5225633", "0.51826966", "0.5167336", "0.5162014", "0.5161746", "0.51564884", "0.51343673", "0.51225346", "0.51225346", "0.51225346", "0.5117957", "0.51135844", "0.50933886", "0.50933886", "0.50933886", "0.50742817", "0.5062799", "0.5046566", "0.5044751", "0.50390583", "0.5033354", "0.5032818", "0.5020501", "0.501094", "0.50015736", "0.49982378", "0.49966344", "0.49897134", "0.49881282", "0.4986118", "0.498124", "0.49711424", "0.49651375", "0.49630406", "0.4962824", "0.4959129", "0.49584624", "0.49574506", "0.49543282", "0.49479815", "0.49291396", "0.49255142", "0.49248573", "0.4919935", "0.49169785", "0.49070635", "0.49009433", "0.48978367", "0.4894831", "0.48877195", "0.4881506", "0.4871954", "0.4870939", "0.4868116", "0.48657456", "0.4853788", "0.48459315", "0.48317677", "0.48101997", "0.4782301", "0.47638142", "0.4756583", "0.47551742", "0.4753839", "0.47442183", "0.4739351" ]
0.7272947
0
read feature file, find out mass shift then correct
def feature_file_mass_correction(feature_filename: str):
    output_feature_filename = feature_filename + '.mass_corrected'
    ppm_shift = []
    with open(feature_filename, 'r') as f:
        reader = csv.reader(f, delimiter=',')
        header = next(reader)
        seq_index = header.index("seq")
        mz_index = header.index("m/z")
        z_index = header.index("z")
        for line in reader:
            mz = float(line[mz_index])
            z = float(line[z_index])
            observed_mass = mz * z - z * config.mass_H
            if not line[seq_index]:
                continue
            okay, peptide = parse_raw_sequence(line[seq_index])
            if not okay:
                # unknown mods
                continue
            theoretical_mass = compute_neutral_peptide_mass(peptide)
            ppm = (observed_mass - theoretical_mass) / theoretical_mass * 1e6
            ppm_shift.append(ppm)
    if len(ppm_shift) < 100:
        raise ValueError("too less identified feature for mass correction")
    ppm_shift = np.median(ppm_shift)
    print(f"ppm shift: {ppm_shift}")
    with open(feature_filename, 'r') as fr:
        with open(output_feature_filename, 'w') as fw:
            reader = csv.reader(fr, delimiter=',')
            writer = csv.writer(fw, delimiter=',')
            writer.writerow(next(reader))
            for line in reader:
                mz = float(line[mz_index])
                mz = mz * (1 - ppm_shift * 1e-6)
                line[mz_index] = "{}".format(mz)
                writer.writerow(line)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_msp(infile_name,feat_lim_file=\"\",\n\t\t\t sum_feats=False,selected_features=[],\n\t\t\t max_dist=275,step_size=0.005,feat_bins=[],\n\t\t\t top_peaks=50,windowed_mode=False):\n\n\tinfile = open(infile_name)\n\n\tif len(feat_lim_file) > 0:\n\t\tselected_features = [float(f.strip()) for f in open(feat_lim_file).readlines()]\n\t\t\n\tcounter = 0\n\ttemp_entry = []\n\tinstance_names = []\n\tnum_instances = num_instances_msp(infile_name)\n\t#print(num_instances)\n\n\tif len(feat_bins) == 0: feat_bins = np.arange(0,max_dist+step_size,step_size)\n\t\n\t#Initialize the feature matrix, must be lil since scr is slow when mutating values!\n\tfeat_matrix = scipy.sparse.lil_matrix((num_instances, len(feat_bins)),dtype=np.float32)\n\t\n\t#Iterate over the file and filter out single entries\n\tfor line in infile:\n\t\tif line.startswith(\"Name: \"):\n\t\t\tif len(temp_entry) == 0:\n\t\t\t\ttemp_entry.append(line.strip())\n\t\t\t\tcontinue\n\t\t\t#For this entry get identifier,m/z,intensities\n\t\t\tidentifier,mz_list,intensity_list = parse_msp(temp_entry,top=top_peaks,windowed_mode=windowed_mode)\n\t\t\tinstance_names.append(identifier)\n\t\t\t#Fill in the feature matrix\n\t\t\tfeat_matrix = get_feats(mz_list,intensity_list,feat_matrix,counter,feat_bins,allowed_c=selected_features,max_dist=max_dist)\n\t\t\t\n\t\t\t#Make sure the current line is still used for the next entry\n\t\t\ttemp_entry = [line]\n\t\t\t\n\t\t\t#print(counter)\n\t\t\tcounter += 1\n\t\t\t\n\t\ttemp_entry.append(line.strip())\n\t\n\t#If everything is empty; return\n\tif len(temp_entry) == 0:\n\t\ttemp_entry.append(line.strip())\n\t\treturn(feat_matrix.asformat(\"csr\"),feat_bins,instance_names,counter)\n\n\t#Analyse the last record; since we do not know when the spectra ends\n\tidentifier,mz_list,intensity_list = parse_msp(temp_entry,top=top_peaks,windowed_mode=windowed_mode)\n\tinstance_names.append(identifier)\n\tfeat_matrix = get_feats(mz_list,intensity_list,feat_matrix,counter,feat_bins,allowed_c=selected_features)\n\t\n\t#print(counter)\n\tcounter += 1\n\t\n\treturn(feat_matrix.asformat(\"csr\"),feat_bins,instance_names,counter)", "def read_mgf(infile_name,feat_lim_file=\"\",\n\t\t\t sum_feats=False,selected_features=[],\n\t\t\t max_dist=275,step_size=0.005,feat_bins=[],\n\t\t\t top_peaks=50):\t\t \n\t\n\tinfile = open(infile_name)\n\t\n\tif len(feat_lim_file) > 0:\n\t\tselected_features = [float(f.strip()) for f in open(\"selected_features.txt\").readlines()]\n\t\t\n\tcounter = 0\n\ttemp_entry = []\n\tinstance_names = []\n\tnum_instances = num_instances_mgf(infile_name)\n\t#print(num_instances)\n\n\tif len(feat_bins) == 0: feat_bins = np.arange(0,max_dist+step_size,step_size)\n\t\n\t#Initialize the feature matrix, must be lil since scr is slow when mutating values!\n\tfeat_matrix = scipy.sparse.lil_matrix((num_instances, len(feat_bins)),dtype=np.float32)\n\t\n\t#Iterate over the file and filter out single entries\n\tfor line in infile:\n\t\tif line.startswith(\"END IONS\"):\n\t\t\t#For this entry get identifier,m/z,intensities\n\t\t\tidentifier,mz_list,intensity_list = parse_mgf(temp_entry,top=top_peaks,windowed_mode=windowed_mode)\n\t\t\tinstance_names.append(identifier)\n\t\t\t#Fill in the feature matrix\n\t\t\tfeat_matrix = get_feats(mz_list,intensity_list,feat_matrix,counter,feat_bins,allowed_c=selected_features,max_dist=max_dist)\n\t\t\tcounter += 1\n\t\t\t#print(counter)\n\t\t\ttemp_entry = []\n\t\t\tcontinue\n\t\tif line.startswith(\"BEGIN 
IONS\"):\n\t\t\tcontinue\n\t\ttemp_entry.append(line)\n\n\treturn(feat_matrix.asformat(\"csr\"),feat_bins,instance_names,counter)", "def read_szf_fmv_12(eps_file):\n data = {}\n metadata = {}\n\n n_lines = eps_file.mdr_counter\n n_node_per_line = eps_file.mdr[\"LONGITUDE_FULL\"].shape[1]\n idx_nodes = np.arange(n_lines).repeat(n_node_per_line)\n\n # extract metadata\n metadata[\"spacecraft_id\"] = np.int8(eps_file.mphr[\"SPACECRAFT_ID\"][-1])\n metadata[\"orbit_start\"] = np.uint32(eps_file.mphr[\"ORBIT_START\"])\n metadata[\"state_vector_time\"] = datetime.strptime(\n eps_file.mphr[\"STATE_VECTOR_TIME\"][:-4], \"%Y%m%d%H%M%S\")\n\n fields = [\n \"processor_major_version\", \"processor_minor_version\",\n \"format_major_version\", \"format_minor_version\"\n ]\n for f in fields:\n metadata[f] = np.int16(eps_file.mphr[f.upper()])\n\n # extract time\n dt = np.datetime64(\n \"2000-01-01\") + eps_file.mdr[\"UTC_LOCALISATION\"][\"day\"].astype(\n \"timedelta64[D]\"\n ) + eps_file.mdr[\"UTC_LOCALISATION\"][\"time\"].astype(\"timedelta64[ms]\")\n data[\"time\"] = dt[idx_nodes]\n\n fields = [\n \"degraded_inst_mdr\", \"degraded_proc_mdr\", \"sat_track_azi\",\n \"beam_number\", \"flagfield_rf1\", \"flagfield_rf2\", \"flagfield_pl\",\n \"flagfield_gen1\"\n ]\n\n # 101 min = 6082 seconds\n # state_vector_time = ascending node crossing time - 1520.5,\n # time crossing at -90 lat\n orbit_start_time = metadata[\"state_vector_time\"] - timedelta(\n seconds=1520.5)\n orbit_end_time = orbit_start_time + timedelta(seconds=6082)\n\n data[\"orbit_nr\"] = np.ma.zeros(\n data[\"time\"].size, dtype=np.int32,\n fill_value=int32_nan) + metadata[\"orbit_start\"]\n data[\"orbit_nr\"][data[\"time\"] > orbit_end_time] += 1\n\n metadata[\"orbits\"] = {}\n for orbit_nr in np.unique(data[\"orbit_nr\"]):\n if orbit_nr == metadata[\"orbit_start\"]:\n metadata[\"orbits\"][orbit_nr] = (orbit_start_time, orbit_end_time)\n else:\n metadata[\"orbits\"][orbit_nr] = (orbit_end_time, orbit_end_time +\n timedelta(seconds=6082))\n\n # extract data\n for f in fields:\n if eps_file.mdr_sfactor[f.upper()] == 1:\n data[f] = eps_file.mdr[f.upper()].flatten()[idx_nodes]\n else:\n data[f] = (eps_file.mdr[f.upper()].flatten() * 1. /\n eps_file.mdr_sfactor[f.upper()])[idx_nodes]\n\n data[\"swath_indicator\"] = (data[\"beam_number\"].flatten() > 3).astype(\n np.uint8)\n data[\"as_des_pass\"] = (data[\"sat_track_azi\"] < 270).astype(np.uint8)\n\n fields = [(\"longitude_full\", long_nan), (\"latitude_full\", long_nan),\n (\"sigma0_full\", long_nan), (\"inc_angle_full\", uint_nan),\n (\"azi_angle_full\", int_nan), (\"land_frac\", uint_nan),\n (\"flagfield_gen2\", byte_nan)]\n\n for f, nan_val in fields:\n data[f] = eps_file.mdr[f.upper()].flatten()\n invalid = eps_file.mdr[f.upper()].flatten() == nan_val\n\n if eps_file.mdr_sfactor[f.upper()] != 1:\n data[f] = data[f] * 1. 
/ eps_file.mdr_sfactor[f.upper()]\n\n data[f][invalid] = nan_val\n\n # modify longitudes from (0, 360) to (-180, 180)\n mask = np.logical_and(data[\"longitude_full\"] != long_nan,\n data[\"longitude_full\"] > 180)\n data[\"longitude_full\"][mask] += -360.\n\n # modify azimuth from (-180, 180) to (0, 360)\n idx = (data[\"azi_angle_full\"] != int_nan) & (data[\"azi_angle_full\"] < 0)\n data[\"azi_angle_full\"][idx] += 360\n\n # set flags\n data[\"f_usable\"] = set_flags(data)\n\n return data, metadata", "def readMaf( options, data ):\n regex = 's\\s+([\\w\\d\\-]+?)\\.([\\w\\d\\.\\+\\-]+?)\\s+(\\d+)\\s+(\\d+)\\s+([-+])\\s+(\\d+)\\s+([\\-actgurykmswbdhvnACTGURYKMSWBDHVN]+)'\n pat = re.compile( regex )\n mf = open( options.maf )\n mafLineList = []\n order = -1\n hplList = []\n hpl = ''\n five = ''\n three = ''\n for line in mf:\n if line.startswith('#HPL'):\n d = line.split(' ')\n # example line: \"#HPL=12049 5=1 3=1 SPL=123412 S5=0 S3=12\"\n # there will be one hpl line per options.other line\n # in blocks that contain the options.ref\n hpl = int( d[0][5:] ) # comment at start of this field\n hFive = int( d[1][2] )\n hThree = int( d[2][2] )\n spl = int( d[3][4:] ) # no comment at start of this field\n hplList.append( { 'hpl': hpl, 'hFive': hFive, \n 'hThree': hThree, 'spl': spl } )\n continue\n if line.startswith('s'):\n line = line.strip()\n ml, order = extractMafLine( line, order, pat, options, data )\n if ml is None:\n sys.stderr.write( 'regexp fail on file %s line: \\'%s\\'\\n'\n 'Regex: \\'%s\\'\\n' % ( options.maf, line, regex ) )\n sys.exit( 1 )\n if ml == 'notOurGenome':\n continue\n if ml.length != len( ml.sequence ):\n sys.stderr.write( 'Error while working on file %s :\\n '\n 'printed sequence length (%d) not equal to actual sequence '\n 'length (%d) ref genome:%s other genome:%s line below:\\n%s\\n' % \n ( options.maf, ml.length, len( ml.sequence ), options.ref, options.other, line ) )\n sys.exit( 1 )\n mafLineList.append( ml )\n else:\n # end of the block\n if len( mafLineList ) > 0:\n extractBlockPairs( mafLineList, hplList, options, data )\n mafLineList = []\n order = -1\n hplList = []\n hpl = ''\n five = ''\n three = ''\n if len( mafLineList ) > 0:\n extractBlockPairs( mafLineList, hplList, options, data )", "def get_efermi(fn):\n try:\n f = open(fn)\n except:\n return 0\n line = f.readline()\n f.close()\n ef = float(line.split()[6])\n print('Calculated Fermi level: {0}'.format(ef))\n return ef", "def read_szf_fmv_13(eps_file):\n data = {}\n metadata = {}\n\n n_lines = eps_file.mdr_counter\n n_node_per_line = eps_file.mdr[\"LONGITUDE_FULL\"].shape[1]\n idx_nodes = np.arange(n_lines).repeat(n_node_per_line)\n\n # extract metadata\n metadata[\"spacecraft_id\"] = np.int8(eps_file.mphr[\"SPACECRAFT_ID\"][-1])\n metadata[\"orbit_start\"] = np.uint32(eps_file.mphr[\"ORBIT_START\"])\n metadata[\"state_vector_time\"] = datetime.strptime(\n eps_file.mphr[\"STATE_VECTOR_TIME\"][:-4], \"%Y%m%d%H%M%S\")\n\n fields = [\n \"processor_major_version\", \"processor_minor_version\",\n \"format_major_version\", \"format_minor_version\"\n ]\n for f in fields:\n metadata[f] = np.int16(eps_file.mphr[f.upper()])\n\n # extract time\n dt = np.datetime64(\n \"2000-01-01\") + eps_file.mdr[\"UTC_LOCALISATION\"][\"day\"].astype(\n \"timedelta64[D]\"\n ) + eps_file.mdr[\"UTC_LOCALISATION\"][\"time\"].astype(\"timedelta64[ms]\")\n data[\"time\"] = dt[idx_nodes]\n\n fields = [\n \"degraded_inst_mdr\", \"degraded_proc_mdr\", \"sat_track_azi\",\n \"beam_number\", \"flagfield_rf1\", \"flagfield_rf2\", 
\"flagfield_pl\",\n \"flagfield_gen1\"\n ]\n\n fields = [\n \"degraded_inst_mdr\", \"degraded_proc_mdr\", \"sat_track_azi\",\n \"beam_number\"\n ]\n\n # 101 min = 6082 seconds\n # state_vector_time = ascending node crossing time - 1520.5,\n # time crossing at -90 lat\n orbit_start_time = metadata[\"state_vector_time\"] - timedelta(\n seconds=1520.5)\n orbit_end_time = orbit_start_time + timedelta(seconds=6082)\n\n data[\"orbit_nr\"] = np.ma.zeros(\n data[\"time\"].size, dtype=np.int32,\n fill_value=int32_nan) + metadata[\"orbit_start\"]\n data[\"orbit_nr\"][data[\"time\"] > orbit_end_time] += 1\n\n metadata[\"orbits\"] = {}\n for orbit_nr in np.unique(data[\"orbit_nr\"]):\n if orbit_nr == metadata[\"orbit_start\"]:\n metadata[\"orbits\"][orbit_nr] = (orbit_start_time, orbit_end_time)\n else:\n metadata[\"orbits\"][orbit_nr] = (orbit_end_time, orbit_end_time +\n timedelta(seconds=6082))\n\n # extract data\n for f in fields:\n if eps_file.mdr_sfactor[f.upper()] == 1:\n data[f] = eps_file.mdr[f.upper()].flatten()[idx_nodes]\n else:\n data[f] = (eps_file.mdr[f.upper()].flatten() * 1. /\n eps_file.mdr_sfactor[f.upper()])[idx_nodes]\n\n data[\"swath_indicator\"] = (data[\"beam_number\"].flatten() > 3).astype(\n np.uint8)\n data[\"as_des_pass\"] = (data[\"sat_track_azi\"] < 270).astype(np.uint8)\n\n fields = [(\"longitude_full\", long_nan), (\"latitude_full\", long_nan),\n (\"sigma0_full\", long_nan), (\"inc_angle_full\", uint_nan),\n (\"azi_angle_full\", int_nan), (\"flagfield\", int_nan)]\n\n for f, nan_val in fields:\n data[f] = eps_file.mdr[f.upper()].flatten()\n invalid = eps_file.mdr[f.upper()].flatten() == nan_val\n\n if eps_file.mdr_sfactor[f.upper()] != 1:\n data[f] = data[f] * 1. / eps_file.mdr_sfactor[f.upper()]\n\n data[f][invalid] = nan_val\n\n # modify longitudes from (0, 360) to (-180, 180)\n mask = np.logical_and(data[\"longitude_full\"] != long_nan,\n data[\"longitude_full\"] > 180)\n data[\"longitude_full\"][mask] += -360.\n\n # modify azimuth from (-180, 180) to (0, 360)\n idx = (data[\"azi_angle_full\"] != int_nan) & (data[\"azi_angle_full\"] < 0)\n data[\"azi_angle_full\"][idx] += 360\n\n # set flags\n data[\"f_usable\"] = set_flags_fmv13(data[\"flagfield\"])\n\n return data, metadata", "def read_smx_fmv_12(eps_file):\n raw_data = eps_file.scaled_mdr\n raw_unscaled = eps_file.mdr\n\n n_node_per_line = raw_data[\"LONGITUDE\"].shape[1]\n n_lines = raw_data[\"LONGITUDE\"].shape[0]\n n_records = eps_file.mdr_counter * n_node_per_line\n idx_nodes = np.arange(eps_file.mdr_counter).repeat(n_node_per_line)\n\n data = {}\n metadata = {}\n\n metadata[\"spacecraft_id\"] = np.int8(eps_file.mphr[\"SPACECRAFT_ID\"][-1])\n metadata[\"orbit_start\"] = np.uint32(eps_file.mphr[\"ORBIT_START\"])\n\n ascat_time = shortcdstime2jd(raw_data[\"UTC_LINE_NODES\"].flatten()[\"day\"],\n raw_data[\"UTC_LINE_NODES\"].flatten()[\"time\"])\n data[\"jd\"] = ascat_time[idx_nodes]\n\n fields = [(\"sigma0_trip\", long_nan), (\"inc_angle_trip\", uint_nan),\n (\"azi_angle_trip\", int_nan), (\"kp\", uint_nan),\n (\"f_land\", uint_nan)]\n\n for f, nan_val in fields:\n data[f] = raw_data[f.upper()].reshape(n_records, 3)\n valid = raw_unscaled[f.upper()].reshape(n_records, 3) != nan_val\n data[f][~valid] = nan_val\n\n fields = [\"sat_track_azi\", \"abs_line_number\"]\n for f in fields:\n data[f] = raw_data[f.upper()].flatten()[idx_nodes]\n\n fields = [(\"longitude\", long_nan, long_nan),\n (\"latitude\", long_nan, long_nan),\n (\"swath_indicator\", byte_nan, byte_nan),\n (\"soil_moisture\", uint_nan, uint_nan),\n 
(\"soil_moisture_error\", uint_nan, uint_nan),\n (\"sigma40\", long_nan, long_nan),\n (\"sigma40_error\", long_nan, long_nan),\n (\"slope40\", long_nan, long_nan),\n (\"slope40_error\", long_nan, long_nan),\n (\"dry_backscatter\", long_nan, long_nan),\n (\"wet_backscatter\", long_nan, long_nan),\n (\"mean_surf_soil_moisture\", uint_nan, uint_nan),\n (\"soil_moisture_sensetivity\", ulong_nan, float32_nan),\n (\"correction_flags\", uint8_nan, uint8_nan),\n (\"processing_flags\", uint8_nan, uint8_nan),\n (\"aggregated_quality_flag\", uint8_nan, uint8_nan),\n (\"snow_cover_probability\", uint8_nan, uint8_nan),\n (\"frozen_soil_probability\", uint8_nan, uint8_nan),\n (\"innudation_or_wetland\", uint8_nan, uint8_nan),\n (\"topographical_complexity\", uint8_nan, uint8_nan)]\n\n for f, nan_val, new_nan_val in fields:\n data[f] = raw_data[f.upper()].flatten()\n valid = raw_unscaled[f.upper()].flatten() != nan_val\n data[f][~valid] = new_nan_val\n\n # sat_track_azi (uint)\n data[\"as_des_pass\"] = \\\n np.array(raw_data[\"SAT_TRACK_AZI\"].flatten()[idx_nodes] < 270)\n\n # modify longitudes from [0,360] to [-180,180]\n mask = np.logical_and(data[\"longitude\"] != long_nan,\n data[\"longitude\"] > 180)\n data[\"longitude\"][mask] += -360.\n\n # modify azimuth from (-180, 180) to (0, 360)\n mask = (data[\"azi_angle_trip\"] != int_nan) & (data[\"azi_angle_trip\"] < 0)\n data[\"azi_angle_trip\"][mask] += 360\n\n fields = [\"param_db_version\", \"warp_nrt_version\"]\n for f in fields:\n data[f] = raw_data[\"PARAM_DB_VERSION\"].flatten()[idx_nodes]\n\n metadata[\"spacecraft_id\"] = int(eps_file.mphr[\"SPACECRAFT_ID\"][2])\n\n data[\"node_num\"] = np.tile((np.arange(n_node_per_line) + 1), n_lines)\n\n data[\"line_num\"] = idx_nodes\n\n return data, metadata", "def test_read_0_1_smirff(self):\n ForceField(\n get_data_file_path(\n \"test_forcefields/smirff99Frosst_reference_0_1_spec.offxml\"\n )\n )", "def read(self,isOutputFile = False, headerCols = None, verbose = 0):\n \n #\n # TODO TODO also need a 'readFinal' one to read the FINAL information!!\n # set a flag in MonteFormat.py to select which cs info to read...\n\n if verbose == 1:\n print \"Reading %s chemical shift list %s\" % (self.format,self.name)\n\n fin = open(self.name, 'rU')\n\n line = fin.readline()\n \n spinSystemId = 0\n resLabel = oldResLabel = None\n\n while line:\n\n if self.patt['%sComment' % self.format].search(line):\n\n if not isOutputFile and not self.chemShifts and not headerCols:\n\n #\n # Get atom info from first line...\n #\n \n headerCols = line.split()\n headerCols.pop(0)\n\n line = fin.readline()\n continue\n\n if self.patt['emptyline'].search(line):\n line = fin.readline()\n continue\n \n #\n # Make sure header info is available - otherwise no point\n #\n \n if not headerCols:\n raise \"Error: no header column information available. Try reading .par file!\"\n return\n \n #\n # Get the info... 
should really come for .par file!!\n #\n \n cols = line.split()\n \n infoCode = None\n \n if not isOutputFile:\n \n stripId = returnFloat(cols.pop(0))\n\n #\n # NOt necessarily info string available...\n #\n\n if self.patt['onlyFloat'].search(cols[0]):\n seqCode = None\n resLabel = None\n\n else:\n assignment = cols.pop(0)\n\n searchAssignment = self.patt['%sAssignment' % self.format].search(assignment)\n\n resLabel = searchAssignment.group(1)\n seqCode = searchAssignment.group(2)\n \n else:\n \n seqCode = cols.pop(0)\n if seqCode[-1] in '+':\n seqCode = seqCode[:-1]\n infoCode = seqCode[-1]\n \n oldResLabel = resLabel\n resLabel = cols.pop(0)\n stripId = returnFloat(cols.pop(0))\n voidCol = cols.pop(0)\n \n #\n # Set up info for atoms...\n #\n \n if not seqCode or seqCode == '?':\n seqCode = None\n spinSystemId = spinSystemId + 2\n else:\n seqCode = returnInt(seqCode)\n\n if len(cols) == 1:\n cols = cols.split(',')\n\n values = returnFloats(cols)\n\n for i in range(0,len(values)):\n atomId = headerCols[i]\n value = values[i]\n \n if value == 0.0:\n continue\n \n atomSearch = self.patt['%sAtomInfo' % self.format].search(atomId)\n \n atomName = atomSearch.group(1)\n atomPlace = atomSearch.group(2)\n \n if atomName == 'HA1':\n nextAtomValue = values[i+1]\n if nextAtomValue == 0.00:\n atomName = 'HA'\n \n curSeqCode = seqCode\n curResLabel = None\n \n if seqCode == None:\n curSpinSystemId = spinSystemId\n prevSpinSystemId = spinSystemId - 1\n else:\n curSpinSystemId = None\n prevSpinSystemId = None\n \n if atomPlace == '(i-1)' or atomPlace == '-1':\n\n if seqCode != None:\n curSeqCode = seqCode - 1\n else:\n curSpinSystemId = spinSystemId - 1\n prevSpinSystemId = None\n \n if not isOutputFile:\n curResLabel = resLabel\n else:\n curResLabel = oldResLabel\n \n elif isOutputFile:\n curResLabel = resLabel\n\n self.chemShifts.append(MonteChemShift(value,atomName,curSeqCode,curSpinSystemId,stripId,curResLabel,self.defaultMolCode, infoCode = infoCode, prevSpinSystemId = prevSpinSystemId))\n\n line = fin.readline()\n\n fin.close()", "def readFT(self,file=\"out__1.ft\"):", "def spot1d_rsa(infile, sequence):\n data = np.loadtxt(infile, usecols=4, skiprows=1).reshape((1, -1, 1))\n for i in range(len(sequence)):\n data[0, i, 0] /= max_solvent_acc[sequence[i].upper()]\n\n return data", "def read(self) :\n # Open the file.\n f = open(self.output, 'r')\n lines = f.readlines()\n \n # Find the eigenvalue.\n count = 0\n while True :\n words = lines[count].split()\n if len(words) == 5 :\n if words[0] == \"*\" and words[1] == \"K-EFF\":\n self.keff = float(words[3])\n break\n count += 1\n \n # Find the peaking.\n a = 0 # Assembly index\n \n while True :\n words = lines[count].split()\n if len(words) == 8 :\n if words[0] == \"NODE\" and words[1] == \"AVERAGE\" and words[2] == \"POWERS\" :\n count += 5 # Powers start 5 lines below title\n for row in range(0, self.dimension) :\n words = lines[count].split()\n assert(len(words) >= self.dimension)\n for col in range(0, self.dimension) :\n self.peaking_map[row, col] = float(words[col+1])\n if self.core.stencil[row, col] > 0:\n #print \" a=\", a, \" row=\", row, \" col=\", col, len(self.peaking)\n self.peaking[a] = self.peaking_map[row, col]\n a += 1\n count += 1\n break\n count += 1 \n # Maximum peaking.\n self.maxpeak = np.max(self.peaking)", "def extract_maf(n):\n data = (line.split() for line in sys.stdin)\n next(data)\n for row in data:\n if row[4] == 'SNP' and float(row[-1]) > 0.01:\n row[1] = int(row[1])\n row[-1] = int(float(row[-1]) * 33)\n 
print(get_pouyak_name(chromosome(n), *row[:4]), row[-1])", "def profbval_strict(infile, sequence):\n result = np.zeros((1, len(sequence), 1))\n with open(infile, \"r\") as fh:\n it = 0\n for line in fh:\n if not line.startswith(\"number\"):\n pred_str = line.strip().split()[5]\n if pred_str == \"F\":\n result[0, it, 0] = 1\n it += 1\n\n return result", "def test_fake_file_xmm(self):\n fits_file = os.path.join(self.datadir, 'monol_test_fake_lc_xmm.evt')\n hen.fake.main(['--deadtime', '1e-4', '-m', 'XMM', '-i', 'epn',\n '--ctrate', '2000',\n '-o', fits_file])\n hdu_list = fits.open(fits_file)\n hdunames = [hdu.name for hdu in hdu_list]\n assert 'STDGTI01' in hdunames\n assert 'STDGTI02' in hdunames\n assert 'STDGTI07' in hdunames", "def read_inversion_info(file_dic):\n #print_file_test = open('file_test.txt','w')\n\n if not ( check_inversion_files(file_dic) ):\n print 'error(read_inversion_info): problem with lenstool file names'\n return 0\n \n file_generate_arcs = file_dic['file_generate_arcs']\n info_input_lens = fc.extract_second_identifiers( file_generate_arcs, \\\n 'potential' )\n#-------------------------------------------------------------------------------\n\n file_source = file_dic['file_source']\n info_src = np.loadtxt(file_source, unpack=False)\n if len(info_src) == 8 and np.isscalar(info_src[0]):\n #FIXME - check if the second condition is all we need\n info_src = [info_src]\n#-------------------------------------------------------------------------------\n\n file_make_inversion = file_dic['file_make_inversion']\n info_fited_param = fc.extract_second_identifiers( file_make_inversion, \\\n 'limit' )\n info_forme = fc.extract_parameter(file_make_inversion, 'forme')[0][0]\n\n#-------------------------------------------------------------------------------\n\n file_best_fit = file_dic['file_best_fit']\n info_best_fit = fc.extract_second_identifiers( file_best_fit, \\\n 'potentiel' )\n\n info_xi2 = fc.extract_parameter(file_best_fit, '#Chi2pos:')\n\n#-------------------------------------------------------------------------------\n file_chires = file_dic['file_chires']\n\n info_chires = extract_parameter(file_chires, '0')\n rmss_mean = [0.0, 0.0]\n rmsi_mean = [0.0, 0.0]\n for i in info_chires:\n if i[0] != 'A':\n rmss_mean[0] = rmss_mean[0] + float(i[7])\n rmss_mean[1] = rmss_mean[1] + 1.0\n \n rmsi_mean[0] = rmsi_mean[0] + float(i[8])\n rmsi_mean[1] = rmsi_mean[1] + 1.0\n\n rmss_mean = rmss_mean[0]/rmss_mean[1]\n rmsi_mean = rmsi_mean[0]/rmsi_mean[1]\n#-------------------------------------------------------------------------------\n out_dict = { 'xi2' : float(info_xi2[0][0]), \\\n 'best_fit_lens' : info_best_fit, \\\n 'rmsi_mean' : rmsi_mean, \\\n 'rmss_mean' : rmss_mean, \\\n 'fited_parameters' : info_fited_param[0].keys(), \\\n 'input_lens' : info_input_lens[len(info_input_lens) - 1], \\\n 'forme' : info_forme \\\n }\n #for i in out_dict.keys():\n # print i, out_dict[i]\n return out_dict", "def read_model_shiftby_performances(lines):\n performances = {}\n patients = [str(x) for x in range(13)]\n # current_model = ''\n # shifts = [-100, -75, -50, -25, 25, 50, 100, 125, 150, 175, 200, 225, 250]\n for i, line in enumerate(lines):\n words = line.split(' ')\n if (len(words) == 10) and (words[0] == 'starting'):\n if 'vel' in words[-1][:-1]:\n variable = 'vel'\n else:\n variable = 'absVel'\n col_name = float(words[1][:-1])\n if (len(words) == 2) and (words[0] in patients):\n # shift_words = lines[i+1].replace(':', '').split(' ')\n # assert shift_words[0] == 'shift'\n # col_name = 
f'{variable}_' + '_'.join(shift_words)[:-1]\n # col_name = float(words[1][:-1])\n if col_name not in performances.keys():\n performances[col_name] = [float(words[1][:-1])]\n else:\n performances[col_name].append(float(words[1][:-1]))\n return performances", "def readDriverFile(self, input_file):\n\n\n fid = open(self.basePath + input_file,'r')\n\n # Line 1\n line = fid.readline()\n l_input = line.split('!')\n mshfile = l_input[0].rstrip()\n\n # Line 2\n line = fid.readline()\n l_input = line.split('!')\n obsfile = l_input[0].rstrip()\n\n # Line 3\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n if l_input=='null':\n topofile = []\n\n else:\n topofile = l_input[0].rstrip()\n\n\n # Line 4\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n if l_input[0]=='VALUE':\n mstart = float(l_input[1])\n\n else:\n mstart = l_input[0].rstrip()\n\n # Line 5\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n if l_input[0]=='VALUE':\n mref = float(l_input[1])\n\n else:\n mref = l_input[0].rstrip()\n\n # Line 6\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n if l_input[0]=='VALUE':\n staticInput = float(l_input[1])\n\n elif l_input[0]=='DEFAULT':\n staticInput = None\n\n else:\n staticInput = l_input[0].rstrip()\n\n\n # Line 7\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n if l_input=='DEFAULT':\n magfile = []\n\n else:\n magfile = l_input[0].rstrip()\n\n # Line 8\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n if l_input=='DEFAULT':\n wgtfile = []\n\n else:\n wgtfile = l_input[0].rstrip()\n\n # Line 9\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n chi = float(l_input[0])\n\n # Line 10\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n val = np.array(l_input[0:4])\n alphas = val.astype(np.float)\n\n # Line 11\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n if l_input[0]=='VALUE':\n val = np.array(l_input[1:3])\n bounds = val.astype(np.float)\n\n else:\n bounds = l_input[0].rstrip()\n\n # Line 12\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n if l_input[0]=='VALUE':\n val = np.array(l_input[1:6])\n lpnorms = val.astype(np.float)\n\n else:\n lpnorms = l_input[0].rstrip()\n\n # Line 13\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n if l_input[0]=='VALUE':\n val = np.array(l_input[1:3])\n eps = val.astype(np.float)\n\n else:\n eps = [None,None]\n\n self.mshfile = mshfile\n self.obsfile = obsfile\n self.topofile = topofile\n self.mstart = mstart\n self._mrefInput = mref\n self._staticInput = staticInput\n self.magfile = magfile\n self.wgtfile = wgtfile\n self.chi = chi\n self.alphas = alphas\n self.bounds = bounds\n self.lpnorms = lpnorms\n self.eps = eps", "def get_file_format(file):\n flag = None\n with open(file) as f:\n for line in f.readlines():\n MAT, MF, MT = read_control(line)[:3]\n if MF == 1 and MT == 451:\n i = 0\n C, i = read_cont([line], i)\n flag = C.N1\n break\n if flag is None:\n ftype = None\n elif flag == -11 or flag == -12:\n ftype = \"errorr\"\n elif flag == -1:\n ftype = \"gendf\"\n else:\n if C.L1 == 2:\n ftype = \"pendf\"\n else:\n ftype = \"endf6\"\n return ftype", "def read_forces(self, fname):\n outfile = open(fname)\n lines = outfile.readlines()\n outfile.close()\n nats = len(self.atoms)\n forces = np.zeros((nats, 3), float)\n infinite_force=\"*****\"\n if 'mozyme' in self.str_params['job_type'].lower():\n for i, line in enumerate(lines):\n if line.find('FINAL POINT AND DERIVATIVES') != -1:\n for j in range(nats):\n gline = lines[i + j + 
5]\n pre_force=gline[8:35]\n if(infinite_force in pre_force):\n forces[j] = [999999999.9999,999999999.9999,999999999.9999]\n else:\n forces[j] = [float( pre_force[0:9].strip()),float( pre_force[9:18].strip()),float( pre_force[18:27].strip())]\n else:\n for i, line in enumerate(lines):\n if line.find('GRADIENT\\n') != -1:\n for j in range(nats * 3):\n gline = lines[i + j + 1]\n pre_force=gline[49:62]\n if(infinite_force in pre_force):\n forces[int(j/3), int(j%3)] =999999999.9999\n else:\n forces[int(j/3), int(j%3)] = float(pre_force)\n break\n#do not change unit for mopac\n forces *= - (kcal / mol)\n return forces", "def get_training(feature_path): \n features = np.loadtxt(feature_path)\n feature_size = features.shape[1] -1 \n features_in = features[:,0:feature_size]\n features_out = features[:,-1]\n #features_out = np.array(map(lambda x: x if x else 0, features_out_unnorm))\n return features_in, features_out", "def convert_matrix(infile, names,refdict,nosamples):\n \n if infile.endswith(\".gz\"):\n inf = gzip.open(infile, \"rb\")\n \n else:\n inf = open(infile, \"r\")\n for line in inf:\n line = line.rsplit()\n if line[0] == \"chromosome\":\n pass # header\n else:\n \n\n chrom = line[0]\n start = line[1]\n stop = line[2]\n TE = line[4]\n n_te = str(len(TE.split(\",\")))\n tes=TE.split(\",\")\n tefam=[]\n tesuperfamily=[]\n \n \n for i in xrange(len(tes)):\n \n tefam.append(refdict[tes[i]][0])\n \n tesuperfamily.append(refdict[tes[i]][1])\n \n \n superfamily=list(set(tesuperfamily))\n if 'Unknown' in superfamily:\n superfamily.remove('Unknown')\n if not superfamily:\n superfamily.append('Unknown')\n \n pos = line[5].split(\",\")\n neg = line[6].split(\",\")\n#missing = 305-(len(pos)+len(neg))/305\n te_id = \"\\t\".join([chrom, start, stop])\n status = get_status(pos, neg, names)\n column_ordered = []\n for i in names:\n column_ordered.append(status[i])\n noNA = filter(lambda x: x != \"NA\", status.values()) \n noNA = map(int, noNA)\n pos_count = sum(noNA)\n l = len(noNA)\n neg_count = l - pos_count\n TE_present=pos_count\n TE_absent=neg_count\n if(pos_count < neg_count):\n Minor_allele=\"presence\"\n\n else:\n Minor_allele=\"absence\"\n#print Minor_allele\n q20=int(0.2*nosamples)\n q80=int(0.8*nosamples)\n if (TE_absent < q20):\n Absence_classification=\"True deletion\"\n elif (TE_absent > q80):\n Absence_classification=\"No insertion\"\n else:\n Absence_classification=\"NA\"\n original_call_deletion = 'T'\n MAF=float(min(TE_present, TE_absent))/nosamples\n #print int(min(TE_present, TE_absent)) ,MAF\n if(MAF < 0.025):\n Frequency_classification = \"Rare\"\n else:Frequency_classification =\"Common\"\n print(te_id + \"\\t\" + TE + \"\\t\" + \",\".join(tefam) + \"\\t\" +\",\".join(superfamily) + \"\\t\" +n_te + \"\\t\" + str(pos_count) + \"\\t\" + str(neg_count) + \"\\t\" +str(Minor_allele) + \"\\t\" +original_call_deletion + \"\\t\" +str(Absence_classification) + \"\\t\" +str(MAF) + \"\\t\" +str(Frequency_classification) + \"\\t\"+\"\\t\".join(column_ordered))\n inf.close()", "def sgd_features(filepath=None):\n\n if filepath == None:\n filepath=load_sgd_tab()\n\n arabic_to_roman_dict=chromosomename_roman_to_arabic()[0]\n \n with open(filepath) as f:\n lines = f.readlines()\n\n\n feature_list = []\n feature_orf_dict = {}\n feature_ars_dict = {}\n feature_telomere_dict = {}\n feature_ltr_dict = {}\n feature_centromere_dict = {}\n feature_Xelement_dict = {}\n feature_intron_dict = {}\n feature_ncrna_dict = {}\n feature_ncexon_dict = {}\n feature_trna_dict = {}\n feature_snorna_dict = {}\n 
feature_teg_dict = {}\n feature_5p_utrintron_dict = {}\n feature_mas_dict = {}\n feature_snrna_dict = {}\n feature_rrna_dict = {}\n feature_ets_dict = {}\n feature_its_dict = {}\n feature_oor_dict = {}\n feature_telrna_dict = {}\n \n for line in lines:\n l = line.strip('\\n').split('\\t')\n if not l[1] in feature_list:\n feature_list.append(l[1])\n\n if not l[8].endswith('micron') and not l[8] == '':\n chromosome = arabic_to_roman_dict.get(int(l[8]))\n if l[1] == 'ORF':\n feature_orf_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'ARS':\n feature_ars_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'telomere':\n feature_telomere_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'long_terminal_repeat':\n feature_ltr_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'centromere':\n feature_centromere_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'X_element':\n feature_Xelement_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'intron':\n feature_intron_dict[l[6]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'ncRNA_gene':\n feature_ncrna_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'noncoding_exon':\n feature_ncexon_dict[l[6]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'tRNA_gene':\n feature_trna_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'snoRNA_gene':\n feature_snorna_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'transposable_element_gene':\n feature_teg_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'five_prime_UTR_intron':\n feature_5p_utrintron_dict[l[6]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'matrix_attachment_site':\n feature_mas_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'snRNA_gene':\n feature_snrna_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'rRNA_gene':\n feature_rrna_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'external_transcribed_spacer_region':\n feature_ets_dict[l[6]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'internal_transcribed_spacer_region':\n feature_its_dict[l[6]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'origin_of_replication':\n feature_oor_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'telomerase_RNA_gene':\n feature_telrna_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n\n\n \n\n\n genomicregions_list = ['ORF', 'ARS', 'Telomere', 'long_terminal_repeat',\n 'Centromere', 'X_element', 'Intron', 'ncRNA_gene',\n 'Noncoding_exon', 'tRNA_gene', 'snoRNA_gene',\n 'transposable_element_gene', 'five_prime_UTR_intron',\n 'matrix_attachment_site', 'snRNA_gene', 'rRNA_gene',\n 'external_transcribed_spacer_region',\n 'internal_transcribed_spacer_region',\n 'origin_of_replication', 'telomerase_RNA_gene']\n\n\n return(genomicregions_list, feature_orf_dict, feature_ars_dict, feature_telomere_dict,\n feature_ltr_dict, feature_centromere_dict, feature_Xelement_dict, feature_intron_dict,\n feature_ncrna_dict, feature_ncexon_dict, feature_trna_dict,\n feature_snorna_dict, feature_teg_dict, 
feature_5p_utrintron_dict,\n feature_mas_dict, feature_snrna_dict, feature_rrna_dict,\n feature_ets_dict, feature_its_dict, feature_oor_dict,\n feature_telrna_dict)", "def read_file_agsm(self,filename):\n\n narr,larr,farr,iarr,nn,exceed_freqlim = \\\n aims_fortran.read_file_agsm(filename,config.npositive,config.agsm_cutoff, \\\n config.cutoff*self.cutoff)\n self.modes = np.array(zip(narr[0:nn],larr[0:nn],farr[0:nn],iarr[0:nn]),dtype=modetype)\n\n return exceed_freqlim", "def post_process(self, filename):\n title = self.title\n\n outfile = open(filename, 'r')\n data = outfile.readlines()\n\n name = data[0].strip()\n mode = data[1].strip()\n ops = data[2].strip().split(',')\n nl = 'True' in ops[0]\n ln = 'True' in ops[1]\n drv = 'True' in ops[2]\n\n data = data[3:]\n npt = len(data)\n\n t1u = np.empty((npt, ))\n t3u = np.empty((npt, ))\n t5u = np.empty((npt, ))\n flag = np.empty((npt, ), dtype=np.bool)\n x_dv = np.empty((npt, ))\n x_state = np.empty((npt, ))\n x_proc = np.empty((npt, ))\n\n for j, line in enumerate(data):\n x_dv[j], x_state[j], x_proc[j], flag[j], t1u[j], t3u[j], t5u[j] = line.strip().split(',')\n\n if np.any(flag):\n use_flag = True\n else:\n use_flag = False\n\n # Times are all normalized.\n t1 = t1u/t1u[0]\n t3 = t3u/t3u[0]\n t5 = t5u/t5u[0]\n\n if mode == 'state':\n x = x_state\n xlab = \"Number of states.\"\n elif mode == 'desvar':\n xlab = \"Number of design vars.\"\n x = x_dv\n elif mode == 'proc':\n x = x_proc\n xlab = \"Number of processors.\"\n\n if use_flag:\n\n flagtxt = self.flagtxt\n\n # Split them up. We know the pattern.\n t1F = t1[0::2]\n t1T = t1[1::2]\n t3F = t3[0::2]\n t3T = t3[1::2]\n t5F = t5[0::2]\n t5T = t5[1::2]\n\n xT = x[0::2]\n xF = x[1::2]\n\n # Generate plots\n\n if nl:\n plt.figure(1)\n plt.loglog(xF, t1F, 'bo-')\n plt.loglog(xT, t1T, 'ro-')\n\n plt.xlabel(xlab)\n plt.ylabel('Nonlinear Solve: Normalized Time')\n plt.title(title)\n plt.grid(True)\n if self.equal_axis:\n plt.axis('equal')\n plt.legend(['Default', flagtxt], loc=0)\n plt.savefig(\"%s_%s_%s.png\" % (name, mode, 'nl'))\n\n if ln:\n plt.figure(2)\n plt.loglog(xF, t3F, 'o-')\n plt.loglog(xT, t3T, 'ro-')\n\n plt.xlabel(xlab)\n plt.ylabel('Compute Totals: Normalized Time')\n plt.title(title)\n plt.grid(True)\n if self.equal_axis:\n plt.axis('equal')\n plt.legend(['Default', flagtxt], loc=0)\n plt.savefig(\"%s_%s_%s.png\" % (name, mode, 'ln'))\n\n if drv:\n plt.figure(3)\n plt.loglog(xF, t5F, 'o-')\n plt.loglog(xT, t5T, 'ro-')\n\n plt.xlabel(xlab)\n plt.ylabel(self.title_driver + ': Normalized Time')\n plt.title(title)\n plt.grid(True)\n if self.equal_axis:\n plt.axis('equal')\n plt.legend(['Default', flagtxt], loc=0)\n plt.savefig(\"%s_%s_%s.png\" % (name, mode, 'drv'))\n\n if self.special_plot_driver_on_linear:\n\n # Plot whatever driver does (e.g., coloring) on the same axis and normalization as linear time.\n t5 = t5u/t3u[0]\n t5F = t5[0::2]\n t5T = t5[1::2]\n\n plt.figure(4)\n plt.loglog(xF, t3F, 'o-')\n plt.loglog(xT, t3T, 'ro-')\n plt.loglog(xT, t5T, 'mo-')\n\n plt.xlabel(xlab)\n plt.ylabel('Normalized Time')\n plt.title(title)\n plt.grid(True)\n if self.equal_axis:\n plt.axis('equal')\n plt.legend(['Compute Totals', 'Compute Totals: ' + flagtxt, self.title_driver], loc=0)\n plt.savefig(\"%s_%s_%s.png\" % (name, mode, 'spec1'))\n\n else:\n\n # Generate plots\n\n if nl:\n plt.figure(1)\n plt.loglog(x, t1, 'o-')\n\n plt.xlabel(xlab)\n plt.ylabel('Nonlinear Solve: Normalized Time')\n plt.title(title)\n plt.grid(True)\n if self.equal_axis:\n plt.axis('equal')\n 
plt.savefig(\"%s_%s_%s.png\" % (name, mode, 'nl'))\n\n if ln:\n plt.figure(2)\n plt.loglog(x, t3, 'o-')\n\n plt.xlabel(xlab)\n plt.ylabel('Compute Totals: Normalized Time')\n plt.title(title)\n plt.grid(True)\n if self.equal_axis:\n plt.axis('equal')\n plt.savefig(\"%s_%s_%s.png\" % (name, mode, 'ln'))\n\n # For procs, we also view the time/proc as a function of number of procs.\n if mode == 'proc':\n plt.figure(3)\n plt.loglog(x, t3/x, 'o-')\n\n plt.xlabel(xlab)\n plt.ylabel('Compute Totals: Normalized Time per Processor')\n plt.title(title)\n plt.grid(True)\n if self.equal_axis:\n plt.axis('equal')\n plt.savefig(\"%s_%s_%s_per_proc.png\" % (name, mode, 'ln'))\n\n plt.show()\n print('done')", "def main():\n dir_path='.'\n meas_file='magic_measurements.txt'\n samp_file=\"er_samples.txt\"\n out_file='magic_measurements.txt'\n if '-h' in sys.argv:\n print(main.__doc__)\n sys.exit()\n if '-WD' in sys.argv:\n ind = sys.argv.index('-WD')\n dir_path=sys.argv[ind+1]\n if '-f' in sys.argv:\n ind = sys.argv.index('-f')\n meas_file=sys.argv[ind+1]\n if '-fsa' in sys.argv:\n ind = sys.argv.index('-fsa')\n samp_file=sys.argv[ind+1]\n if '-F' in sys.argv:\n ind = sys.argv.index('-F')\n out_file=sys.argv[ind+1]\n # read in measurements file\n meas_file=dir_path+'/'+meas_file\n out_file=dir_path+'/'+out_file\n samp_file=dir_path+'/'+samp_file\n data,file_type=pmag.magic_read(meas_file)\n samps,file_type=pmag.magic_read(samp_file)\n MeasRecs=[]\n sampnames,sflag=[],0\n for rec in data:\n for samp in samps:\n if samp['er_sample_name'].lower()==rec['er_sample_name'].lower():\n if samp['er_sample_name'] not in sampnames:sampnames.append(samp['er_sample_name'].lower())\n rec['er_site_name']=samp['er_site_name']\n rec['er_location_name']=samp['er_location_name']\n MeasRecs.append(rec)\n break\n if rec['er_sample_name'].lower() not in sampnames:\n sampnames.append(rec['er_sample_name'].lower())\n sflag=1\n SampRec={}\n for key in list(samps[0].keys()):SampRec[key]=\"\"\n SampRec['er_sample_name']=rec['er_sample_name']\n SampRec['er_citation_names']=\"This study\"\n SampRec['er_site_name']='MISSING'\n SampRec['er_location_name']='MISSING'\n SampRec['sample_desription']='recorded added by update_measurements - edit as needed'\n samps.append(SampRec)\n print(rec['er_sample_name'],' missing from er_samples.txt file - edit orient.txt file and re-import')\n rec['er_site_name']='MISSING'\n rec['er_location_name']='MISSING'\n MeasRecs.append(rec)\n pmag.magic_write(out_file,MeasRecs,'magic_measurements')\n print(\"updated measurements file stored in \", out_file)\n if sflag==1:\n pmag.magic_write(samp_file,samps,'er_samples')\n print(\"updated sample file stored in \", samp_file)", "def loader(filename,wdm=0,verbose=0,kmpers=1):\n with open(filename, 'rb') as f:\n if wdm == False:\n if verbose>1:\n print(filename)\n #file info\n info= np.fromfile(f,dtype=infodtype,count=1)\n infoBytes = f.tell()\n if verbose>2:\n print(infoBytes)\n #skip darkmatter\n #read the first dm line\n if verbose>2:\n print(f.tell())\n catd = np.fromfile(f,dtype= dmdtype, count=1) \n #get the bytes location and subtract off the bytes location after loading info to get n bytes a line for dm\n if verbose>2:\n print(f.tell())\n current = f.tell()\n dmBytes = current-infoBytes\n f.seek(dmBytes*(info['nd'][0]-1)+current)\n if verbose>2:\n print(f.tell())\n # stars setup \n cats= np.fromfile(f,dtype=stellardtype, count=info['ns'][0])\n if verbose>2:\n print('done')\n else:\n if verbose>1:\n print(filename)\n #file info\n info= 
np.fromfile(f,dtype=infodtype,count=1)\n if verbose>2:\n print(f.tell())\n # #dark matter setup count is reading the number of ?rows? \n catd= np.fromfile(f,dmdtype, count=info['nd'][0]) \n if verbose>2:\n print(f.tell()) \n # stars setup \n cats= np.fromfile(f,dtype=stellardtype, count=info['ns'][0])\n if verbose>2:\n print('done')\n \n \n #convert to physical units as found in README.md\n if wdm == True:\n catd['mass']*=2.324876e9\n if kmpers == 1:\n catd['vx']*=100.\n catd['vy']*=100.\n catd['vz']*=100.\n cats['mass']*=2.324876e9\n if kmpers == 1:\n cats['vx']*=100.\n cats['vy']*=100.\n cats['vz']*=100.\n \n if wdm == True:\n return(catd,cats,info)\n else:\n return(cats,info)", "def predyflexy(infile, sequence):\n result = np.loadtxt(infile, usecols=10, skiprows=1).reshape((1, -1, 1))\n result[:, :10, 0] = 0\n result[:, -10:, 0] = 0\n return result", "def cam_read(filename):\n f = open(filename,'rb')\n check = np.fromfile(f,dtype=np.float32,count=1)[0]\n assert check == TAG_FLOAT, ' cam_read:: Wrong tag in flow file (should be: {0}, is: {1}). Big-endian machine? '.format(TAG_FLOAT,check)\n M = np.fromfile(f,dtype='float64',count=9).reshape((3,3))\n N = np.fromfile(f,dtype='float64',count=12).reshape((3,4))\n return M,N", "def read_input_pizza(filename):\n lines = open(filename).readlines()\n M, N = [int(val) for val in lines[0].split()]\n available = np.array([int(n) for n in lines[1].split()])\n return M, N, available", "def bg_ex(self, f):\n dat = sif.readSIF(os.path.join(self.path, f))[0][0]\n return dat - np.min(dat[:511, :511])", "def read_pendf_xs(file,start,finish):\n with open(file) as f:\n e = []\n cs = []\n\n break_outer = False\n\n for i,line in enumerate(f):\n # -------------------------------\n # Stop the loop once finish is reached\n # -------------------------------\n if i == finish:\n break\n if i >= start-1:\n \t# -------------------------------\n \t# Only include first 66 columns, split on space\n \t# and convert to an array of strings\n \t# -------------------------------\n word_len = 11\n word_start = 0\n for j in range(6):\n word = line[word_start:word_start+11]\n\n if( j%2 == 0 ):\n # -------------------------------\n # Grab the energies, convert to readable format\n # -------------------------------\n if( word == ' ' ):\n break_outer = True\n break # end of TAB1\n e.append(word.replace('-','e-').replace('+','e+'))\n else:\n # -------------------------------\n # Grab cross section, convert to readable format\n # -------------------------------\n if( word == ' ' ):\n break_outer = True\n break # end of TAB1\n cs.append(word.replace('-','e-').replace('+','e+'))\n word_start+=word_len\n\n if( break_outer ):\n break # end of TAB1\n \n # -------------------------------\n # Convert to floats\n # -------------------------------\n e = np.array(e).astype(float)\n cs = np.array(cs).astype(float)\n\n # -------------------------------\n # Stack them into a numpy array\n # -------------------------------\n pointwise_cs = np.array([e,cs])\n \n return pointwise_cs", "def process(path):\n # get parameter value:\n with open('config.cym', 'r') as f:\n line = f.readline()\n #print(line)\n pam = float(line[1:])\n f.close()\n # get position of aster:\n with open('aster.txt', 'r') as f:\n for line in f:\n if len(line)>3 and not line[0]=='%':\n #print(line)\n val = line.split()\n x = float(val[2])\n y = float(val[3])\n #z = float(val[4])\n #pos = math.sqrt(x*x+y*y+z*z)\n pos = math.sqrt(x*x+y*y)\n\n f.close()\n return (pam, pos)", "def read_szx_fmv_12(eps_file):\n raw_data = 
eps_file.scaled_mdr\n raw_unscaled = eps_file.mdr\n mphr = eps_file.mphr\n\n n_node_per_line = raw_data[\"LONGITUDE\"].shape[1]\n n_lines = raw_data[\"LONGITUDE\"].shape[0]\n n_records = raw_data[\"LONGITUDE\"].size\n\n data = {}\n metadata = {}\n idx_nodes = np.arange(n_lines).repeat(n_node_per_line)\n\n ascat_time = shortcdstime2jd(raw_data[\"UTC_LINE_NODES\"].flatten()[\"day\"],\n raw_data[\"UTC_LINE_NODES\"].flatten()[\"time\"])\n data[\"jd\"] = ascat_time[idx_nodes]\n\n metadata[\"spacecraft_id\"] = np.int8(mphr[\"SPACECRAFT_ID\"][-1])\n metadata[\"orbit_start\"] = np.uint32(mphr[\"ORBIT_START\"])\n\n fields = [\n \"processor_major_version\", \"processor_minor_version\",\n \"format_major_version\", \"format_minor_version\"\n ]\n\n for f in fields:\n metadata[f] = np.int16(mphr[f.upper()])\n\n fields = [\n \"degraded_inst_mdr\", \"degraded_proc_mdr\", \"sat_track_azi\",\n \"abs_line_number\"\n ]\n\n for f in fields:\n data[f] = raw_data[f.upper()].flatten()[idx_nodes]\n\n fields = [(\"longitude\", long_nan), (\"latitude\", long_nan),\n (\"swath indicator\", byte_nan)]\n\n for f, nan_val in fields:\n data[f] = raw_data[f.upper()].flatten()\n valid = raw_unscaled[f.upper()].flatten() != nan_val\n data[f][~valid] = nan_val\n\n fields = [(\"sigma0_trip\", long_nan), (\"inc_angle_trip\", uint_nan),\n (\"azi_angle_trip\", int_nan), (\"kp\", uint_nan),\n (\"num_val_trip\", ulong_nan), (\"f_kp\", byte_nan),\n (\"f_usable\", byte_nan), (\"f_f\", uint_nan), (\"f_v\", uint_nan),\n (\"f_oa\", uint_nan), (\"f_sa\", uint_nan), (\"f_tel\", uint_nan),\n (\"f_ref\", uint_nan), (\"f_land\", uint_nan)]\n\n for f, nan_val in fields:\n data[f] = raw_data[f.upper()].reshape(n_records, 3)\n valid = raw_unscaled[f.upper()].reshape(n_records, 3) != nan_val\n data[f][~valid] = nan_val\n\n # modify longitudes from (0, 360) to (-180,180)\n mask = np.logical_and(data[\"longitude\"] != long_nan,\n data[\"longitude\"] > 180)\n data[\"longitude\"][mask] += -360.\n\n # modify azimuth from (-180, 180) to (0, 360)\n mask = (data[\"azi_angle_trip\"] != int_nan) & (data[\"azi_angle_trip\"] < 0)\n data[\"azi_angle_trip\"][mask] += 360\n\n data[\"node_num\"] = np.tile((np.arange(n_node_per_line) + 1),\n n_lines).astype(np.uint8)\n\n data[\"line_num\"] = idx_nodes.astype(np.uint16)\n\n data[\"as_des_pass\"] = (data[\"sat_track_azi\"] < 270).astype(np.uint8)\n\n data[\"swath_indicator\"] = data.pop(\"swath indicator\")\n\n return data, metadata", "def row_to_features(self, row):\n sequence = unify_sequence(row[\"Sequence\"])\n charge = field_to_float(row[\"Charge\"])\n score = field_to_bayes_float(row[self.col_for_sorting])\n calc_mz, exp_mz, calc_mass, exp_mass = get_mz_values(row)\n # calc_mz = field_to_float( row['Calc m/z'] ) # calc m/z or uCalc?\n # exp_mz = field_to_float( row['Exp m/z'] )\n\n pre_aa_field = row[\"Sequence Pre AA\"]\n post_aa_field = row[\"Sequence Post AA\"]\n all_pre_aas = set(re.split(self.delim_regex, pre_aa_field))\n all_post_aas = set(re.split(self.delim_regex, post_aa_field))\n\n if any(pre_aa not in self.tryptic_aas for pre_aa in all_pre_aas):\n enzN = 0\n else:\n enzN = 1\n\n if any(post_aa not in self.tryptic_aas for post_aa in all_post_aas):\n enzC = 0\n else:\n enzC = 1\n\n n_missed_cleavages = len(\n [aa for aa in sequence[:-1] if aa in [\"R\", \"K\"]]\n ) # / len(sequence)\n\n missed_cleavages = [0] * 6\n try:\n missed_cleavages[n_missed_cleavages] = 1\n except IndexError: # if a peptide has more than 6 missed cleavages\n missed_cleavages[-1] = 2\n\n spectrum = row[\"Spectrum 
Title\"].strip()\n mass = (exp_mz * charge) - (charge - 1) * PROTON\n pep_len = len(sequence)\n # delta_mz = calc_mz - exp_mz\n delta_mass = calc_mass - exp_mass\n\n peptide = (sequence, row[\"Modifications\"])\n proteins = self.parse_protein_ids(row[\"Protein ID\"])\n num_pep = self.num_pep[peptide]\n pep_charge_states = len(self.pep_charge_states[peptide])\n seq_mods = len(self.seq_mods[sequence])\n num_spec = len(self.num_spec[row[\"Spectrum Title\"]])\n num_prot = sum((len(self.num_prot[protein]) for protein in proteins))\n pep_site = sum((len(self.pep_site[protein]) for protein in proteins))\n\n user_specified_features = []\n for feat in self.used_extra_fields:\n if feat != self.col_for_sorting:\n try:\n user_specified_features.append(field_to_float(row[feat]))\n except ValueError:\n pass\n\n charges = defaultdict(int)\n for charge_n in sorted(self.pep_charge_states[peptide]):\n charges[charge_n] = 1\n\n if sequence in self.shitty_decoy_seqs:\n is_shitty = 1\n else:\n is_shitty = 0\n\n score_list = sorted(\n list(set(self.score_list_dict[spectrum])),\n reverse=self[\"bigger_scores_better\"],\n )\n\n try:\n score_list_scaled = scale_scores(score_list)\n rank = score_list.index(score)\n deltLCn = (\n score_list_scaled[rank] - score_list_scaled[1]\n ) # Fractional difference between current and second best XCorr\n deltCn = (\n score_list_scaled[rank] - score_list_scaled[-1]\n ) # Fractional difference between current and worst XCorr\n except (ValueError, IndexError, AssertionError):\n # NaN values will be replaced by the column mean later\n # NaN values are entered when there is no ranking\n # e.g. when only one peptide was matched to the spectrum.\n rank, deltLCn, deltCn = np.nan, np.nan, np.nan\n\n features = [\n score,\n rank,\n deltCn,\n deltLCn,\n charge,\n # delta_mz,# / pep_len,\n delta_mass, # / pep_len,\n # abs(delta_mz),# / pep_len,\n abs(delta_mass), # / pep_len,\n n_missed_cleavages / pep_len,\n missed_cleavages[0],\n missed_cleavages[1],\n missed_cleavages[2],\n missed_cleavages[3],\n missed_cleavages[4],\n missed_cleavages[5],\n enzN,\n enzC,\n mass,\n pep_len,\n num_pep,\n num_prot,\n pep_site,\n is_shitty,\n pep_charge_states,\n num_spec,\n seq_mods,\n ]\n\n for charge_n in self.observed_charges:\n features.append(charges[charge_n])\n\n return features + user_specified_features", "def memsat_svm(infile, sequence):\n with open(infile, \"r\") as fh:\n for line in fh:\n if line.startswith(\"Signal peptide:\"):\n sp = 0\n if not line.strip().endswith(\"Not detected.\"):\n sp = line.split(\":\")[1].strip().split(\"-\")[1]\n elif line.startswith(\"Topology\"):\n tms = [[y[0]-1, y[1]] for y in [list(map(int, x.split(\"-\")))\n for x in line.split(\":\")[1].strip().split(\",\")]]\n elif line.startswith(\"Re-entrant helices:\"):\n reh = []\n if not line.strip().endswith(\"Not detected.\"):\n reh = [[y[0]-1, y[1]] for y in [list(map(int, x.split(\"-\")))\n for x in line.split(\":\")[1].strip().split(\",\")]]\n elif line.startswith(\"N-terminal\"):\n orient = line.split(\":\")[1].strip()\n\n if orient == \"in\":\n result = [[1, 0, 0, 0] for _ in range(len(sequence))]\n orient = \"out\"\n else:\n result = [[0, 0, 1, 0] for _ in range(len(sequence))]\n orient = \"in\"\n\n for tm in tms:\n for i in range(*tm):\n result[i] = [0, 1, 0, 0]\n for i in range(tm[1], len(result)):\n if orient == \"in\":\n result[i] = [1, 0, 0, 0]\n else:\n result[i] = [0, 0, 1, 0]\n if orient == \"in\":\n orient = \"out\"\n else:\n orient = \"in\"\n\n for r in reh:\n for i in range(*r):\n result[i] = [0, 0, 
0, 1]\n\n return np.array([result])", "def read_forces(filename):\n f=open(filename,\"r\")\n castep_forces = f.readlines()\n f.close() \n nruter = []\n for index, line in enumerate(castep_forces):\n if 'Total number of ions in cell' in line:\n n_atoms = int(line.split()[7])\n if 'Cartesian components (eV/A)' in line:\n starting_line = index + 4\n for i in range(n_atoms):\n f = starting_line + i\n nruter.append([float(castep_forces[f].split()[m]) for m in range(3,6)]) \n nruter=np.array(nruter,dtype=np.double)\n return nruter", "def sniff( self, filename ):\n handle = open(filename)\n line = handle.readline()\n\n #nb carac\n first = line.split()\n if ( not first[0].isdigit() ):\n return False\n\n ncar=int(first[0]);\n line = handle.readline()\n first = line.split()\n if ( not first[0].isdigit() ):\n return False\n if ( not first[1].isdigit() ):\n return False\n\n nfix=int(first[0]);\n ncov=int(first[1]);\n\n #nom des effets\n line = handle.readline()\n first = line.split()\n if ( len(first) < (nfix + ncov)):\n return False\n\n for i in range(ncar):\n line = handle.readline()\n first = line.split()\n if ( len(first) < (2+2*nfix+ncov) ):\n return False\n\n handle.close()\n\n return True", "def _read_smat(filename):\n return _read_hcore(filename)", "def svm_read_feature(data_file_name, digit):\n\tprob_y = []\n\tprob_x = []\n\tfor line in open(data_file_name):\n\t\t#print line\n\t\tline = line.split(None, 1)\n\t\t#print line\n\t\t# In case an instance with all zero features\n\t\tif len(line) == 1: line += ['']\n\t\tlabel, features = line\n\t\t#parse prob_x\n\t\txi = {}\n\t\tind = 1\n\t\tfor e in features.split():\n\t\t\txi[ind] = float(e)\n\t\t\tind += 1\n\t\t#parse prob_y\n\t\tif int(float(label)) == digit:\n\t\t\tprob_y += [float(+1)]\n\t\telse:\n\t\t\tprob_y += [float(-1)]\n\t\tprob_x += [xi]\n\treturn (prob_y, prob_x)", "def psipred(infile, sequence):\n aa2sec = {\n 'H': [1, 0, 0],\n 'E': [0, 1, 0],\n 'C': [0, 0, 1]\n }\n result = []\n with open(infile, 'r') as fh:\n for line in fh:\n if line.startswith('Pred:'):\n spl = line.strip().split(' ')\n if len(spl) < 2:\n continue\n for aa in spl[1]:\n result.append(aa2sec[aa])\n\n return np.array([result])", "def load_SCOUT_permittivity(filename):#{{{\n\n\n ## Open the file for binary access\n f = open(filename, \"rb\")\n\n ## Load the number of data points, type of x axis and its boundaries\n f.seek(151); datalength = np.fromfile(f, dtype=np.uint16, count=1)[0]\n print(datalength)\n f.seek(160); x_axis_type = np.fromfile(f, dtype=np.uint8, count=1)[0]\n print(x_axis_type)\n f.seek(166); x_start, x_end = np.fromfile(f, dtype=np.float32, count=2)\n print(x_start)\n print(x_end)\n ## Load the n, k data\n f.seek(174); raw_eps = np.fromfile(f, dtype=np.float32, count=datalength*2)\n f.close\n\n eps = raw_eps[::2] + 1j*raw_eps[1::2]\n \n from scipy.constants import h, c, eV\n if x_axis_type == 2: # 'eV' \n freq = np.linspace(x_start*eV/h, x_end*eV/h, datalength) \n elif x_axis_type == 3: # 'um' \n wavelength = np.linspace(x_start*1e-6, x_end*1e-6, datalength)\n freq = c/wavelength\n elif x_axis_type == 0: # 'cm-1' \n freq = np.linspace(x_start*100*c, x_end*100*c, datalength) \n\n return freq, eps", "def spot1d_psi(infile, sequence):\n return np.loadtxt(infile, usecols=11, skiprows=1).reshape((1, -1, 1))", "def read_action(path):\n print(\"Reading action data from file \", path)\n\n df = pd.read_csv(path, sep=' ', header=None, engine='python')\n max_state = int(df[0].max())\n min_state = int(df[0].min())\n a_mat = np.zeros((max_state, max_state), 
dtype='float16')\n\n # for s in np.arange(min_state, max_state + 1):\n for s in np.arange(min_state, max_state + 1):\n df_s = df[df[0] == s]\n for _, row in df_s.iterrows():\n _s = int(row[1])\n p = row[2]\n a_mat[s - 1][_s - 1] = p\n print(\"Finished \", path, \"!\")\n\n return a_mat", "def read_szx_fmv_13(eps_file):\n raw_data = eps_file.scaled_mdr\n raw_unscaled = eps_file.mdr\n mphr = eps_file.mphr\n\n n_node_per_line = raw_data[\"LONGITUDE\"].shape[1]\n n_lines = raw_data[\"LONGITUDE\"].shape[0]\n n_records = raw_data[\"LONGITUDE\"].size\n\n data = {}\n metadata = {}\n idx_nodes = np.arange(n_lines).repeat(n_node_per_line)\n\n ascat_time = shortcdstime2jd(raw_data[\"UTC_LINE_NODES\"].flatten()[\"day\"],\n raw_data[\"UTC_LINE_NODES\"].flatten()[\"time\"])\n data[\"jd\"] = ascat_time[idx_nodes]\n\n metadata[\"spacecraft_id\"] = np.int8(mphr[\"SPACECRAFT_ID\"][-1])\n metadata[\"orbit_start\"] = np.uint32(mphr[\"ORBIT_START\"])\n\n fields = [\n \"processor_major_version\", \"processor_minor_version\",\n \"format_major_version\", \"format_minor_version\"\n ]\n\n for f in fields:\n metadata[f] = np.int16(mphr[f.upper()])\n\n fields = [\n \"degraded_inst_mdr\", \"degraded_proc_mdr\", \"sat_track_azi\",\n \"abs_line_number\"\n ]\n\n for f in fields:\n data[f] = raw_data[f.upper()].flatten()[idx_nodes]\n\n fields = [(\"longitude\", long_nan), (\"latitude\", long_nan),\n (\"swath indicator\", byte_nan)]\n\n for f, nan_val in fields:\n data[f] = raw_data[f.upper()].flatten()\n valid = raw_unscaled[f.upper()].flatten() != nan_val\n data[f][~valid] = nan_val\n\n fields = [(\"sigma0_trip\", long_nan), (\"inc_angle_trip\", uint_nan),\n (\"azi_angle_trip\", int_nan), (\"kp\", uint_nan),\n (\"num_val_trip\", ulong_nan), (\"f_kp\", byte_nan),\n (\"f_usable\", byte_nan), (\"land_frac\", uint_nan)]\n\n for f, nan_val in fields:\n data[f] = raw_data[f.upper()].reshape(n_records, 3)\n valid = raw_unscaled[f.upper()].reshape(n_records, 3) != nan_val\n data[f][~valid] = nan_val\n\n # modify longitudes from (0, 360) to (-180,180)\n mask = np.logical_and(data[\"longitude\"] != long_nan,\n data[\"longitude\"] > 180)\n data[\"longitude\"][mask] += -360.\n\n # modify azimuth from (-180, 180) to (0, 360)\n mask = (data[\"azi_angle_trip\"] != int_nan) & (data[\"azi_angle_trip\"] < 0)\n data[\"azi_angle_trip\"][mask] += 360\n\n data[\"node_num\"] = np.tile((np.arange(n_node_per_line) + 1),\n n_lines).astype(np.uint8)\n\n data[\"line_num\"] = idx_nodes.astype(np.uint16)\n\n data[\"as_des_pass\"] = (data[\"sat_track_azi\"] < 270).astype(np.uint8)\n\n data[\"swath_indicator\"] = data.pop(\"swath indicator\")\n\n data[\"f_land\"] = data.pop(\"land_frac\")\n\n return data, metadata", "def get_min_diff(zero_f=\"NIST/human_hcd_synthetic_oxidized.msp\",\n\t\t\t\t one_f=\"NIST/human_hcd_synthetic_native.msp\",\n\t\t\t\t outfile=\"res_small/selected_features_diff.txt\",\n\t\t\t\t top_mean = 1000,\n\t\t\t\t top_peaks = 50,\n\t\t\t\t max_distance = 275,\n\t\t\t\t distance_bins = 0.005,\n\t\t\t\t windowed_mode=False):\n\t\n\t#Check the file extension and parse to get features for class zero\n\tif zero_f.endswith(\".mgf\"): feats_zero_sum,feat_bins,instance_names,count_zero = read_mgf(zero_f,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t sum_feats=True,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t max_dist=max_distance,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t step_size=distance_bins,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t top_peaks=top_peaks)\n\telif zero_f.endswith(\".msp\"): 
feats_zero_sum,feat_bins,instance_names,count_zero = read_msp(zero_f,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tsum_feats=True,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tmax_dist=max_distance,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tstep_size=distance_bins,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\ttop_peaks=top_peaks)\n\telse: return(False)\n\t\n\t#Check the file extension and parse to get features for class one\n\tif one_f.endswith(\".mgf\"): feats_one_sum,feat_bins,instance_names,count_one = read_mgf(one_f,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t sum_feats=True,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t max_dist=max_distance,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t step_size=distance_bins,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t top_peaks=top_peaks)\n\telif one_f.endswith(\".msp\"): feats_one_sum,feat_bins,instance_names,count_one = read_msp(one_f,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t sum_feats=True,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t max_dist=max_distance,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t step_size=distance_bins,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t top_peaks=top_peaks)\n\telse: return(False)\n\t\n\t#Get average difference between the bins of both groups\n\tdiffs = [abs(m1-m2) for m1,m2 in zip(feats_zero_sum.mean(axis=0).tolist()[0],feats_one_sum.mean(axis=0).tolist()[0])]\n\t\n\t#Get the indexes of the biggest differences in bins\n\tindexes_diff = sorted(list(enumerate(diffs)),key=itemgetter(1),reverse=True)\n\tselected_features_diff = [feat_bins[ind] for ind,val in indexes_diff[0:top_mean]]\n\tselected_features_diff.sort()\n\t\n\t#For the important bins we need the next number to create a closed bin; calculated vals; extend to bins\n\tdiff_bins = [sfd+distance_bins for sfd in selected_features_diff]\n\tdiff_bins.extend(selected_features_diff)\n\tdiff_bins.sort()\n\t\n\t#Remove duplicate values\n\tdiff_bins = list(set(diff_bins))\n\tdiff_bins.sort()\n\t\n\t#Write feats to a file\n\toutfile_feats = open(outfile,\"w\")\n\toutfile_feats.write(\"\\n\".join(map(str,diff_bins)))\n\toutfile_feats.close()\n\n\treturn(diff_bins)", "def find_pos_hmmer_hitsx(infilepath1, infilepath2, fwdeval, reveval,\n outfilepath, just_evalue):\n #print('\\t' + os.path.basename(infilepath1))\n #print('\\t' + os.path.basename(infilepath2))\n #print('\\t' + str(fwdeval))\n #print('\\t' + str(reveval))\n #print('\\t' + os.path.basename(outfilepath))\n #print('\\n')\n # Get list of redundant accessions from the second infile.\n\n red_acc_list = get_red_acc_list(infilepath2)\n\n # Set in and out file variables.\n infilehandle = open(infilepath1)\n outfilehandle = open(outfilepath, 'w')\n\n # Loop through lines in spreadsheet and write ones with one of the\n # redundant accessions in the top hit position to a new sheet.\n line_num = 0\n for line in infilehandle:\n # Identify the top hit accession from the rBLAST for each HMMer hit.\n line_num += 1\n if not line.startswith('Forward'):\n if not line.startswith('\\n'):\n line_list = line.split(',')\n if len(line_list) > 6:\n top_hit_acc = line_list[6]\n positive = False\n\n # If the top hit accession matches one of the redundant\n # accessions, then write to the output spreadsheet.\n for red_acc in red_acc_list:\n #print('Does ' + red_acc + ' = ' + top_hit_acc + ' ?')\n if red_acc.strip() == top_hit_acc.strip():\n #print('Yes\\n')\n positive = True\n else:\n #print('No\\n')\n pass\n\n # If the just_evalue option is set to True, then ignore\n # what the top reverse 
blast hit is.\n if just_evalue:\n positive = True\n\n if positive:\n # Only write line if evalue criteria met.\n fhmmer_e = line_list[5]\n rblast_e = line_list[7]\n if (float(fhmmer_e) <= float(fwdeval)) and \\\n (float(rblast_e) <= float(reveval)):\n outfilehandle.write(line)\n\n # Close files.\n infilehandle.close()\n outfilehandle.close()", "def read_szx_fmv_11(eps_file):\n raw_data = eps_file.scaled_mdr\n raw_unscaled = eps_file.mdr\n mphr = eps_file.mphr\n\n n_node_per_line = raw_data[\"LONGITUDE\"].shape[1]\n n_lines = raw_data[\"LONGITUDE\"].shape[0]\n n_records = raw_data[\"LONGITUDE\"].size\n\n data = {}\n metadata = {}\n idx_nodes = np.arange(n_lines).repeat(n_node_per_line)\n\n ascat_time = shortcdstime2jd(raw_data[\"UTC_LINE_NODES\"].flatten()[\"day\"],\n raw_data[\"UTC_LINE_NODES\"].flatten()[\"time\"])\n data[\"jd\"] = ascat_time[idx_nodes]\n\n metadata[\"spacecraft_id\"] = np.int8(mphr[\"SPACECRAFT_ID\"][-1])\n metadata[\"orbit_start\"] = np.uint32(mphr[\"ORBIT_START\"])\n\n fields = [\n \"processor_major_version\", \"processor_minor_version\",\n \"format_major_version\", \"format_minor_version\"\n ]\n\n for f in fields:\n metadata[f] = np.int16(mphr[f.upper()])\n\n fields = [\"sat_track_azi\"]\n for f in fields:\n data[f] = raw_data[f.upper()].flatten()[idx_nodes]\n\n fields = [(\"longitude\", long_nan), (\"latitude\", long_nan),\n (\"swath_indicator\", byte_nan)]\n\n for f, nan_val in fields:\n data[f] = raw_data[f.upper()].flatten()\n valid = raw_unscaled[f.upper()].flatten() != nan_val\n data[f][~valid] = nan_val\n\n fields = [(\"sigma0_trip\", long_nan), (\"inc_angle_trip\", uint_nan),\n (\"azi_angle_trip\", int_nan), (\"kp\", uint_nan),\n (\"f_kp\", byte_nan), (\"f_usable\", byte_nan), (\"f_f\", uint_nan),\n (\"f_v\", uint_nan), (\"f_oa\", uint_nan), (\"f_sa\", uint_nan),\n (\"f_tel\", uint_nan), (\"f_land\", uint_nan)]\n\n for f, nan_val in fields:\n data[f] = raw_data[f.upper()].reshape(n_records, 3)\n valid = raw_unscaled[f.upper()].reshape(n_records, 3) != nan_val\n data[f][~valid] = nan_val\n\n # modify longitudes from (0, 360) to (-180,180)\n mask = np.logical_and(data[\"longitude\"] != long_nan,\n data[\"longitude\"] > 180)\n data[\"longitude\"][mask] += -360.\n\n # modify azimuth from (-180, 180) to (0, 360)\n mask = (data[\"azi_angle_trip\"] != int_nan) & (data[\"azi_angle_trip\"] < 0)\n data[\"azi_angle_trip\"][mask] += 360\n\n data[\"node_num\"] = np.tile((np.arange(n_node_per_line) + 1),\n n_lines).astype(np.uint8)\n data[\"line_num\"] = idx_nodes.astype(np.uint16)\n data[\"as_des_pass\"] = (data[\"sat_track_azi\"] < 270).astype(np.uint8)\n\n return data, metadata", "def readAMBERTop(self, phys, filename):\r\n\r\n def skipLine(data):\r\n nl = data.index('\\n')\r\n return data[nl+1:len(data)]\r\n\r\n def jumpTo(data, target):\r\n fp = data.index(target)\r\n return data[fp:len(data)]\r\n\r\n def readRemove(data, size):\r\n retval = data[0:size-1]\r\n return data[size:len(data)]\r\n\r\n def getInteger(data):\r\n pos = 0\r\n retval = \"\"\r\n while (not data[pos].isdigit()):\r\n pos = pos + 1\r\n while (data[pos].isdigit()):\r\n retval = retval + data[pos]\r\n pos = pos + 1\r\n data = data[pos:len(data)]\r\n return int(retval), data\r\n\r\n def parse(data, arr, str, count, dtype, tupsize=1):\r\n data = jumpTo(data, \"%FLAG \"+str)\r\n data = jumpTo(data, \"%FORMAT\")\r\n numPerLine, data = getInteger(data)\r\n fieldsize, data = getInteger(data)\r\n data = skipLine(data) \r\n \r\n arr2 = []\r\n numread = 0\r\n for j in range(0, (tupsize*count-1) / numPerLine + 
1):\r\n for i in range(0, numPerLine):\r\n if (tupsize == 1):\r\n arr.append(dtype(data[0:fieldsize].strip()))\r\n else:\r\n arr2.append(dtype(data[0:fieldsize].strip()))\r\n if (len(arr2) == tupsize):\r\n arr.append(arr2)\r\n arr2 = []\r\n numread += 1\r\n data = data[fieldsize:len(data)]\r\n if (numread == tupsize*count):\r\n break\r\n data = skipLine(data) \r\n return data\r\n\r\n def scan(data, str):\r\n return (data.count(str) != 0)\r\n\r\n\r\n f = open(filename, 'r')\r\n data = f.read()\r\n\r\n # First Line: VERSION ...\r\n data = skipLine(data)\r\n\r\n # Go To: %FLAG POINTERS\r\n data = jumpTo(data, '%FLAG POINTERS')\r\n\r\n data = jumpTo(data, '%FORMAT')\r\n numPerLine, data = getInteger(data)\r\n fieldsize, data = getInteger(data)\r\n data = skipLine(data)\r\n \r\n temp = []\r\n numread = 0\r\n for j in range(0, 31 / numPerLine + 1):\r\n for i in range(0, numPerLine):\r\n temp.append(int(data[0:8]))\r\n data = data[8:len(data)]\r\n numread += 1\r\n if (numread == 31):\r\n break\r\n data = skipLine(data)\r\n \r\n [natoms, ntypes, nbonh, mbona, ntheth, mtheta, nphih, mphia, nhparm, nparm, nnb, nres, nbona, ntheta, nphia, numbnd, numang, nptra, natyp, nphb, ifpert, nbper, ngper, ndper, mbper, mgper, mdper, ifbox, nmxrs, ifcap, numextra] = temp \r\n\r\n\r\n #################################################\r\n # Read AtomTypes\r\n atomnames = []\r\n charges = []\r\n masses = []\r\n atindex = []\r\n exclusions = []\r\n nparams = []\r\n reslabels = []\r\n respointers = []\r\n forceconstants = [[], [], []] # bond, angle, dihedral\r\n equilvals = [[], [], [[], []]] # bond, angle, dihedral\r\n scee_scales = []\r\n scnb_scales = []\r\n solty = []\r\n lj_acoef = []\r\n lj_bcoef = []\r\n\r\n data = parse(data, atomnames, \"ATOM_NAME\", natoms, str) \r\n data = parse(data, charges, \"CHARGE\", natoms, float)\r\n data = parse(data, masses, \"MASS\", natoms, float)\r\n data = parse(data, atindex, \"ATOM_TYPE_INDEX\", natoms, int)\r\n data = parse(data, exclusions, \"NUMBER_EXCLUDED_ATOMS\", natoms, int)\r\n data = parse(data, nparams, \"NONBONDED_PARM_INDEX\", ntypes*ntypes, int)\r\n data = parse(data, reslabels, \"RESIDUE_LABEL\", nres, str)\r\n data = parse(data, respointers, \"RESIDUE_POINTER\", nres, int)\r\n data = parse(data, forceconstants[0], \"BOND_FORCE_CONSTANT\", numbnd, float)\r\n data = parse(data, equilvals[0], \"BOND_EQUIL_VALUE\", numbnd, float)\r\n data = parse(data, forceconstants[1], \"ANGLE_FORCE_CONSTANT\", numang, float)\r\n data = parse(data, equilvals[1], \"ANGLE_EQUIL_VALUE\", numang, float)\r\n data = parse(data, forceconstants[2], \"DIHEDRAL_FORCE_CONSTANT\", nptra, float)\r\n data = parse(data, equilvals[2][0], \"DIHEDRAL_PERIODICITY\", nptra, float)\r\n data = parse(data, equilvals[2][1], \"DIHEDRAL_PHASE\", nptra, float)\r\n if (scan(data, \"SCEE_SCALE_FACTOR\")):\r\n data = parse(data, scee_scales, \"SCEE_SCALE_FACTORS\", nptra, float)\r\n else:\r\n for i in range(0, nptra):\r\n scee_scales.append(1.2) # Default \r\n if (scan(data, \"SCNB_SCALE_FACTOR\")):\r\n data = parse(data, scnb_scales, \"SCNB_SCALE_FACTORS\", nptra, float)\r\n else:\r\n for i in range(0, nptra):\r\n scnb_scales.append(2.0) # Default \r\n\r\n data = parse(data, solty, \"SOLTY\", natyp, float)\r\n data = parse(data, lj_acoef, \"LENNARD_JONES_ACOEF\", ntypes*(ntypes+1)/2, float)\r\n data = parse(data, lj_bcoef, \"LENNARD_JONES_BCOEF\", ntypes*(ntypes+1)/2, float)\r\n\r\n\r\n ##########################################################\r\n # STRUCTURE\r\n\r\n bonds = [[], []] # With H, Without 
H\r\n angles = [[], []] # With H, Without H\r\n dihedrals = [[], []] # With H, Without H\r\n impropers = [[], []] # With H, Without H\r\n excluded_atoms = [] \r\n hbond_acoef = []\r\n hbond_bcoef = []\r\n hbcut = []\r\n amber_atom_types = []\r\n tree_chain = []\r\n join_array = []\r\n irotat = []\r\n radii = []\r\n screen = []\r\n\r\n data = parse(data, bonds[0], \"BONDS_INC_HYDROGEN\", nbonh, int, 3)\r\n data = parse(data, bonds[1], \"BONDS_WITHOUT_HYDROGEN\", nbona, int, 3)\r\n data = parse(data, angles[0], \"ANGLES_INC_HYDROGEN\", ntheth, int, 4)\r\n data = parse(data, angles[1], \"ANGLES_WITHOUT_HYDROGEN\", ntheta, int, 4)\r\n data = parse(data, dihedrals[0], \"DIHEDRALS_INC_HYDROGEN\", nphih, int, 5)\r\n data = parse(data, dihedrals[1], \"DIHEDRALS_WITHOUT_HYDROGEN\", nphia, int, 5)\r\n \r\n # MERGE ARRAYS - PM HANDLES THE H+\r\n final_bonds = bonds[0] + bonds[1]\r\n final_angles = angles[0] + angles[1]\r\n final_dihedrals = dihedrals[0] + dihedrals[1]\r\n final_impropers = []\r\n \r\n # CLEAN UP THE TRASH\r\n del(bonds)\r\n del(angles)\r\n del(dihedrals)\r\n \r\n\r\n # Move impropers into their own array\r\n i = 0\r\n while (i < len(final_dihedrals)):\r\n if (final_dihedrals[i][2] < 0): # 1-4 exclusions are handled by our back end\r\n final_dihedrals[i][2] *= -1\r\n if (final_dihedrals[i][3] < 0):\r\n final_dihedrals[i][3] *= -1 # Make + again\r\n final_impropers.append(final_dihedrals[i])\r\n final_dihedrals.remove(final_dihedrals[i])\r\n i -= 1\r\n i += 1\r\n\r\n # Convert charge units\r\n for i in range(0, len(charges)):\r\n charges[i] /= 18.223\r\n\r\n\r\n data = parse(data, excluded_atoms, \"EXCLUDED_ATOMS_LIST\", nnb, int)\r\n data = parse(data, hbond_acoef, \"HBOND_ACOEF\", nphb, float)\r\n data = parse(data, hbond_bcoef, \"HBOND_BCOEF\", nphb, float)\r\n data = parse(data, hbcut, \"HBCUT\", nphb, float)\r\n data = parse(data, amber_atom_types, \"AMBER_ATOM_TYPE\", natoms, str)\r\n data = parse(data, tree_chain, \"TREE_CHAIN_CLASSIFICATION\", natoms, str)\r\n data = parse(data, join_array, \"JOIN_ARRAY\", natoms, int)\r\n data = parse(data, irotat, \"IROTAT\", natoms, int)\r\n data = parse(data, radii, \"RADII\", natoms, float)\r\n data = parse(data, screen, \"SCREEN\", natoms, float)\r\n\r\n # Further process dihedrals and impropers\r\n # Deal with multiplicity\r\n # A bit ugly, but the fastest for now\r\n # forceconstants[2][dihedrals[0][i][4]-1], int(equilvals[2][0][dihedrals[0][i][4]-1]), equilvals[2][1][dihedrals[0][i][4]-1]\r\n\r\n mult_di = dict()\r\n mult_im = dict()\r\n for i in range(0, len(final_dihedrals)):\r\n di_id = str(final_dihedrals[i][0])+' '+str(final_dihedrals[i][1])+' '+str(final_dihedrals[i][2])+' '+str(final_dihedrals[i][3])\r\n if (not mult_di.has_key(di_id)):\r\n mult_di[di_id] = [1, False, [forceconstants[2][final_dihedrals[i][4]-1]], [int(equilvals[2][0][final_dihedrals[i][4]-1])], [equilvals[2][1][final_dihedrals[i][4]-1]]]\r\n else:\r\n mult_di[di_id][0] += 1\r\n mult_di[di_id][2].append(forceconstants[2][final_dihedrals[i][4]-1])\r\n mult_di[di_id][3].append(int(equilvals[2][0][final_dihedrals[i][4]-1]))\r\n mult_di[di_id][4].append(equilvals[2][1][final_dihedrals[i][4]-1])\r\n \r\n for i in range(0, len(final_impropers)):\r\n im_id = str(final_impropers[i][0])+' '+str(final_impropers[i][1])+' '+str(final_impropers[i][2])+' '+str(final_impropers[i][3])\r\n if (not mult_im.has_key(di_id)):\r\n mult_im[im_id] = [1, False, [forceconstants[2][final_impropers[i][4]-1]], [int(equilvals[2][0][final_impropers[i][4]-1])], 
[equilvals[2][1][final_impropers[i][4]-1]]]\r\n else:\r\n mult_im[im_id][0] += 1\r\n mult_im[im_id][2].append(forceconstants[2][final_impropers[i][4]-1])\r\n mult_im[im_id][3].append(int(equilvals[2][0][final_impropers[i][4]-1]))\r\n mult_im[im_id][4].append(equilvals[2][1][final_impropers[i][4]-1])\r\n\r\n\r\n\r\n \r\n #[natoms, ntypes, nbonh, mbona, ntheth, mtheta, nphih, mphia, nhparm, nparm, nnb, nres, nbona, ntheta, nphia, numbnd, numang, nptra, natyp, nphb, ifpert, nbper, ngper, ndper, mbper, mgper, mdper, ifbox, nmxrs, ifcap, numextra] = temp \r\n #phys.myPSF.createAll(natoms, nbonh+mbona, ntheth+mtheta,\r\n # len(dihedrals[0])+len(dihedrals[1]),\r\n # len(impropers[0])+len(impropers[1]),\r\n # 0, 0, 0, 0)\r\n \r\n # Add atoms\r\n curres = 1\r\n for i in range(0, natoms):\r\n phys.myPSF.addAtom(i, 'SIM', curres, reslabels[curres-1],\r\n atomnames[i], atomnames[i], charges[i],\r\n masses[i]) \r\n if (curres != nres and i >= respointers[curres]):\r\n curres += 1\r\n\r\n # Add bonds\r\n for i in range(0, nbonh+nbona):\r\n phys.myPSF.addBond(i+1, final_bonds[i][0]/3+1, final_bonds[i][1]/3+1)\r\n phys.myPAR.addBond(i+1, atomnames[final_bonds[i][0]/3], atomnames[final_bonds[i][1]/3], forceconstants[0][final_bonds[i][2]/3], equilvals[0][final_bonds[i][2]/3])\r\n \r\n # Add angles\r\n for i in range(0, ntheth+ntheta):\r\n phys.myPSF.addAngle(i+1, final_angles[i][0]/3+1, final_angles[i][1]/3+1, final_angles[i][2]/3+1)\r\n phys.myPAR.addAngle(i+1, atomnames[final_angles[i][0]/3], atomnames[final_angles[i][1]/3], atomnames[final_angles[i][2]/3], forceconstants[1][final_angles[i][3]/3], equilvals[1][final_angles[i][3]/3])\r\n \r\n # Add dihedrals\r\n for i in range(0, len(final_dihedrals)):\r\n di_id = str(final_dihedrals[i][0])+' '+str(final_dihedrals[i][1])+' '+str(final_dihedrals[i][2])+' '+str(final_dihedrals[i][3])\r\n mult = mult_di[di_id][0]\r\n checked = mult_di[di_id][1]\r\n print di_id, \" \", mult\r\n if (not checked):\r\n if (mult == 1):\r\n phys.myPSF.addDihedral(i+1, final_dihedrals[i][0]/3+1, final_dihedrals[i][1]/3+1, int(numpy.abs(final_dihedrals[i][2]))/3+1, final_dihedrals[i][3]/3+1)\r\n phys.myPAR.addDihedral(i+1, atomnames[final_dihedrals[i][0]/3], atomnames[final_dihedrals[i][1]/3], atomnames[int(numpy.abs(final_dihedrals[i][2]))/3], atomnames[final_dihedrals[i][3]/3], forceconstants[2][final_dihedrals[i][4]-1], int(equilvals[2][0][final_dihedrals[i][4]-1]), equilvals[2][1][final_dihedrals[i][4]-1])\r\n else:\r\n mult_di[di_id][1] = True\r\n # Add dihedral with the appropriate multiplicity\r\n # Force constants, periodicity and phase shifts are in [2], [3], and [4] respectively\r\n fcvec = PARReader.VectorOfDouble()\r\n periodvec = PARReader.VectorOfInt()\r\n phasevec = PARReader.VectorOfDouble() \r\n for j in range(0, len(mult_di[di_id][2])):\r\n fcvec.push_back(mult_di[di_id][2][j])\r\n periodvec.push_back(mult_di[di_id][3][j])\r\n phasevec.push_back(mult_di[di_id][4][j])\r\n phys.myPSF.addDihedral(i+1, final_dihedrals[i][0]/3+1, final_dihedrals[i][1]/3+1, int(numpy.abs(final_dihedrals[i][2]))/3+1, final_dihedrals[i][3]/3+1)\r\n phys.myPAR.addDihedral(i+1, atomnames[final_dihedrals[i][0]/3], atomnames[final_dihedrals[i][1]/3], atomnames[int(numpy.abs(final_dihedrals[i][2]))/3], atomnames[final_dihedrals[i][3]/3], mult, fcvec, periodvec, phasevec)\r\n \r\n\r\n\r\n\r\n for i in range(0, len(final_impropers)):\r\n im_id = str(final_impropers[i][0])+' '+str(final_impropers[i][1])+' '+str(final_impropers[i][2])+' '+str(final_impropers[i][3])\r\n mult = mult_im[im_id][0]\r\n 
checked = mult_im[im_id][1]\r\n print im_id, \" \", mult\r\n if (not checked):\r\n if (mult == 1):\r\n phys.myPSF.addImproper(i+1, final_impropers[i][0]/3+1, final_impropers[i][1]/3+1, int(numpy.abs(final_impropers[i][2]))/3+1, final_impropers[i][3]/3+1)\r\n phys.myPAR.addImproper(i+1, atomnames[final_impropers[i][0]/3], atomnames[final_impropers[i][1]/3], atomnames[int(numpy.abs(final_impropers[i][2]))/3], atomnames[final_impropers[i][3]/3], forceconstants[2][final_impropers[i][4]-1], int(equilvals[2][0][final_impropers[i][4]-1]), equilvals[2][1][final_impropers[i][4]-1])\r\n else:\r\n mult_im[im_id][1] = True\r\n # Add dihedral with the appropriate multiplicity\r\n # Force constants, periodicity and phase shifts are in [2], [3], and [4] respectively\r\n fcvec = PARReader.VectorOfDouble()\r\n periodvec = PARReader.VectorOfInt()\r\n phasevec = PARReader.VectorOfDouble() \r\n for j in range(0, len(mult_im[im_id][2])):\r\n fcvec.push_back(mult_im[im_id][2][j])\r\n periodvec.push_back(mult_im[im_id][3][j])\r\n phasevec.push_back(mult_im[im_id][4][j])\r\n phys.myPSF.addImproper(i+1, final_impropers[i][0]/3+1, final_impropers[i][1]/3+1, int(numpy.abs(final_impropers[i][2]))/3+1, final_impropers[i][3]/3+1)\r\n phys.myPAR.addImproper(i+1, atomnames[final_impropers[i][0]/3], atomnames[final_impropers[i][1]/3], atomnames[int(numpy.abs(final_impropers[i][2]))/3], atomnames[final_impropers[i][3]/3], mult, fcvec, periodvec, phasevec)\r\n\r\n \r\n # Need to add garbage nonbonded stuff for now\r\n for i in range(0, natoms):\r\n phys.myPAR.addNonbonded(i, atomnames[i], 1, 1, 1, 1, 1, 1)\r\n\r\n # Add VDW parameters\r\n # AMBER has the Aij and Bij already in the parameter file\r\n # This actually makes life easier.\r\n # CHARMM does not, they simply have the original sigma and epsilon.\r\n # To compensate for this, for now we will leave the nonbondeds empty in phys.myPAR\r\n # We will then access the LennardJones parameter table in Topology directly\r\n k = 0\r\n phys.myTop.resizeLennardJonesParameters(ntypes)\r\n for i in range(0, ntypes):\r\n for j in range(i, ntypes):\r\n params = GenericTopology.LennardJonesParameters(lj_acoef[k], lj_bcoef[k])\r\n k += 1\r\n phys.myTop.setLennardJonesParameters(i, j, params)\r\n \r\n phys.myPAR.readFlag = 1\r\n phys.build()", "def read_input():\n # Use with to make sure the file will be closed after the block executed\n with open('snapshot_input.txt') as f:\n # Split the line at line breaks\n x = f.read().splitlines()\n # Get the data of restructuring, three positive integers N , C , and D\n # Use generator expression for time and space efficiency\n restructuring_info = (i.split() for i in x if len(i.split())==3)\n # Get the data of single machine, four integers D, P, R and G\n machine_info = (i.split() for i in x if len(i.split())!=3)\n # Get the length of restructuring data\n length = sum(1 for i in x if len(i.split())==3)\n\n return restructuring_info, machine_info, length", "def spot1d_sec(infile, sequence):\n return np.loadtxt(infile, usecols=[14, 13, 12], skiprows=1).reshape((1, -1, 3))", "def scampi(infile, sequence):\n aa2topo = {\n 'I': [1, 0, 0, 0],\n 'M': [0, 1, 0, 0],\n 'O': [0, 0, 1, 0]\n }\n result = []\n with open(infile, 'r') as fh:\n for line in fh:\n if not line.startswith('>'):\n for aa in line.strip():\n result.append(aa2topo[aa])\n\n return np.array([result])", "def test_read_0_1_smirnoff(self):\n ForceField(\n get_data_file_path(\n \"test_forcefields/smirnoff99Frosst_reference_0_1_spec.offxml\"\n )\n )", "def check_dense_gas(dir='./'):\n import 
glob\n import pandas as pd\n ff = glob.glob('*.gas')\n\n for i in ff:\n f = pd.read_pickle(i)\n print(i)\n print (f['f_H21'] > 0.0).sum()\n\n print(\"Total dense gas mass: \")\n print(f['m'] * f['f_H21']).sum()\n return None", "def ReadTinker():\n # Total Potential Energy : {f} Kcal/mole\n total_line = \" Total Potential Energy :\"\n with open('LICHM_TINKEREnergy_0.log') as f:\n for line in f:\n if line.startswith(total_line):\n # print(line)\n TinkE = re.findall(r'\\-\\d+\\.*\\d*', line)\n TinkE = float(TinkE[0])\n # if AMOEBA == True:\n # if line.startswith(\"Polarization\"):\n # Epol = re.findall(r'\\-\\d+\\.*\\d*', line)\n # Epol = float(Epol[0])\n # elif line.startswith(\"Implicit Solvation\")\n # Esolv = re.findall(r'\\-\\d+\\.*\\d*', line)\n # Esolv = float(Esolv[0])\n f.close()\n # if AMOEBA == True:\n # TINKERPolForces = EPol + ESolv\n # TinkE += TINKERPolForces\n #\n TinkE *= kcal2ev\n return TinkE", "def _read_libffm_file(self, filename):\n\n X_true = np.zeros((self.num_rows, self.num_features))\n y_true = np.zeros((self.num_rows, 1))\n field_true = np.zeros((self.num_features, 1))\n with open(filename, 'r') as f:\n i = 0\n for line in f:\n tmp_row = line.replace('\\n', '').split(' ')\n\n # extract label\n y_true[i] = int(tmp_row[0])\n\n # extract data and fields\n for k in range(1, len(tmp_row)):\n if len(tmp_row[k]) > 0:\n tmp_str = tmp_row[k].split(':')\n j = int(tmp_str[1])\n field_true[j] = int(tmp_str[0])\n tmp_data = float(tmp_str[2])\n X_true[i, j] = tmp_data\n i = i + 1\n\n return X_true, y_true, field_true", "def event_m10_29_x38(flag1=105405):\r\n \"\"\"State 0,1: Intrusion MAP determination\"\"\"\r\n CompareEventFlag(0, flag1, 0)\r\n if ConditionGroup(0):\r\n \"\"\"State 2: Move to: Madura side\"\"\"\r\n return 0\r\n else:\r\n \"\"\"State 3: Move to: Forest side of the imaginary shadow\"\"\"\r\n return 1", "def atmparamread(filename):\n f = open(filename, 'r')\n f.readline()\n line = f.readline()\n #Td = float(line.split()[0])\n #Pd = float(line.split()[1])\n #Mc = float(line.split()[2])\n #rc = float(line.split()[3])\n n = int(line.split()[0])\n f.readline()\n atm = 0*numpy.ndarray(shape=(n, ncol), dtype=float)\n S = 0*numpy.ndarray(shape=(n), dtype=float)\n for i in range(n):\n line = f.readline()\n S[i] = float(line.split()[0])\n for j in range(ncol ):\n atm[i, j] = float(line.split()[j+1])\n f.close()\n return atm, S", "def sniff( self, filename ):\n handle = open(filename)\n line1 = handle.readline()\n line2 = handle.readline()\n handle.close()\n header = line1.split()\n first = line2.split()\n\n # le nombre de marqueur definit dans l'entete doit correspondre au nombre de marqueur definit pour le premier individu\n if ( (len(header)*2)+1 != len(first) ):\n return False\n # le nombre d'allele est pair\n if ( (len(first)-1)%2 != 0 ):\n return False\n\n return True", "def read_orig_values(self):\n\n self.ovmap = {}\n\n for line in open(self.mname, 'r'):\n featval, bits = line.strip().split(',')\n feat, val = featval.split(':')\n\n for i, b in enumerate(bits):\n f = '{0}:b{1}'.format(feat, i + 1)\n v = self.fvmap.dir[(f, '1')]\n\n if v not in self.ovmap:\n self.ovmap[v] = [feat]\n\n if -v not in self.ovmap:\n self.ovmap[-v] = [feat]\n\n self.ovmap[v if b == '1' else -v].append(val)", "def read_cleaned(file):\n wvlen, band, lamFlam, elamFlam, flamFlam, beam, odate, ref = [],[],[],[],[],[],[],[]\n with open(file, 'r') as f_in:\n for line in f_in:\n try:\n # ensure line contains data:\n a = float(line[0])\n except ValueError:\n a = 'dummy'\n try:\n # ensure mag or flux 
entry is not '--'\n m = float(line.split(' ')[2])\n except ValueError:\n m = 'dummy'\n \n if isinstance(a, float) and isinstance(m, float):\n wvlen.append(float(line.strip().split(' ')[0])) # in metres\n band.append(line.strip().split(' ')[1])\n lamFlam.append(float(line.strip().split(' ')[2]))\n elamFlam.append(line.strip().split(' ')[3])\n flamFlam.append(line.strip().split(' ')[4])\n beam.append(line.strip().split(' ')[5])\n odate.append(line.strip().split(' ')[6])\n ref.append(line.strip().split(' ')[7])\n \n return wvlen, band, lamFlam, elamFlam, flamFlam, beam, odate, ref", "def read_data(feature_file, label_file):", "def test_io_import_fmi_pgm_shape():\n root_path = pysteps.rcparams.data_sources[\"fmi\"][\"root_path\"]\n filename = os.path.join(root_path, \"20160928\",\n \"201609281600_fmi.radar.composite.lowest_FIN_SUOMI1.pgm.gz\")\n R, _, _ = pysteps.io.import_fmi_pgm(filename, gzipped=True)\n assert R.shape == (1226, 760)", "def read_multinest_file(shower_name):\n\twith open(fit_dir+'fit_'+shower_name+'.out') as file:\n\t\tline = file.readline().split()\n\t\tslope = 1-float(line[0])\n\t\tslope_err_plus = -float(line[1])\n\t\tslope_err_minus = float(line[2])\n\t\ty_int = float(file.readline().split()[0])\n\treturn slope, slope_err_plus, slope_err_minus, y_int", "def readShiftFile(self, filename):\n order = []\n fshift = open(filename,'r')\n flines = fshift.readlines()\n fshift.close()\n\n common = [f.strip('#').strip() for f in flines if f.startswith('#')]\n c=[line.split(': ') for line in common]\n\n # Remove any line comments in the shift file - lines starting with '#'\n # but not part of the common block.\n for l in c:\n if l[0] not in ['frame', 'refimage', 'form', 'units']:\n c.remove(l)\n\n for line in c: line[1]=line[1].strip()\n self.update(c)\n\n files = [f.strip().split(' ',1) for f in flines if not (f.startswith('#') or f.strip() == '')]\n for f in files:\n order.append(f[0])\n\n self['order'] = order\n\n for f in files:\n # Check to see if filename provided is a full filename that corresponds\n # to a file on the path. If not, try to convert given rootname into\n # a valid filename based on available files. 
This may or may not\n # define the correct filename, which is why it prints out what it is\n # doing, so that the user can verify and edit the shiftfile if needed.\n #NOTE:\n # Supporting the specification of only rootnames in the shiftfile with this\n # filename expansion is NOT to be documented, but provided solely as\n # an undocumented, dangerous and not fully supported helper function for\n # some backwards compatibility.\n if not os.path.exists(f[0]):\n f[0] = fu.buildRootname(f[0])\n print('Defining filename in shiftfile as: ', f[0])\n\n f[1] = f[1].split()\n try:\n f[1] = [float(s) for s in f[1]]\n except:\n msg = 'Cannot read in ', s, ' from shiftfile ', filename, ' as a float number'\n raise ValueError(msg)\n msg = \"At least 2 and at most 4 shift values should be provided in a shiftfile\"\n if len(f[1]) < 2:\n raise ValueError(msg)\n elif len(f[1]) == 3:\n f[1].append(1.0)\n elif len(f[1]) == 2:\n f[1].extend([0.0, 1.0])\n elif len(f[1]) > 4:\n raise ValueError(msg)\n\n fdict = dict(files)\n self.update(fdict)", "def findfeatures(self):\n self.set_wdiff()\n\n #xp, wp=st.findfeatures(self.xarr, self.farr, self.slines, self.sfluxes,\n # self.ws, mdiff=self.mdiff, wdiff=self.wdiff, sigma=self.sigma, niter=self.niter, sections=3)\n xp,wp=st.crosslinematch(self.xarr, self.farr, self.slines, self.sfluxes,\n self.ws, mdiff=self.mdiff, wdiff=20, sigma=self.sigma, niter=self.niter)\n for x, w in zip(xp, wp):\n if w not in self.wp and w>-1: \n self.xp.append(x)\n self.wp.append(w)\n self.plotFeatures()\n self.redraw_canvas()", "def readMAXIPOLdataBrad(filename, day=False, sigcut=0.0, ctscut=0, cols=None,\n nhits=None, neg=False):\n\n if cols is None: cols=(2,3)\n\n print(\"Reading data from columns %d-%d\" % tuple(cols))\n \n ngood = 0;\n ncut = 0;\n offsets = {'el': 0.295, 'az': 0.05} ## degrees (brad)\n #offsets = {'el': 0.289, 'az': 0.065} ## degrees (Jeff)\n az=[]; el=[]; beam=[]; sig=[]; cts=[]\n if filename.endswith('gz'):\n fileobj = gzip.open(filename, \"r\");\n else:\n fileobj = open(filename, \"r\");\n for line in fileobj:\n line = line.strip().split()\n# az1, el1, b1, s1, c1 = (\n# float(line[0]), float(line[1]), float(line[2]), float(line[3]), int(line[4]))\n az1, el1, b1, s1, c1= (\n float(line[0]), float(line[1]), float(line[cols[0]]), float(line[cols[1]]),\n int(line[-1]))\n \n if nhits:\n s1 /= sqrt(c1)\n\n if day:\n az1 += offsets['az']*60.0 ## arcmin\n el1 += offsets['el']*60.0\n \n if s1>sigcut and c1>ctscut:\n ## only keep pixels with good data\n az.append(az1); el.append(el1); beam.append(b1);\n sig.append(s1); cts.append(c1)\n ngood += 1\n else:\n ncut += 1\n \n\n fileobj.close()\n \n print('Data read: ncut=%d, ngood=%d' % (ncut, ngood))\n\n beam = asarray(beam, float64)\n sig = asarray(sig, float64)\n az = asarray(az, float64)\n el = asarray(el, float64)\n cts = asarray(cts, float64)\n\n if neg is not False and ((neg is None and beam.mean() < 0) or neg):\n print('negating data')\n beam = -beam\n\n return BeamData(az, el, beam, sig, cts=cts)", "def read_inputs(self):\n curdir = os.getcwd()\n os.chdir(self.fst_dir)\n rstat = self.readFST()\n if rstat == 0:\n os.chdir(curdir)\n return 0\n # the names of the next files are either set by caller or come from the reading the FAST file\n rstat = self.readNoise()\n rstat = self.readAD()\n rstat = self.readBlade()\n rstat = self.readPtfm()\n os.chdir(curdir)", "def main(sourceDataFile, w):\n ## Extract test data\n import csv\n import math\n import numpy as np\n\n testMatrix = []\n print \"opening csv into python object\"\n 
with open(sourceDataFile) as source:\n count = 0\n reader = csv.reader(source)\n for r in reader:\n if count == 0:\n count += 1\n continue\n testMatrix.append(r)\n count += 1\n\n ## Create the feature vecotrs\n tripTime = []\n tripDistance = []\n straightTripDistance = []\n plat = []\n plong = []\n dlat = []\n dlong = []\n pickupTimeRawest = []\n count = 0\n\n for row in testMatrix:\n if len(row) == 14:\n continue\n if count % 500000 == 0:\n print \"count (test Matrix read): {}\".format(count)\n tripTime.append(float(row[9]))\n tripDistance.append(float(row[10]))\n straightTripDistance.append(float(row[15]))\n plat.append(float(row[12]))\n plong.append(float(row[11]))\n dlat.append(float(row[14]))\n dlong.append(float(row[13]))\n pickupTimeRawest.append(row[6])\n count +=1\n\n ## Transform pickupTime to a binary variable\n pickupTime = []\n from runLSFTrain import convertTimeToPlottable\n pickupTimeRaw = [convertTimeToPlottable(time.split()[1]) for time in pickupTimeRawest]\n for time in pickupTimeRaw:\n if (5 <= time) and (time <= 17):\n pickupTime.append(1)\n else:\n assert (time < 5) or (time > 17)\n pickupTime.append(0)\n\n ## Normalize each feature (turn it into a standard gaussian with mean 0, std 1)\n print \"normalizing features\"\n for featureList in (tripDistance, straightTripDistance, plat, plong, dlat, dlong, pickupTime):\n mean = np.mean(featureList)\n std = np.std(featureList)\n for index, elem in enumerate(featureList):\n featureList[index] = (float(elem) - mean)/float(std)\n \n featureMatrix = [dlat, dlong, plat, plong, pickupTime, straightTripDistance, tripDistance, [1 for elem in dlat]]\n ## Calculate OLS, TLS, and correlation between expected and predicted values\n olsResiduals = []\n tlsResiduals = []\n predictedVals = []\n for index in range(len(dlat)):\n X = [featureMatrix[column][index] for column in range(8)]\n predicted = np.dot(w, X)\n trueVal = tripTime[index]\n point = list(X)\n point[-1] = trueVal\n olsResiduals.append(predicted - trueVal)\n tlsResiduals.append(orthogonalDistance(w, point))\n predictedVals.append(predicted)\n squareOLSResiduals = [residual * residual for residual in olsResiduals]\n squareTLSResiduals = [residual * residual for residual in tlsResiduals]\n OLSError = np.mean(squareOLSResiduals)\n TLSError = np.mean(squareTLSResiduals)\n r = np.corrcoef(predictedVals, tripTime)[0][1]\n\n print \"OLS accuracy: {}\".format(OLSError)\n print \"TLS accuracy: {}\".format(TLSError)\n print \"Correlation between predicted and expected values: {}\".format(r)", "def read_feature_dict(path):\n feature_dict = []\n with open(path, 'r', encoding='utf-8') as dictfile:\n for line in dictfile:\n if line.lstrip(' \\t').startswith('#'):\n # This line is a comment line, ignore it\n continue\n else:\n # This line contains one or more tokens, split them up and wrap them in the format for VisaS POS files.\n tokens = line.rstrip(' \\t\\n').rstrip(' \\t').split()\n dict_tokens = \"\"\n for token in tokens:\n quantifier = \"\"\n if re.match(\"\\(.+\\)([?*+])\",token):\n quantifier = re.match(\"\\(.+\\)([?*+])\",token).group(1)\n token = token.lstrip('(').rstrip(')?*+')\n if '_' in token:\n if token.startswith('_'):\n # token starts with '_' and is a POS tag\n if quantifier:\n dict_tokens = dict_tokens + \"(?:(\\d+)\\.(\\d+):\\S+?_\" + token.lstrip('_').replace(\"(\",\"(?:\") + \" )\" + quantifier\n else:\n dict_tokens = dict_tokens + \"(\\d+)\\.(\\d+):\\S+?_\" + token.lstrip('_').replace(\"(\",\"(?:\") + \" \"\n else:\n try:\n # token is a lemma with POS tag attached, 
split the lemma and pos tag\n pos_token = token.split('_')\n if quantifier:\n dict_tokens = dict_tokens + \"(?:(\\d+)\\.(\\d+):\" + pos_token[0].replace(\"(\",\"(?:\") + \"_\" + pos_token[1].replace(\"(\",\"(?:\") + \" )\" + quantifier\n else:\n dict_tokens = dict_tokens + \"(\\d+)\\.(\\d+):\" + pos_token[0].replace(\"(\",\"(?:\") + \"_\" + pos_token[1].replace(\"(\",\"(?:\") + \" \"\n\n except IndexError:\n print(\"Warning! Invalid token found in line '\" + line + \"'\")\n elif token == '...':\n # ... is converted to one or more arbitrary tokens\n dict_tokens = dict_tokens + \"(?:(\\d+)\\.(\\d+):\\S+_\\S+? )+\"\n else:\n # token is a lemma without POS tag\n if quantifier:\n dict_tokens = dict_tokens + \"(?:(\\d+)\\.(\\d+):\" + token.replace(\"(\", \"(?:\") + \"_\\S+? )\" + quantifier\n else:\n dict_tokens = dict_tokens + \"(\\d+)\\.(\\d+):\" + token.replace(\"(\", \"(?:\") + \"_\\S+? \"\n if dict_tokens:\n feature_dict.append(dict_tokens)\n if len(feature_dict) is 0:\n print(\"Warning! No valid entries found in dictionary \" + path)\n return None\n else:\n return feature_dict", "def read_flow(filename):\n with open(filename, 'rb') as f:\n magic = np.fromfile(f, np.float32, count=1)\n if 202021.25 != magic:\n print('Magic number incorrect. Invalid .flo file')\n else:\n w = np.fromfile(f, np.int32, count=1)\n h = np.fromfile(f, np.int32, count=1)\n data = np.fromfile(f, np.float32, count=int(2*w*h))\n # Reshape data into 3D array (columns, rows, bands)\n return np.resize(data, (h[0], w[0], 2))", "def parse_data(filepath):\n settings = dict()\n intensity = list()\n # Boolean flags to check when to start/stop\n # reading parameters\n read_params = False\n read_int = False\n read_zeeman = False\n finished = False\n fieldoff_intensities = list()\n fieldon_intensities = list()\n with open(filepath) as read_file:\n for line in read_file:\n if \"*****\" in line:\n read_int = False\n if finished is True:\n break\n if \"Scan\" in line:\n if \"[Field ON]\" in line:\n read_zeeman = True\n scan_details = line.split()\n settings[\"ID\"] = int(scan_details[1])\n # settings[\"Date\"] = str(scan_details[4])\n read_params = True\n read_int = False\n continue\n if read_int is True:\n if read_zeeman is False:\n fieldoff_intensities += [float(value) for value in line.split()]\n else:\n fieldon_intensities += [float(value) for value in line.split()]\n finished = True\n if read_params is True and len(line.split()) > 1:\n # Read in the frequency step, frequency, and other info\n # needed to reconstruct the frequency data\n scan_params = line.split()\n shift = 1\n settings[\"Frequency\"] = float(scan_params[0])\n settings[\"Frequency step\"] = float(scan_params[1])\n if len(scan_params) == 4:\n settings[\"Multiplier\"] = 1.\n shift = 0\n # If the multiplier data is there, we don't shift the read\n # index over by one\n else:\n settings[\"Multiplier\"] = float(scan_params[2])\n settings[\"Center\"] = float(scan_params[2 + shift])\n settings[\"Points\"] = int(scan_params[3 + shift])\n read_params = False\n # Start reading intensities immediately afterwards\n read_int = True\n continue\n fieldoff_intensities = np.array(fieldoff_intensities)\n fieldon_intensities = np.array(fieldon_intensities)\n\n # Generate the frequency grid\n settings[\"Frequency step\"] = settings[\"Frequency step\"] * settings[\"Multiplier\"]\n # This calculates the length of either side\n side_length = settings[\"Frequency step\"] * (settings[\"Points\"] // 2)\n start_freq = settings[\"Frequency\"] - side_length\n end_freq = 
settings[\"Frequency\"] + side_length\n frequency = np.linspace(start_freq, end_freq, settings[\"Points\"])\n\n return frequency, fieldoff_intensities, fieldon_intensities, settings", "def main():\n regexham = r'\\s+\\((\\d+,\\s*\\d+)\\)\\s+([\\-+]?\\d+\\.\\d+[eEdD]?[\\-+]?\\d+)' #to extract the Hamiltonian.\n root = '.'\n #fname = 'output_files/'\n ciffci = CIFlow_Reader('testfci.dat', regexp = regexham , read_ham= True)\n ciffcipar = CIFlow_Reader( 'psi0_output10outputfci.dat', regexp = regexham , read_ham = True)\n #print ciffci.calc_overlap(cifdoci)\n #print e.get_groundstate('00000000000011|00000000000011') \n\n psir = rp.PsiReader('psi0_output10.dat', isbig = False, numorbs = -1 , read_ints = False)\n\n detlist = dw.cimain(psir.values['nalpha'],psir.values['nbeta'], psir.values['norb'], [range(1,psir.values['nalpha']+psir.values['nbeta']), []], [] , fname = 'determinants.dat' ,ref = [lambda x , y , z : psir.get_hf_orbs()] , add_frozen = 0, write = False) #CISDDOCI\n count = 0\n for det in detlist:\n for det2 in detlist:\n #+ because the eigenvectors have already a different phasefactor of 1.\n if abs(ciffci.get_mat_element(det[0]+'|'+det[1], det2[0]+'|'+det2[1]) - ciffcipar.get_mat_element(det[0]+'|'+det[1], det2[0]+'|'+det2[1]) ) > 1e-10 :\n print 'difference in hamiltonian row: ' , det[0]+'|'+det[1] , \" col: \" , det2[0]+'|'+det2[1] , 'fci: ', ciffci.get_mat_element(det[0]+'|'+det[1], det2[0]+'|'+det2[1]) , 'fciaddres: ' , ciffcipar.get_mat_element(det[0]+'|'+det[1],det2[0]+'|'+det2[1]) \n count += 1\n print 'There were ' , count , ' different elements'", "def loadFromTSreduceFile(filename):\n\tprint \"Loading the tsreduce file: %s\"%filename\n\tultracam = False\n\tultraspec = False\n\tsaft = True\n\tinputFile = open(filename,'rt')\n\tfirstLine = inputFile.readline()\n\tif \"tsreduce\" not in firstLine:\n\t\tprint \"This is not a tsreduce file. 
(I couldn't find the string 'tsreduce' in the first line.)\"\n\t\treturn\n\t\t\n\ttelescopeName = 'Warwick One Metre'\n\ttelescope = findTelescope(telescopeName)\n\t\n\tfilenames = []\n\tmidTimes = []\n\tMJDs = []\n\tcounts = []\n\tskys = []\n\tsigmas = []\n\terrors = []\n\ttimeFlags = []\n\texposures = []\n\tFWHMs = []\n\tbetas = []\n\txs = []\n\tys = []\n\tnumApertures = -1\n\t\n\tfor line in inputFile:\n\t\tif line[0] == '#':\n\t\t\tif 'ReferenceTime' in line:\n\t\t\t\tparts = line.split()\n\t\t\t\treferenceDateString = parts[2]\n\t\t\t\treferenceTimeString = parts[3]\n\t\t\t\treferenceDateUTC = astropy.time.Time(referenceDateString + \" \" + referenceTimeString)\n\t\t\t\ttLocation = astropy.coordinates.EarthLocation.from_geodetic(telescope['longitude'], telescope['latitude'], telescope['altitude'])\n\t\t\t\treferenceDateUTC.location = tLocation\n\t\t\t\treferenceDate = referenceDateUTC\n\t\t\tif 'FramePattern' in line:\n\t\t\t\ttargetString = generalUtils.getBetweenChars(line, '^', '(')\n\t\telse:\n\t\t\tparts = line.strip().split()\n\t\t\tif numApertures == -1:\n\t\t\t\tnumApertures = (len(parts)-2) / 6\n\t\t\t\tprint \"number of apertures is: \", numApertures\n\t\t\t\n\t\n\tfor a in range(numApertures):\n\t\tprint \"Aperture: \", a\n\t\tcounts = []\n\t\tskys = []\n\t\tsigmas = []\n\t\terrors = []\n\t\ttimeFlags = []\n\t\texposures = []\n\t\tFWHMs = []\n\t\tbetas = []\n\t\txs = []\n\t\tys = []\n\t\tinputFile.seek(0)\n\t\tindex = 0\n\t\tfor line in inputFile:\n\t\t\tif line[0] == '#':\n\t\t\t\tcontinue\t\n\t\t\tparts = line.strip().split()\n\t\t\tif numApertures == -1:\n\t\t\t\tnumApertures = (len(parts)-2) / 6\n\t\t\t\tprint \"number of apertures is: \", numApertures\n\t\t\t\n\t\t\tif a==0:\n\t\t\t\tfilenames.append(parts[0])\n\t\t\t\tmidTimes.append(float(parts[1]))\n\t\t\t\tincrement = astropy.time.TimeDelta(midTimes[index], format='sec')\n\t\t\t\tastroDate = referenceDate + increment\n\t\t\t\tMJDs.append(astroDate.mjd)\n\t\t\t\t\n\t\t\t\n\t\t\tcolumnOffset = a * 6\n\t\t\t\n\t\t\tcounts.append(float(parts[2 + columnOffset]))\n\t\t\texposures.append(1.0)\n\t\t\tsigmas.append(float(parts[3 + columnOffset]))\n\t\t\tskys.append(float(parts[4 + columnOffset]))\n\t\t\txs.append(float(parts[5 + columnOffset]))\n\t\t\tys.append(float(parts[6 + columnOffset]))\n\t\t\tFWHMs.append(float(parts[7 + columnOffset]))\n\t\t\t\n\t\t\tprint \"%s %5.7f Counts/s: %.2f[%.2f] (%.2f, %.2f) {%.2f}\"%(filenames[index], MJDs[index], counts[-1], sigmas[-1], xs[-1], ys[-1], FWHMs[-1])\n\t\t\t\n\t\t\t\n\t\t\tindex+=1\n\t\t\n\t\t\t\t\n\t\tphotometry = {}\n\t\t\n\t\tphotometry['MJD'] = numpy.array(MJDs)\n\t\tphotometry['exposure'] = numpy.array(exposures)\n\t\tphotometry['FWHM'] = numpy.array(FWHMs)\n\t\tphotometry['x'] = numpy.array(xs)\n\t\tphotometry['y'] = numpy.array(ys)\n\t\tphotometry['counts'] = numpy.array(counts)\n\t\tphotometry['sigma'] = numpy.array(sigmas)\n\t\tphotometry['sky'] = numpy.array(skys)\n\t\t\n\t\tid = slots.getNextSlotID()\n\t\tprint \"new ID:\", id\n\t\tslot = photometryClasses.slotObject(id)\n\t\tslot.setPhotometry(photometry)\n\t\tslot.setTimeColumn('MJD')\n\t\tslot.setYColumn('counts')\n\t\tslot.setYError('sigma')\n\t\tslot.target = targetString\n\t\tslot.filter = 'Rise-1'\n\t\tslot.telescope = telescope\n\t\tslot.aperture = a\n\t\tnumSlots = slots.addSlot(slot)\n\t\t\n\t\tprint referenceDate, referenceDate.scale\n\t\tprint referenceDateUTC, referenceDateUTC.scale\n\t\ttLocation = astropy.coordinates.EarthLocation.from_geodetic(telescope['longitude'], telescope['latitude'], 
telescope['altitude'])\n\t\treferenceDate.location = tLocation\n\t\tprint referenceDate, referenceDate.scale, referenceDate.location\n\t\tprint referenceDate.tdb", "def openFullProfFile(self, filename):\n handle = open(filename)\n lines = handle.readlines()\n handle.close()\n atoms = []\n bonds = []\n conns = []\n for line in lines:\n if line[0:4] == \"CELL\":\n #format of line: CELL a b c alpha beta gamma\n vals = line.split()\n print vals\n a = float(vals[1])\n b = float(vals[2])\n c = float(vals[3])\n alpha = float(vals[4])\n gamma = float(vals[5])\n beta = float(vals[6])\n elif line[0:6] == \"SPACEG\":\n #this is the space group in Hermann-Mauguin notation.\n hm_spacegroup = (line[6:]).strip().upper()\n space_group = GetSpaceGroup(hm_spacegroup)\n elif line[0:3] == \"BOX\":\n #Format: xmin xmax ymin ymax zmin zmax\n #In this program, however, xmin, ymin, zmin = 0,0,0 always.\n vals = line.split()\n a_diff = float(vals[2]) - float(vals[1])\n b_diff = float(vals[4]) - float(vals[3])\n c_diff = float(vals[6]) - float(vals[5])\n a_cutoff = int(a_diff)\n b_cutoff = int(b_diff)\n c_cutoff = int(c_diff)\n if a_diff - a_cutoff > 0:\n a_cutoff += 1\n if b_diff - b_cutoff > 0:\n b_cutoff += 1\n if c_diff - c_cutoff > 0:\n c_cutoff += 1\n elif line[0:4] == \"ATOM\":\n vals = line.split()\n label = vals[1]\n symbol = vals[2]\n a_coord = float(vals[3])\n b_coord = float(vals[4])\n c_coord = float(vals[5])\n position = (a_coord, b_coord, c_coord)\n #Get the radius which is right after the word \"RADIUS\"\n for i in range(len(vals)):\n if vals[i] == \"RADIUS\":\n radius = float(vals[i+1])\n break\n else:\n radius = None\n #Get the color which is right after the word \"COLOR\"\n for i in range(len(vals)):\n if vals[i] == \"COLOR\":\n color = [float(vals[i+1]), float(vals[i+2]), float(vals[i+3])]\n break\n else:\n color = None\n #atomData format (each line):\n #label massNum aPos bPos cPos anisotropy_a anisotropy_b anistropy_c spin valence\n atoms.append([label, symbol, position, radius, color])\n elif line[0:4] == \"BOND\":\n #Format: BOND label1 label2 min_dist max_dist RADIUS rad COLOR r g b t\n #The color and radius need not be there and will be ignored for now since\n #the color and radius of bonds is hardcoded in right now.\n vals = line.split()\n bonds.append([vals[1], vals[2], vals[3], vals[4]])\n elif line[0:4] == \"CONN\":\n #Format: BOND symbol1 symbol2 min_dist max_dist RADIUS rad COLOR r g b t\n #The color and radius need not be there and will be ignored for now since\n #the color and radius of bonds is hardcoded in right now.\n vals = line.split()\n conns.append([vals[1], vals[2], vals[3], vals[4]])\n \n \n self.newCell(space_group.number, a, b, c, alpha, beta, gamma, 1, 1, 1,\n a_cutoff, b_cutoff, c_cutoff)\n \n for atom in atoms:\n #FPStudio does not seem to support isotopes\n massNum = None\n self.addAtom(atom[1], atom[2], massNum = massNum, radius = atom[3], rgb = atom[4])\n \n for bond in bonds:\n self.createBonds(label1 = bonds[0], label2 = bonds[1],\n minDist = bonds[2], maxDist = bonds[3])\n for conn in conns:\n self.createBonds(symbol1 = conns[0], symbol2 = conns[1],\n minDist = conns[2], maxDist = conns[3])\n \n self.refreshGUI()\n #self.cellChange(space_group.number, a, b, c, alpha, beta, gamma, magNa = 1, magNb = 1, magNc = 1, cutNa = a_cutoff, cutNb = b_cutoff, cutNc = c_cutoff, atomData = atoms)\n #self.updateCell(space_group.number, a, b, c, alpha, beta, gamma, magNa = 1, magNb = 1, magNc = 1, cutNa = a_cutoff, cutNb = b_cutoff, cutNc = c_cutoff, atomData = atoms)\n 
#self.refreshGUI()\n \n #send signal to the cell window to show the info that has been loaded and to vtkWindow to draw it\n send(signal = \"File Load\", sender = \"Session\",\n spaceGroup = space_group.number, a = a, b = b, c = c,\n alpha = alpha, beta = beta, gamma = gamma, magNa = a_cutoff,\n magNb = b_cutoff, magNc = c_cutoff, cutNa = a_cutoff,\n cutNb = b_cutoff, cutNc = c_cutoff)\n \n \n #TODO: use these values extracted. You could combine the three file opening functions.\n #Each function would have to extract values form it's format and then a single function\n #could be used for all three to construct the model from the extracted values.e", "def readFlow(fn):\n with open(fn, 'rb') as f:\n magic = np.fromfile(f, np.float32, count=1)\n if 202021.25 != magic:\n print('Magic number incorrect. Invalid .flo file')\n return None\n else:\n w = np.fromfile(f, np.int32, count=1)\n h = np.fromfile(f, np.int32, count=1)\n # print 'Reading %d x %d flo file\\n' % (w, h)\n data = np.fromfile(f, np.float32, count=2*int(w)*int(h))\n # Reshape data into 3D array (columns, rows, bands)\n # The reshape here is for visualization, the original code is (w,h,2)\n return np.resize(data, (int(h), int(w), 2))", "def process_test_32():\n fname = f\"{TEST_PATH}/set_32.csv\"\n fout = f\"{TEST_PATH}/test_32.smi\"\n fout_gse = f\"{TEST_PATH}/test_32.with.gse.smi\"\n logging.info(f\"Processing {fname}\")\n\n cmpd_list = []\n with open(fname, 'r') as fin:\n for line in fin:\n if not line.startswith(\"#\"):\n pairs = line.rstrip('\\n').split(',')\n canon_smiles = canonicalize_smiles(pairs[0])\n logS = float(pairs[5])\n cmpd_list.append((canon_smiles, logS))\n\n with open(fout, 'w', encoding=\"ascii\") as fo:\n for el in cmpd_list:\n smiles = el[0]\n fo.write(f\"{smiles}\\n\")\n\n with open(fout_gse, 'w', encoding=\"ascii\") as fo:\n for el in cmpd_list:\n smiles = el[0]\n logS = el[1]\n fo.write(f\"{smiles},{logS}\\n\")\n\n logging.info(f\"Saved {fout}\")", "def corescan(filename, core):\n \n pssm = np.loadtxt(filename, skiprows=1)\n pssmf = pssm[:,1:].transpose()\n\n # iterpssm = np.concatenate((matlog, pssmf, matlog), axis=1) #iterable PSSM , flanked by buffer arrays\n\n lenpssm = len(pssmf.transpose())\n\n score = -1000\n pos = 0\n for j in regenerateseq(core, \"numpy\"):\n beta = pssmwalk(pssmf,j, 0, \"numpy\")\n \n\n betascore = beta[0]\n\n betapos = beta[1]\n \n if betascore > score :\n score = betascore\n pos = betapos\n else:\n pass\n\n return [score,pos,pssmf]", "def readMAXIPOLdataLuis(filename):\n\n ia=[]; ja=[]\n i=[]; j=[]; beam=[]; sig=[]; cts=[]\n for line in open(filename, \"r\"):\n line = line.strip().split()\n i1, j1, b1, s1, c1 = (int(line[0]), int(line[1]), \n float(line[2]), float(line[3]), int(line[4]))\n ia.append(i1); ja.append(j1)\n if b1 != 0 and s1 !=0:\n ## only keep pixels with data\n i.append(i1); j.append(j1); beam.append(b1)\n sig.append(s1); cts.append(c1)\n\n beam = asarray(beam, float64)\n sig = asarray(sig, float64)\n ## map i and j (before deletion) onto (-1,1)\n x = array([2*(ii-min(ia))/(max(ia)-min(ia))-1 for ii in i], float64)\n y = array([2*(jj-min(ja))/(max(ja)-min(ja))-1 for jj in j], float64)\n\n return BeamData(x, y, beam, sig, cts=cts)", "def mse_converter( fname ):\n lines = []\n with open( fname ) as fh:\n for line in fh.readlines():\n if len(line) > 1: # avoid empty lines\n if line.startswith('m'):\n continue\n # strip off \\n and split on tabs\n line = line.strip().split( '\\t' )\n lines.append( ( float(line[0]), float(line[1]) ) )\n return numpy.array( lines )", 
"def compute_all_features(mp3_file):\n # Decode and read mp3\n audio, _ = librosa.load(mp3_file, sr=SR)\n\n # Compute mels\n mel = compute_melspecs(audio)\n\n # Save\n out_file = os.path.join(\n OUTPUT_DIR, os.path.basename(mp3_file).replace(\".mp3\", \"-mel.npy\"))\n np.save(out_file, mel)", "def read_data(self):\n data = np.genfromtxt(self.__file) # Planck SED\n self.__nu = 10.0**data[:,0]\n self.__nuF = 10.0**data[:,2]\n self.__err = 10.0**data[:,3]\n #self.__W = 10.0**data[:,4]\n self.__yerr = [ self.__nuF - self.__nuF / self.__err, \\\n self.__nuF * self.__err - self.__nuF ]\n self.__maxY = max( self.__nuF )\n self.__minY = min( self.__nuF )", "def read_feat_file(filepath):\n term2feat = {}\n shard_size = 0\n for line in open(filepath):\n t, df, sum_tf, sum_prob, sum_logprob, sum_sqr_logprob, min_logprob = line.split()\n t = t.strip()\n if '-1' in t:\n shard_size = int(df) \n continue\n df = int(df)\n sum_logprob = float(sum_logprob)\n sum_sqr_logprob = float(sum_sqr_logprob)\n min_logprob = float(min_logprob)\n feat = ShardTermFeat()\n feat.df = int(df)\n feat.e = sum_logprob / df\n feat.sqr_e = sum_sqr_logprob / df\n feat.var = feat.sqr_e - feat.e**2\n if df == 1 or abs(feat.var) < 0.000999:\n feat.var = 0\n assert (feat.var >= 0), \"{0} {1} {2} {3}\".format(feat.e, feat.sqr_e, feat.df, feat.var)\n feat.min = min_logprob\n term2feat[t] = feat\n return term2feat, shard_size", "def Read_Spectrum(Path,borne1 = 0,borne2 = 0) :\n x,y=[],[]\n fs = open(Path, 'r')\n #print('Open new fic') \n#index_array = 0\n while 1: \n txt = fs.readline()\n #print(txt)\n if ((txt =='')|(txt == '\\r\\n')): \n break\n #print(txt)\n ii=-1\n while 1: # on cherche le premier espace qui limite le premier nombre\n ii = ii+1\n #print(ii)\n if ((txt[ii] == ' ') |(txt[ii] == '\\t')):\n break\n \n x.append(float(txt[0:ii]))\n y.append(float(txt[ii:])) \n# if len(txt) == 21 : #nu >= 10000 cm-1\n# x.append(float(txt[0:11]))\n# y.append(float(txt[11:]))\n# elif len(txt) == 20 : #nu >= 1000 cm-1\n# x.append(float(txt[0:10]))\n# y.append(float(txt[10:]))\n# elif len(txt) == 19 : #nu >= 100 cm-1\n# x.append(float(txt[0:9]))\n# y.append(float(txt[9:]))\n# elif len(txt) == 18 : #nu >= 10 cm-1\n# x.append(float(txt[0:8]))\n# y.append(float(txt[8:]))\n# elif len(txt) == 17 : #nu >= 1 cm-1\n# x.append(float(txt[0:7]))\n# y.append(float(txt[7:]))\n\n #x[index_array],y[index_array] = float(txt[0:9]),float(txt[10:17])\n #index_array = index_array+1\n \n fs.close()\n x = np.array(x)\n y = np.array(y)\n if ((borne1 == 0) & (borne2 == 0)) :\n pass \n else :\n index_ok = ((x<borne2) & (x>borne1))\n x = x[index_ok]\n y = y[index_ok]\n\n return x,y", "def read_gff3(self,gff3_file):\r\n with open(gff3_file) as infile:\r\n set = None\r\n for line in infile:\r\n if line[0] == '#':\r\n if line[:3] == '###' and set:\r\n self.sets.append(set)\r\n set = None\r\n if line.startswith(\"##sequence-region\"):\r\n splitline = line.split()\r\n self.sequence_regions[splitline[1]] = line\r\n #TODO: properly deal with comment lines.\r\n self.sets.append(line)\r\n else:\r\n line = GFF3_line(set,line)\r\n #adding the feature individually\r\n self.features_id[line.attributes.id] = line\r\n if line.attributes.name:\r\n if line.attributes.name in self.features_name:\r\n #TODO: find a way to handle features that have the same name.\r\n pass#print(line.attributes.id, line.attributes.name, self.features_name[line.attributes.name].attributes.id)\r\n else:\r\n self.features_name[line.attributes.name] = line\r\n #adding the set of features\r\n if line.type == 
\"region\" and not line.attributes.parent:\r\n #this feature has been deemed redundant and is not used in recent versions of the gff3,\r\n if set:\r\n #this is the first element of a set,\r\n # old set needs to be added to the list and a new set created\r\n self.sets.append(set)\r\n set = GT_seq_location()\r\n else:\r\n set = GT_seq_location()\r\n #if the set is none, it was also during init, and we need to set the owner_set again\r\n line._owner_set = set\r\n set._flanking_region = line\r\n elif line.type == \"flanking_region\":\r\n if set and set.flanking_region:\r\n # this can also be the first element of a set,\r\n # if the set already has a flanking region\r\n # old set needs to be added to the list and a new set created\r\n self.sets.append(set)\r\n set = GT_seq_location()\r\n else:\r\n set = GT_seq_location()\r\n #if the set is none, it was also during init, and we need to set the owner_set again\r\n line._owner_set = set\r\n set.flanking_region = line\r\n elif line.type == \"region\" and line.attributes.parent:\r\n set.gt_seq_region.append(line)\r\n elif line.type == \"PCR_product\":\r\n set.pcr_product.append(line)\r\n elif line.type == \"forward_primer\":\r\n set.forward_primer.append(line)\r\n elif line.type == \"reverse_primer\":\r\n set.reverse_primer.append(line)\r\n elif line.type == \"SNP\":\r\n set.snp.append(line)\r\n else:\r\n pass#print(\"line of type {} not added.\".format(line.type))\r\n if set:\r\n # there was no '###' at the end of the file so the last set needs to be added.\r\n self.sets.append(set)", "def read(file):\n\n blocks = ['bus', 'load', 'fshunt', 'gen', 'branch', 'transf', 'area',\n 'twotermdc', 'vscdc', 'impedcorr', 'mtdc', 'msline', 'zone',\n 'interarea', 'owner', 'facts', 'swshunt', 'gne', 'Q']\n nol = [1, 1, 1, 1, 1, 4, 1,\n 0, 0, 0, 0, 0, 1,\n 0, 1, 0, 0, 0, 0]\n rawd = re.compile('rawd\\d\\d')\n\n retval = True\n version = 0\n b = 0 # current block index\n raw = {}\n for item in blocks:\n raw[item] = []\n\n data = []\n mdata = [] # multi-line data\n mline = 0 # line counter for multi-line models\n\n # parse file into raw with to_number conversions\n fid = open(file, 'r')\n for num, line in enumerate(fid.readlines()):\n line = line.strip()\n if num == 0: # get basemva and frequency\n data = line.split('/')[0]\n data = data.split(',')\n\n mva = float(data[1])\n freq = float(data[5])\n version = int(data[2])\n\n if not version:\n version = int(rawd.search(line).group(0).strip('rawd'))\n if version < 32 or version > 33:\n logging.warning('RAW file version is not 32 or 33. 
Error may occur.')\n continue\n elif num == 1: # store the case info line\n logging.info(line)\n continue\n elif num == 2:\n continue\n elif num >= 3:\n if line[0:2] == '0 ' or line[0:3] == ' 0 ': # end of block\n b += 1\n continue\n elif line[0] is 'Q': # end of file\n break\n data = line.split(',')\n\n data = [to_number(item) for item in data]\n mdata.append(data)\n mline += 1\n if mline == nol[b]:\n if nol[b] == 1:\n mdata = mdata[0]\n raw[blocks[b]].append(mdata)\n mdata = []\n mline = 0\n fid.close()\n\n # add device elements params and add to PSAT formatted dictionary\n\n for data in raw['bus']:\n \"\"\"version 32:\n 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10\n ID, NAME, BasekV, Type, Area Zone Owner Va, Vm, latitude longitude\n \"\"\"\n idx = data[0]\n ty = data[3]\n angle = data[8]\n try:\n lat = data[9]\n except:\n # logging.warning('<No Coordinates in .raw file>')\n param = {'idx': idx,\n 'name': data[1],\n 'Vn': data[2],\n 'type': data[3],\n 'area': data[4],\n 'voltage': data[7],\n 'region': data[5],\n 'owner': data[6],\n 'angle': angle,\n }\n psatlist = [data[0], data[2], data[7], angle, data[4], data[5]]\n else:\n param = {'idx': idx,\n 'name': data[1],\n 'Vn': data[2],\n 'type': data[3],\n 'area': data[4],\n 'voltage': data[7],\n 'region': data[5],\n 'owner': data[6],\n 'angle': angle,\n 'latitude': data[9],\n 'longitude': data[10]\n }\n psatlist = [data[0], data[2], data[7], angle, data[4], data[5], data[9], data[10]]\n Settings.Bus.append(psatlist)\n Settings.BusNames.append(data[1])\n # Add BusSTORE Dictionary For Later Reference\n Settings.BusStore[idx] = param\n\n xcoord = [34.560040, 34.938385, 34.360040, 40.5152473, 40.3142473, 36.527401, 36.857401, 36.687401, 36.856401,\n 40.487041, 36.903901, 36.702901, 35.832561, 33.386047, 33.185047, 37.105571, 37.104154, 33.706718,\n 37.103549, 36.703539, 37.103559, 36.703549, 36.033561, 35.631561, 36.032561, 35.732561, 36.525401,\n 36.857401, 49.869314, 50.969314, 51.979314, 52.481674, 54.973192, 56.276212, 41.734596, 34.551015,\n 34.652015, 34.537507, 34.587507, 34.157904, 33.714453, 33.762453, 39.548160, 39.496160, 34.313143,\n 34.545782, 34.380686, 34.111686, 34.137762, 34.118650, 34.158650, 33.918650, 33.718650, 34.018650,\n 34.018650, 34.018650, 34.018650, 34.018650, 34.312456, 34.315456, 34.243600, 34.566258, 34.565258,\n 46.064672, 46.565672, 45.514571, 45.606833, 45.806833, 44.890000, 45.596416, 45.295416, 45.891161,\n 47.954899, 46.511440, 45.913936, 45.713936, 46.669335, 47.954899, 47.624154, 43.784730, 44.482350,\n 42.006860, 42.934919, 42.731919, 43.013135, 44.068350, 43.558350, 42.438350, 42.938350, 44.068350,\n 43.558350, 43.048350, 42.638350, 44.068350, 43.558350, 43.048350, 42.638350, 43.620189, 39.120428,\n 40.398031, 35.216200, 35.215200, 36.202099, 39.777745, 39.539598, 37.052929, 35.403217, 35.352217,\n 36.807243, 39.567450, 40.807689, 40.806689, 41.008689, 39.555494, 37.954721, 38.406721, 38.906721,\n 38.656721]\n ycoord = [-109.277313, -110.303798, -109.777313, -107.546455, -107.546455, -108.325669, -108.654569, -108.486669,\n -108.325669, -107.185575, -111.390408, -111.390408, -111.448566, -112.860397, -112.659397, -108.243555,\n -108.441191, -112.322033, -111.590816, -111.190816, -111.190816, -111.590806, -111.648566, -111.248566,\n -111.249566, -111.647566, -108.655669, -108.323669, -122.150895, -122.150895, -122.150895, -121.61684,\n -121.924221, -122.21370, -108.790427, -117.568105, -117.538105, -118.607375, -118.658375, -118.280282,\n -118.146319, -118.096319, -112.52797, -112.72797, -118.690631, -118.389938, 
-118.478496, -118.478496,\n -118.299917, -118.095428, -118.095428, -118.095428, -118.095428, -118.195428, -118.395428, -117.995428,\n -117.795428, -117.995428, -118.481217, -118.891217, -118.391667, -117.166428, -117.368428, -106.60906,\n -106.80906, -122.681289, -121.114785, -122.113785, -123.29000, -121.312202, -121.114202, -106.612578,\n -118.997945, -112.88531, -120.692286, -120.693974, -119.571501, -120.997945, -122.219492, -118.77463,\n -121.019484, -121.316546, -114.419206, -114.419206, -120.956476, -120.79484, -120.93484, -121.216546,\n -121.156546, -121.215484, -121.135484, -121.255484, -121.175484, -121.013484, -120.733484, -121.053484,\n -120.973484, -118.865882, -122.073631, -122.263453, -120.847567, -120.900567, -120.129849, -122.142965,\n -122.262993, -121.021929, -119.450452, -119.450452, -121.779037, -122.276225, -122.135718, -121.935718,\n -121.935718, -121.24000, -121.18379, -121.10879, -121.27379, -121.23979]\n\n #for idx, line in enumerate(Settings.Bus):\n # line.extend([xcoord[idx], ycoord[idx]])\n\n maxV = 1.1\n minV = 0.9\n maxQ = 1\n minQ = 0\n convimp = 0\n status = 1\n loss = 1\n\n for data in raw['load']:\n \"\"\"version 32:\n 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11\n Bus, Id, Status, Area, Zone, PL(MW), QL (MW), IP, IQ, YP, YQ, OWNER\n \"\"\"\n\n busidx = data[0]\n vn = Settings.BusStore[busidx]['Vn']\n voltage = Settings.BusStore[busidx]['voltage']\n param = {'bus': busidx,\n 'Vn': vn,\n 'Sn': mva,\n 'p': (data[5] + data[7] * voltage + data[9] * voltage ** 2) / mva,\n 'q': (data[6] + data[8] * voltage - data[10] * voltage ** 2) / mva,\n 'owner': data[11],\n 'type': Settings.BusStore[busidx]['type'],\n 'voltage': voltage\n }\n\n psatlist = [busidx, mva, vn, param['p'], param['q'], maxV, minV, convimp, status]\n Settings.PQ.append(psatlist)\n \"\"\"CONFIRM THAT OTHER BUSES HAVE 0 P and 0 Q which are not added\"\"\"\n\n for data in raw['fshunt']:\n \"\"\"\n 0, 1, 2, 3, 4\n Bus, name, Status, g (MW), b (Mvar)\n \"\"\"\n busidx = data[0]\n vn = Settings.BusStore[busidx]['Vn']\n param = {'bus': busidx,\n 'Vn': vn,\n 'status': data[2],\n 'Sn': mva,\n 'g': data[3] / mva,\n 'b': data[4] / mva,\n }\n\n psatlist = [busidx, mva, vn, freq, param['g'], param['b'], param['status']]\n Settings.Shunt.append(psatlist)\n\n gen_idx = 0\n type = 6\n\n for data in raw['gen']:\n \"\"\"\n 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11, 12, 13, 14, 15, 16,17,18,19\n I,ID,PG,QG,QT,QB,VS,IREG,MBASE,ZR,ZX,RT,XT,GTAP,STAT,RMPCT,PT,PB,O1,F1\n \"\"\"\n busidx = data[0]\n vn = Settings.BusStore[busidx]['Vn']\n gen_mva = data[8]\n gen_idx += 1\n status = data[14]\n leak = 0\n param = {'Sn': gen_mva,\n 'Vn': vn,\n 'u': status,\n 'idx': gen_idx,\n 'bus': busidx,\n 'pg': status * data[2] / mva,\n 'qg': status * data[3] / mva,\n 'qmax': data[4] / mva,\n 'qmin': data[5] / mva,\n 'v0': data[6],\n 'ra': data[9], # ra armature resistance\n 'xs': data[10], # xs synchronous reactance\n 'pmax': data[16] / mva,\n 'pmin': data[17] / mva,\n }\n\n if Settings.BusStore[busidx]['type'] == 3: #Check Bus Type for Slack\n refangle = 0\n refBus = 1\n PGuess = 1\n swlist = [busidx, gen_mva, vn, param['v0'], refangle, param['qmax'], param['qmin'],\n maxV, minV, PGuess, loss, refBus, status]\n SW = swlist\n Settings.SW.append(swlist)\n Settings.SWStore[busidx] = param\n Settings.SynStore[busidx] = param\n continue\n\n if busidx not in Settings.BusStore.keys():\n \"\"\" Need data from .dyr file. 
Create initial list, then append data from .dyr\"\"\"\n else:\n # psatlist = [busidx, gen_mva, vn, freq, type, leak, param['ra'],param['xs']]\n # Syn.append(psatlist)\n Settings.SynStore[busidx] = param\n pvlist = [busidx, gen_mva, vn, param['pg'], Settings.BusStore[busidx]['voltage'],\n param['qmax'], param['qmin'], maxV, minV, loss, status]\n Settings.PV.append(pvlist)\n\n\n for data in raw['branch']:\n \"\"\"\n I,J,ID,R,X,B,RATEA,RATEB,RATEC,GI,BI,GJ,BJ,ST,LEN,O1,F1,...,O4,F4\n \"\"\"\n param = {'bus1': data[0],\n 'bus2': data[1],\n 'id' : data[2],\n 'r': data[3],\n 'x': data[4],\n 'b': data[5],\n 'rate_a': data[6],\n 'rate_b': data[7],\n 'rate_c': data[8],\n 'Vn': Settings.BusStore[data[0]]['Vn'],\n 'Vn2': Settings.BusStore[data[1]]['Vn'],\n 'length': data[14],\n 'Ilim': EMPTY,\n 'Plim': EMPTY,\n 'Slim': EMPTY,\n 'status': data[13]\n }\n\n psatlist = [param['bus1'], param['bus2'], param['rate_c'], param['Vn'], freq, EMPTY,\n param['length'], param['r'], param['x'], param['b'], param['Ilim'], param['Plim'], EMPTY, EMPTY,\n param['Slim'], param['status']]\n Settings.Lineij.append([data[0], data[1], data[2]])\n Settings.Lineji.append([data[1], data[0], data[2]])\n Settings.LineOrd[param['bus1']].append(psatlist)\n Settings.branches += 1\n Settings.linecount += 1\n Settings.LineBusMatij[param['bus2']].append(Settings.branches)\n Settings.LineBusMatji[param['bus1']].append(Settings.branches)\n\n for data in raw['transf']:\n \"\"\"\n I,J,K,CKT,CW,CZ,CM,MAG1,MAG2,NMETR,'NAME',STAT,O1,F1,...,O4,F4\n R1-2,X1-2,SBASE1-2\n WINDV1,NOMV1,ANG1,RATA1,RATB1,RATC1,COD1,CONT1,RMA1,RMI1,VMA1,VMI1,NTP1,TAB1,CR1,CX1\n WINDV2,NOMV2\n \"\"\"\n if len(data[1]) < 5:\n ty = 2\n else:\n ty = 3\n if ty == 3:\n continue\n # raise NotImplementedError('Three-winding transformer not implemented')\n\n tap = data[2][0]\n phi = data[2][2]\n\n if tap == 1 and phi == 0:\n trasf = False\n else:\n trasf = True\n param = {'trasf': trasf,\n 'bus1': data[0][0],\n 'bus2': data[0][1],\n 'u': data[0][11],\n 'b': data[0][8],\n 'r': data[1][0],\n 'x': data[1][1],\n 'tap': tap,\n 'phi': phi,\n 'rate_a': data[2][3],\n 'Vn': Settings.BusStore[busidx]['Vn'],\n 'Vn2': Settings.BusStore[busidx]['Vn'],\n # 'length': data[?][?], FIND CORRECT INDEX\n 'Ilim': EMPTY,\n 'Plim': EMPTY,\n 'Slim': EMPTY,\n }\n psatlist = [param['bus1'], param['bus2'], param['rate_a'], param['Vn'], freq, EMPTY,\n EMPTY, param['r'], param['x'], param['b'], param['Ilim'], param['Plim'], EMPTY, EMPTY,\n param['Slim'], param['u']]\n\n Settings.LineOrd[param['bus1']].append(psatlist)\n Settings.linecount += 1\n Settings.transformers += 1\n # ADD Line Data(All Branch Types) to Sys Param Dict after .dyr Transformer Data Added\n # Re-Order Line Data for correct sequence\n for key in Settings.LineOrd:\n for item in Settings.LineOrd[key]:\n Settings.Line.append(item)\n\n for data in raw['area']:\n Settings.Areas.append(data[4])\n\n for data in raw['zone']:\n Settings.Regions.append(data[1])\n\n return retval", "def predict(self, datafile):", "def test_read_0_2_smirnoff(self):\n ForceField(\n get_data_file_path(\n \"test_forcefields/smirnoff99Frosst_reference_0_2_spec.offxml\"\n )\n )", "def process_file(input_file = 'NC_012655.ffn',output_file = 'NC_012655.output'):\n #prepare\n f = open(input_file, 'r')\n o = open(output_file,'w')\n seq = ''\n header = f.readline()\n o.write('GeneID Length GC \\n')\n #work\n for line in f:\n if not line.startswith('>'):\n seq += line\n else:\n o.write(process_gene(header = header, gene = seq))\n header = line\n seq = ''\n #finish\n 
f.close()\n o.close()\n return 0", "def _get_scfinfo(self, file):\n f = open_general(file)\n tmptxt = f.readlines()\n f.close()\n # get rms and number of iterations\n itmp, niter, rms = 0, -1, -1\n while itmp >= 0:\n itmp = search_string('average rms-error', tmptxt)\n if itmp >= 0:\n tmp = tmptxt.pop(itmp).replace('D', 'E').split()\n niter = int(tmp[1])\n rms = float(tmp[-1])\n # get max number of scf steps\n itmp = search_string('SCFSTEPS', tmptxt)\n if itmp >= 0:\n nitermax = int(tmptxt.pop(itmp).split()[-1])\n # get qbound\n itmp = search_string('QBOUND', tmptxt)\n if itmp >= 0:\n qbound = float(tmptxt.pop(itmp).split()[-1])\n # get imix\n itmp = search_string('IMIX', tmptxt)\n if itmp >= 0:\n imix = int(tmptxt.pop(itmp).split()[-1])\n # get mixfac\n itmp = search_string('MIXFAC', tmptxt)\n if itmp >= 0:\n mixfac = float(tmptxt.pop(itmp).split()[-1])\n # get fcm\n itmp = search_string('FCM', tmptxt)\n if itmp >= 0:\n fcm = float(tmptxt.pop(itmp).split()[-1])\n # set mixinfo\n mixinfo = [imix, mixfac, qbound, fcm]\n # set converged and nmax_reached logicals\n converged, nmax_reached = False, False\n if nitermax==niter: nmax_reached = True\n if rms<qbound: converged = True\n # return values\n return niter, nitermax, converged, nmax_reached, mixinfo", "def parseMISlightly(infile, summarize=False):\n # Create an empty base class that we'll fill up as we read through\n flight = flightprofile()\n\n # Read the file into memory so we can quickly parse stuff\n f = open(infile, 'r')\n cont = f.readlines()\n f.close()\n\n flight.hash = computeHash(infile)\n\n # Search for the header lines which will tell us how many legs there are.\n # Use a regular expression to make the searching less awful\n # Note: regexp searches can be awful no matter what\n head1 = \"Leg \\d* \\(.*\\)\"\n lhed = findLegHeaders(cont, re.compile(head1))\n\n # Guarantee that the loop matches the number of legs found\n flight.nlegs = len(lhed)\n\n head2 = \"UTC\\s*MHdg\"\n ldat = findLegHeaders(cont, re.compile(head2))\n\n if len(lhed) != len(ldat):\n print(\"FATAL ERROR: Couldn't find the same amount of legs and data!\")\n print(\"Check the formatting of the file? 
Or the regular expressions\")\n print(\"need updating because they changed the file format?\")\n print(\"Looking for '%s' and '%s'\" % (head1, head2))\n return -1\n\n # Since we know where the first leg line is, we can define the preamble.\n # Takes the flight class as an argument and returns it all filled up.\n flight = parseMISPreamble(cont[0:lhed[0]], flight, summarize=summarize)\n\n return flight, lhed, ldat, cont", "def read_is(filename):\n with open(filename, 'rb') as f:\n print(f'Reading {filename}')\n print(f'Reading Header...')\n is_type = [struct.unpack('c', f.read(1))[0].decode('utf-8')\n for i in range(4)]\n is_type = ''.join(is_type)\n if is_type not in ['IS01', 'IS02', 'IS03']:\n print(f'{is_type} : Invalid IS type, please check that '\n 'input file is a Inverse Solution matrix')\n raise ValueError\n print(f'IS type: {is_type}')\n n_channels = struct.unpack('I', f.read(4))[0]\n print(f'n_channels: {n_channels}')\n numsolutionpoints = struct.unpack('I', f.read(4))[0]\n print(f'n_solutionpoints: {numsolutionpoints}')\n numregularizations = struct.unpack('I', f.read(4))[0]\n print(f'n_regularizations: {numregularizations}')\n isinversescalar = struct.unpack('c', f.read(1))[0]\n if isinversescalar == b'\\x01':\n n_dim = 1\n print(f'Inverse solution is Scalar')\n elif isinversescalar == b'\\x00':\n print(f'Inverse solution is Vectorial')\n n_dim = 3\n else:\n raise ValueError(f'isinversescalar must be either 1 for scalar, '\n f'either 0 for vectorial, but '\n f'{ord(isinversescalar)} found.')\n\n if is_type in ['IS01', 'IS02']:\n buf = f.read(n_dim * numsolutionpoints * n_channel * 4)\n data = np.frombuffer(buf, dtype=np.float32)\n data = data.reshape(numsolutionpoints, ndim, n_channel)\n data = no.array([data])\n data = np.swapaxes(data, 1, 2)\n\n elif is_type == 'IS03':\n print(f\"Reading Variable Header...\")\n\n ch_names = []\n for _ in range(n_channels):\n name = [char for char in f.read(32).split(b'\\x00')\n if char != b''][0]\n ch_names.append(name.decode('utf-8'))\n\n solutionpoints_names = []\n for _ in range(numsolutionpoints):\n name = [char for char in f.read(16).split(b'\\x00')\n if char != b''][0]\n solutionpoints_names.append(name.decode('utf-8'))\n\n regularizations_values = []\n for _ in range(numregularizations):\n value = struct.unpack('d', f.read(8))[0]\n regularizations_values.append(value)\n print(f'Regularizations values: {regularizations_values}')\n\n regularizations_names = []\n for _ in range(numregularizations):\n name = [char for char in f.read(32).split(b'\\x00')\n if char != b''][0]\n regularizations_names.append(name.decode('utf-8'))\n print(f'Regularizations names: {regularizations_names}')\n\n regularisation_solutions = []\n buf = f.read(numregularizations\n * n_dim\n * numsolutionpoints\n * n_channels\n * 4)\n data = np.frombuffer(buf, dtype=np.float32)\n data = data.reshape(numregularizations, numsolutionpoints,\n n_dim, n_channels)\n data = np.swapaxes(data, 1, 2)\n\n regularisation_solutions = np.array(regularisation_solutions)\n inverse_solution = {'is_type': is_type,\n 'is_scalar': True if isinversescalar == \"0\" else False,\n 'ch_names': ch_names,\n 'solutionpoints_names': solutionpoints_names,\n 'regularizations_values': regularizations_values,\n 'regularizations_names': regularizations_names,\n 'regularisation_solutions': data}\n return(inverse_solution)", "def readMFAPairs(mfaFile1, mfaFile2):\n def fn(file):\n return \"\".join([ i[:-1] for i in open(file, 'r').readlines()[1:] ])\n j = [0]\n def fn2(i):\n if i == '-':\n return GAP\n k = 
j[0]\n j[0] += 1\n return k\n mfa1 = fn(mfaFile1)\n mfa2 = fn(mfaFile2)\n mfa2 = [ fn2(i) for i in mfa2 ]\n assert len(mfa1) == len(mfa2)\n return [ mfa2[i] for i in xrange(0, len(mfa1)) if mfa1[i] != '-' ]", "def Read_RMCA_basic(Complete_Path):\n fid = open(Complete_Path,'r')\n S = []\n while 1: \n line = fid.readline()\n if line =='': \n break \n else :\n S.append(float(line))\n #R.append(float(line[27:-2]))\n return np.array(S)", "def readFastaFile(filename):", "def get_zeros(m, n, filename, starting=0):\n if starting:\n m = m+1- starting\n n = n+1- starting\n zeros = []\n for i, line in enumerate(open(filename)):\n if i+1 < m:\n continue\n if i+1 > n:\n break\n y = mpf(line.strip())\n zeros.append(y)\n if i+1 == m:\n t1 = y\n t2 = y\n return zeros, t1, t2", "def read_fermi(self):\n E_f=None\n for line in open('OUTCAR', 'r'):\n if line.rfind('E-fermi') > -1:\n E_f=float(line.split()[2])\n return E_f", "def import_pred_shifts(self, input_file, filetype, offset=None):\n \n # If no offset value is defined, use the default one\n if offset==None:\n offset = self.pars[\"pred_offset\"]\n \n if filetype == \"shiftx2\":\n preds_long = pd.read_csv(input_file)\n if any(preds_long.columns == \"CHAIN\"):\n if len(preds_long[\"CHAIN\"].unique())>1:\n print(\"Chain identifier dropped - if multiple chains are \"+\n \"present in the predictions, they will be merged.\")\n preds_long = preds_long.drop(\"CHAIN\", axis=1) \n preds_long = preds_long.reindex(columns=[\"NUM\",\"RES\",\"ATOMNAME\",\n \"SHIFT\"]) \n preds_long.columns = [\"Res_N\",\"Res_type\",\"Atom_type\",\"Shift\"]\n elif filetype == \"sparta+\":\n # Work out where the column names and data are\n with open(input_file, 'r') as f:\n for num, line in enumerate(f, 1):\n if line.find(\"VARS\")>-1:\n colnames_line = num\n colnames = line.split()[1:]\n break\n \n preds_long = pd.read_table(input_file, sep=\"\\s+\", names=colnames,\n skiprows=colnames_line+1)\n preds_long = preds_long[[\"RESID\",\"RESNAME\",\"ATOMNAME\",\"SHIFT\"]]\n preds_long.columns = [\"Res_N\",\"Res_type\",\"Atom_type\",\"Shift\"]\n \n # Sparta+ uses HN for backbone amide proton - convert to H\n preds_long.loc[preds_long[\"Atom_type\"]==\"HN\", \"Atom_type\"] = \"H\"\n else:\n print(\"import_pred_shifts: invalid filetype '%s'.\" % (filetype))\n return(None)\n \n # Add sequence number offset and create residue names\n preds_long[\"Res_N\"] = preds_long[\"Res_N\"] + offset\n preds_long.insert(1, \"Res_name\", (preds_long[\"Res_N\"].astype(str) + \n preds_long[\"Res_type\"]))\n preds_long[\"Res_name\"] = [s.rjust(5) for s in preds_long[\"Res_name\"]]\n \n # Convert from long to wide format\n preds = preds_long.pivot(index=\"Res_N\", columns=\"Atom_type\", \n values=\"Shift\")\n \n # Add the other data back in\n tmp = preds_long[[\"Res_N\",\"Res_type\",\"Res_name\"]]\n tmp = tmp.drop_duplicates(subset=\"Res_name\")\n tmp.index = tmp[\"Res_N\"]\n preds = pd.concat([tmp, preds], axis=1)\n \n # Make columns for the i-1 predicted shifts of C, CA and CB\n preds_m1 = preds[list({\"C\",\"CA\",\"CB\",\"Res_type\"}.\n intersection(preds.columns))].copy()\n preds_m1.index = preds_m1.index+1\n preds_m1.columns = preds_m1.columns + \"m1\"\n preds = pd.merge(preds, preds_m1, how=\"left\", \n left_index=True, right_index=True)\n \n # Restrict to only certain atom types\n atom_set = {\"H\",\"N\",\"C\",\"CA\",\"CB\",\"Cm1\",\"CAm1\",\"CBm1\",\"HA\"}\n preds = preds[[\"Res_name\",\"Res_N\",\"Res_type\",\"Res_typem1\"]+\n list(atom_set.intersection(preds.columns))]\n \n preds.index = 
preds[\"Res_name\"]\n preds.index.name = None\n \n self.preds = preds\n return(self.preds)", "def open_file(file_path):\r\n\r\n global vector_X\r\n global training_data_matrix\r\n global row_number_of_data_matrix\r\n global single_row\r\n\r\n global training_g1_list\r\n global training_g2_list\r\n global training_g3_list\r\n\r\n global test_g1_list\r\n global test_g2_list\r\n global test_g3_list\r\n\r\n # open file\r\n with open(file_path, \"r\") as csvfile:\r\n\r\n line_number = 0\r\n index_of_training_matrix = 0\r\n\r\n # read all rows of csv file\r\n reader = csv.reader(csvfile)\r\n\r\n next(reader, None) # skip the headers\r\n\r\n for row in reader:\r\n\r\n row = row[0]\r\n\r\n # read line split by comma and convert into float numbers\r\n single_row = [float(x) for x in row.split(\";\")]\r\n\r\n # take the first 20% of the data as test data\r\n # and the remaining as the training data\r\n if line_number < row_number_of_test_data_matrix:\r\n\r\n test_data_matrix[line_number] = [1.0] + single_row[:-3]\r\n\r\n test_g1_list[line_number] = single_row[-3]\r\n test_g2_list[line_number] = single_row[-2]\r\n test_g3_list[line_number] = single_row[-1]\r\n\r\n else:\r\n training_data_matrix[index_of_training_matrix] = [1.0] + single_row[:-3]\r\n\r\n training_g1_list[index_of_training_matrix] = single_row[-3]\r\n training_g2_list[index_of_training_matrix] = single_row[-2]\r\n training_g3_list[index_of_training_matrix] = single_row[-1]\r\n\r\n index_of_training_matrix += 1\r\n\r\n if line_number == (row_number_of_data_matrix - 1):\r\n break\r\n\r\n line_number += 1\r\n\r\n return test_data_matrix, training_data_matrix, \\\r\n test_g1_list, test_g2_list, test_g3_list, \\\r\n training_g1_list, training_g2_list, training_g3_list", "def _load_mock_mws_file_fstar_standards(filename):\n C_LIGHT = 300000.0\n desitarget.io.check_fitsio_version()\n data = fitsio.read(filename,\n columns= ['objid','brickid',\n 'RA','DEC','v_helio','SDSSr_true',\n 'SDSSr_obs','SDSSg_obs','SDSSz_obs'])\n\n objid = data['objid'].astype('i8')\n brickid = data['brickid'].astype('i8')\n ra = data['RA'].astype('f8') % 360.0 #enforce 0 < ra < 360\n dec = data['DEC'].astype('f8')\n v_helio = data['v_helio'].astype('f8')\n SDSSr_true = data['SDSSr_true'].astype('f8')\n SDSSg_obs = data['SDSSg_obs'].astype('f8')\n SDSSr_obs = data['SDSSr_obs'].astype('f8')\n SDSSz_obs = data['SDSSz_obs'].astype('f8')\n\n return {'objid':objid,'brickid':brickid,\n 'RA':ra, 'DEC':dec, 'Z': v_helio/C_LIGHT, \n 'SDSSr_true': SDSSr_true,'SDSSr_obs': SDSSr_obs,\n 'SDSSg_obs':SDSSg_obs,'SDSSz_obs':SDSSz_obs}" ]
[ "0.61413145", "0.6100522", "0.56102365", "0.55467093", "0.55414826", "0.5517877", "0.55111635", "0.5491323", "0.5489486", "0.54673564", "0.5458751", "0.54487556", "0.5440117", "0.5364303", "0.53435946", "0.53398484", "0.5336551", "0.5330061", "0.5308056", "0.5289896", "0.526527", "0.5262551", "0.5247705", "0.52463704", "0.524317", "0.5235943", "0.52209574", "0.5219252", "0.52103275", "0.520664", "0.5199027", "0.5196656", "0.5196493", "0.5190151", "0.5189315", "0.5178451", "0.5171908", "0.5163981", "0.5154239", "0.5142631", "0.51419616", "0.5138091", "0.5137457", "0.5133851", "0.51272804", "0.51230544", "0.5107135", "0.5105522", "0.5100935", "0.50922203", "0.5091269", "0.50857115", "0.50814164", "0.5081254", "0.50737315", "0.50684243", "0.5066131", "0.50570065", "0.5056073", "0.5054884", "0.5054192", "0.5051599", "0.5048631", "0.50434023", "0.50417334", "0.5032256", "0.5028691", "0.50230455", "0.5021889", "0.5021303", "0.5017214", "0.501288", "0.5012594", "0.5012092", "0.50113016", "0.50102365", "0.5008861", "0.5007138", "0.5002181", "0.49984762", "0.49982765", "0.49979302", "0.498118", "0.49690557", "0.4963904", "0.49573362", "0.49565467", "0.4956019", "0.49456373", "0.49456036", "0.4943459", "0.49417865", "0.49390626", "0.49350268", "0.49308226", "0.4928995", "0.492722", "0.49267986", "0.49254632", "0.4920815" ]
0.6439688
0
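The .raw-to-PSAT parser in the record above converts every comma-separated PSS/E field through a `to_number` helper that is called but not defined in this excerpt. As a hedged illustration only — the helper's name is taken from the call sites and its real body may differ — a minimal Python version could look like this:

```python
# Hedged sketch: the record above calls a to_number() helper on every
# comma-separated PSS/E field, but its definition is not shown in this
# excerpt. A minimal version (an assumption, not the original) could be:
def to_number(field):
    field = field.strip().strip("'\"")   # drop whitespace and quote marks
    for cast in (int, float):
        try:
            return cast(field)
        except ValueError:
            continue
    return field                         # non-numeric fields stay as text

# Example: one bus-block line from a .raw file.
line = "101,'BUS-101', 138.0, 3, 1, 1, 1, 1.02, -5.3"
print([to_number(f) for f in line.split(",")])
# [101, 'BUS-101', 138.0, 3, 1, 1, 1, 1.02, -5.3]
```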
Crop images into the four corners, center, and their mirrored versions.
def _oversample(images, crop_dims): # Dimensions and center. im_shape = np.array(images[0].shape) crop_dims = np.array(crop_dims) im_center = im_shape[:2] / 2.0 # Make crop coordinates h_indices = (0, im_shape[0] - crop_dims[0]) w_indices = (0, im_shape[1] - crop_dims[1]) crops_ix = np.empty((5, 4), dtype=int) curr = 0 for i in h_indices: for j in w_indices: crops_ix[curr] = (i, j, i + crop_dims[0], j + crop_dims[1]) curr += 1 crops_ix[4] = np.tile(im_center, (1, 2)) + np.concatenate([ -crop_dims / 2.0, crop_dims / 2.0 ]) crops_ix = np.tile(crops_ix, (2, 1)) # Extract crops crops = np.empty((NUM_OVER_SAMPLES * len(images), crop_dims[0], crop_dims[1], im_shape[-1]), dtype=np.float32) ix = 0 for im in images: for crop in crops_ix: crops[ix] = im[crop[0]:crop[2], crop[1]:crop[3], :] ix += 1 crops[ix-5:ix] = crops[ix-5:ix, :, ::-1, :] # flip for mirrors return crops
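The `_oversample` document above builds the five crop windows (four corners plus center) for a whole batch of images and then appends their horizontal mirrors. As a hedged, self-contained illustration of the same ten-crop idea for a single image, the sketch below is assumption-only: the name `ten_crop`, the dummy 256x256 input, and the 224x224 crop size are not part of the dataset entry.

```python
import numpy as np

# Minimal single-image sketch of ten-crop oversampling: four corner crops,
# a center crop, and their horizontal mirrors (10 crops in total).
# Names and sizes here are illustrative only.
def ten_crop(image, crop_h, crop_w):
    h, w = image.shape[:2]
    offsets = [
        (0, 0),                                  # top-left corner
        (0, w - crop_w),                         # top-right corner
        (h - crop_h, 0),                         # bottom-left corner
        (h - crop_h, w - crop_w),                # bottom-right corner
        ((h - crop_h) // 2, (w - crop_w) // 2),  # center
    ]
    crops = [image[i:i + crop_h, j:j + crop_w] for i, j in offsets]
    mirrors = [c[:, ::-1] for c in crops]        # left-right flips
    return np.stack(crops + mirrors)             # (10, crop_h, crop_w, C)

img = np.random.rand(256, 256, 3).astype(np.float32)
print(ten_crop(img, 224, 224).shape)             # (10, 224, 224, 3)
```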
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def crop_center_img(self):\n # TODO Task 1.1\n img = self.data\n img_with_missing_crop = np.copy(img)\n dim =128\n crop = dim // 2\n start = crop - (crop // 2)\n #ground truth overlaps img_with_missing_crop by 7 pixels in all directions\n img_with_missing_crop[:,start+7:start + crop-7, start+7:start + crop-7,:] = 0\n #255\n #inpu = Image.fromarray((img_with_missing_crop[1,:,:,:]*255).astype('uint8'))\n #inpu.save(\"cropped.png\")\n groundtruth_crop = img[:,start:start + crop, start:start + crop,:]\n self.data = (img_with_missing_crop, groundtruth_crop)", "def central_area_crop(imgs_array, crop_size=(144, 192, 160)):\n orig_shape = np.array(imgs_array.shape)\n crop_shape = np.array(crop_size)\n center = orig_shape // 2\n lower_limits = center - crop_shape // 2 # (13, 24, 40) (5, 24, 40)\n upper_limits = center + crop_shape // 2 # (141, 216, 200) (149, 216, 200)\n # upper_limits = lower_limits + crop_shape\n imgs_array = imgs_array[lower_limits[0]: upper_limits[0],\n lower_limits[1]: upper_limits[1], lower_limits[2]: upper_limits[2]]\n return imgs_array", "def crop_images_to_galaxy(self):\r\n\r\n left, right, top, bottom = 0, 0, 0, 0\r\n \r\n for _, img in self.images():\r\n seg_img = load_gals.get_seg_img(img)\r\n gal_val = seg_img[int(seg_img.shape[0] / 2), int(seg_img.shape[1] / 2)]\r\n inds = np.argwhere(seg_img == gal_val)\r\n x_inds, y_inds = inds[:,1], inds[:,0]\r\n left += np.min(x_inds); right += np.max(x_inds)\r\n top += np.min(y_inds); bottom += np.max(y_inds)\r\n \r\n left /= float(self.num_wb); right /= float(self.num_wb)\r\n top /= float(self.num_wb); bottom /= float(self.num_wb)\r\n\r\n center_x, center_y = self.width / 2, self.height / 2\r\n size = int(max(center_x - left, right - center_x, center_y - top, bottom - center_y)) + 2\r\n left, right = int(center_x - size), int(center_x + size)\r\n top, bottom = int(center_y - size), int(center_y + size)\r\n\r\n # make sure the values found are valid\r\n try:\r\n assert top >= 0; assert left >= 0; \r\n assert bottom <= self.height; assert right <= self.width\r\n except AssertionError:\r\n size -= 2\r\n left, right = int(center_x - size), int(center_x + size)\r\n top, bottom = int(center_y - size), int(center_y + size)\r\n if top < 0 or left < 0 or bottom > self.height or right > self.width:\r\n raise CroppingError\r\n\r\n # crop the images\r\n for c in self.gal_dict.keys():\r\n self.gal_dict[c][0].data = self.gal_dict[c][0].data[top:bottom, left:right]\r\n \r\n return left, right, top, bottom", "def _crop_image_and_paste(self, image, center, size):\n center_y, center_x = center\n target_h, target_w = size\n img_h, img_w, img_c = image.shape\n\n x0 = max(0, center_x - target_w // 2)\n x1 = min(center_x + target_w // 2, img_w)\n y0 = max(0, center_y - target_h // 2)\n y1 = min(center_y + target_h // 2, img_h)\n patch = np.array((int(x0), int(y0), int(x1), int(y1)))\n\n left, right = center_x - x0, x1 - center_x\n top, bottom = center_y - y0, y1 - center_y\n\n cropped_center_y, cropped_center_x = target_h // 2, target_w // 2\n cropped_img = np.zeros((target_h, target_w, img_c), dtype=image.dtype)\n for i in range(img_c):\n cropped_img[:, :, i] += self.mean[i]\n y_slice = slice(cropped_center_y - top, cropped_center_y + bottom)\n x_slice = slice(cropped_center_x - left, cropped_center_x + right)\n cropped_img[y_slice, x_slice, :] = image[y0:y1, x0:x1, :]\n\n border = np.array([\n cropped_center_y - top, cropped_center_y + bottom,\n cropped_center_x - left, cropped_center_x + right\n ],\n dtype=np.float32)\n\n return cropped_img, 
border, patch", "def center_crop(img:np.array,output_size:List[int])->np.array:\n\n if isinstance(output_size,numbers.Number):\n output_size = (int(output_size),int(output_size))\n elif isinstance(output_size,(tuple,list)) and len(output_size)==1:\n output_size =(output_size[0],output_size[0])\n \n image_height,image_width,_=img.shape\n crop_height,crop_width=output_size\n if crop_width > image_width or crop_height > image_height:\n padding_ltrb = [\n (crop_width - image_width) // 2 if crop_width > image_width else 0,\n (crop_height - image_height) // 2 if crop_height > image_height else 0,\n (crop_width - image_width + 1) // 2 if crop_width > image_width else 0,\n (crop_height - image_height + 1) // 2 if crop_height > image_height else 0,\n ]\n img=cv.copyMakeBorder(img,padding_ltrb[1],padding_ltrb[3],padding_ltrb[0],padding_ltrb[2],cv.BORDER_CONSTANT,value=(0,0,0))\n image_height,image_width,_=img.shape\n if crop_width == image_width and crop_height == image_height:\n return img\n\n crop_top = int(round((image_height - crop_height) / 2.))\n crop_left = int(round((image_width - crop_width) / 2.))\n return img[crop_top:crop_top+crop_height,crop_left:crop_left+crop_width]", "def crop_images_same_dir(data_set_dir):\n data = glob(os.path.join(data_set_dir, \"*.png\"))\n\n # data_length = len(data)\n # high < 256 because want to cut the bottom\n # offs_h = np.random.randint(low=0, high=200, size=data_length)\n # offs_h_end = offs_h + 256\n # offs_w = np.random.randint(low=0, high=512, size=data_length)\n # offs_w_end = offs_w + 512\n # print offs_h, offs_h_end\n\n for index, filePath in enumerate(data):\n print ('%d/%d' % (index, len(data)))\n\n img = scipy.misc.imread(filePath).astype(np.float)\n #img = scipy.misc.imresize(img, 0.25, interp='bilinear', mode=None)\n #scipy.misc.imsave('/home/andy/dataset/CITYSCAPES/CITYSCAPES_crop_random/' + filePath.split('/')[-1],\n # img[offs_h[index]:offs_h_end[index], offs_w[index]:offs_w_end[index] :])\n scipy.misc.imsave('/home/andy/dataset/CITYSCAPES/CITYSCAPES_crop_bottom_192/' + filePath.split('/')[-1],\n img[0:192, :, :])\n #break", "def _central_crop(image_list, crop_height, crop_width):\n outputs = []\n for image in image_list:\n image_height = tf.shape(image)[0]\n image_width = tf.shape(image)[1]\n\n offset_height = (image_height - crop_height) / 2\n offset_width = (image_width - crop_width) / 2\n\n outputs.append(_crop(image, offset_height, offset_width,\n crop_height, crop_width))\n return outputs", "def crop_all(images, center, width, height):\n cropped_images = {}\n for band, img in images.items():\n cropped_images[band] = crop(img, center, width, height)\n return cropped_images", "def get_crops(x_train, y_train, offset=4):\n\ttopleft = iaa.Sequential([\n\t\tiaa.Crop(px=(4 - offset, offset, offset, 4 - offset)),\n\t\tiaa.Affine(scale=1.166666667)\n\t])\n\ttopright = iaa.Sequential([\n\t\tiaa.Crop(px=(4 - offset, 4 - offset, offset, offset)),\n\t\tiaa.Affine(scale=1.166666667)\n\t])\n\tbotleft = iaa.Sequential([\n\t\tiaa.Crop(px=(offset, offset, 4 - offset, 4 - offset)),\n\t\tiaa.Affine(scale=1.166666667)\n\t])\n\tbotright = iaa.Sequential([\n\t\tiaa.Crop(px=(offset, 4 - offset, 4 - offset, offset)),\n\t\tiaa.Affine(scale=1.166666667)\n\t])\n\tcenter = iaa.Sequential([\n\t\tiaa.Crop(px=(2, 2, 2, 2)),\n\t\tiaa.Affine(scale=1.166666667)\n\t])\n\taugs = [topleft, topright, botleft, botright, center]\n\n\taug_imgs = []\n\tfor aug in tqdm(augs):\n\t\taug_imgs.append(aug.augment_images(x_train * 255))\n\n\taug_x_train = [item for sublist in aug_imgs 
for item in sublist]\n\taug_y_train = y_train * 5\n\n\treturn aug_x_train, aug_y_train", "def crop(img, centers, mode):\n h, w, c = img.shape\n corners = compute_corners(centers, mode)\n top_left, top_right, bottom_left, bottom_right = corners\n\n # New shape for rotated crop\n f_px_norm = lambda x: np.round(np.linalg.norm(x, axis=1)).astype(np.int32)\n new_h = f_px_norm(bottom_left - top_left)\n new_w = f_px_norm(top_right - top_left)\n\n crops = []\n for i in range(len(centers)):\n tl = top_left[i]\n tr = top_right[i]\n bl = bottom_left[i]\n br = bottom_right[i]\n\n # Get a rotated crop centered on each face\n corners = np.stack((tl, tr, bl)).astype(np.float32)\n new_corners = np.asarray([[0., 0.], [new_w[i], 0.], [0., new_h[i]]],\n dtype=np.float32)\n M = cv2.getAffineTransform(corners, new_corners)\n crop = cv2.warpAffine(img, M, (new_w[i], new_h[i]), flags=cv2.INTER_LANCZOS4)\n crops.append(crop)\n\n return crops[0]", "def central_image_crop(img, crop_width, crop_heigth):\n half_the_width = img.shape[1] / 2\n img = img[(img.shape[0] - crop_heigth): img.shape[0],\n (half_the_width - (crop_width / 2)): (half_the_width + (crop_width / 2))]\n img = img.reshape(img.shape[0], img.shape[1], 1)\n return img", "def five_crop(self, img):\n w, h = img.size\n crop_h, crop_w = self.size\n if crop_w > w or crop_h > h:\n raise ValueError(\"Requested crop size {} is bigger than input size {}\".format(self.size,\n (h, w)))\n tl = img.crop((0, 0, crop_w, crop_h))\n tr = img.crop((w - crop_w, 0, w, crop_h))\n bl = img.crop((0, h - crop_h, crop_w, h))\n br = img.crop((w - crop_w, h - crop_h, w, h))\n center = self.center_crop(img)\n return (tl, tr, bl, br, center)", "def image_crop_pad_cv2(images,pos_x,pos_y,pix,final_h,final_w,padding_mode=\"cv2.BORDER_CONSTANT\"):\r\n #Convert position of cell from \"um\" to \"pixel index\"\r\n pos_x,pos_y = pos_x/pix,pos_y/pix \r\n\r\n for i in range(len(images)):\r\n image = images[i]\r\n \r\n #Compute the edge-coordinates that define the cropped image\r\n y1 = np.around(pos_y[i]-final_h/2.0) \r\n x1 = np.around(pos_x[i]-final_w/2.0) \r\n y2 = y1+final_h \r\n x2 = x1+final_w\r\n\r\n #Are these coordinates within the oringinal image?\r\n #If not, the image needs padding\r\n pad_top,pad_bottom,pad_left,pad_right = 0,0,0,0\r\n\r\n if y1<0:#Padding is required on top of image\r\n pad_top = int(abs(y1))\r\n y1 = 0 #set y1 to zero and pad pixels after cropping\r\n \r\n if y2>image.shape[0]:#Padding is required on bottom of image\r\n pad_bottom = int(y2-image.shape[0])\r\n y2 = image.shape[0]\r\n \r\n if x1<0:#Padding is required on left of image\r\n pad_left = int(abs(x1))\r\n x1 = 0\r\n \r\n if x2>image.shape[1]:#Padding is required on right of image\r\n pad_right = int(x2-image.shape[1])\r\n x2 = image.shape[1]\r\n \r\n #Crop the image\r\n temp = image[int(y1):int(y2),int(x1):int(x2)]\r\n\r\n if pad_top+pad_bottom+pad_left+pad_right>0:\r\n if padding_mode==\"Delete\":\r\n temp = np.zeros_like(temp)\r\n else:\r\n #Perform all padding operations in one go\r\n temp = cv2.copyMakeBorder(temp, pad_top, pad_bottom, pad_left, pad_right, eval(padding_mode))\r\n \r\n images[i] = temp\r\n \r\n return images", "def _crop_frames(self, frames, center_crop=True):\n cropped_frames = []\n crop_location = 0.5 if center_crop else np.random.random_sample()\n for frame in frames:\n cropped_frame = self._crop_frame(frame, crop_location)\n cropped_frames.append(cropped_frame)\n\n return np.array(cropped_frames)", "def center_crop2fixed_cut(im, masks, mask, boxes, classes, target_width, 
target_height, min_size=2):\n\n h, w, c = im.shape\n if float(target_width) / w > float(target_height) / h:\n new_w, new_h = int(target_width), int(float(target_width) / w * h)\n else:\n new_w, new_h = int(float(target_height) / h * w), int(target_height)\n\n scale = float(new_w) / w\n offset_w, offset_h = 0, 0\n if new_w - target_width + 1 > 0 and new_h - target_height + 1 > 0:\n offset_w = np.random.randint(0, new_w - target_width + 1)\n offset_h = np.random.randint(0, new_h - target_height + 1)\n # offset_w = int((new_w - target_width) / 2)\n # offset_h = int((new_h - target_height) / 2)\n\n im = cv2.resize(im, (new_w, new_h))\n mask = cv2.resize(mask, (new_w, new_h), interpolation=cv2.INTER_NEAREST)\n im = im[offset_h: (offset_h + target_height), offset_w: (offset_w + target_width)]\n mask = mask[offset_h: (offset_h + target_height), offset_w: (offset_w + target_width)]\n\n flip = np.random.uniform() > 0.5\n if flip:\n im = cv2.flip(im, 1)\n mask = cv2.flip(mask, 1)\n\n if masks.size > 0:\n masks = np.transpose(masks, (1, 2, 0)) # to (h, w, n)\n masks = cv2.resize(masks, (new_w, new_h), interpolation=cv2.INTER_NEAREST)\n masks = masks[offset_h: (offset_h + target_height), offset_w: (offset_w + target_width)]\n if flip:\n masks = cv2.flip(masks, 1)\n try:\n if masks.ndim > 2:\n masks = np.transpose(masks, (2, 0, 1)) # to (n, h, w)\n else:\n masks = masks.reshape((1, target_height, target_width))\n except ValueError:\n print (masks.ndim, masks.shape)\n raise\n else:\n masks = np.zeros((0, target_height, target_width), masks.dtype)\n\n # bboxes\n boxes = _offset_boxes(boxes, [target_height, target_width], scale, [offset_w, offset_h], flip)\n # boxes *= scale\n # boxes = clip_boxes(boxes, [target_height, target_width])\n # if flip:\n # boxes_x = np.copy(boxes[:, 0])\n # boxes[:, 0] = target_width - boxes[:, 2]\n # boxes[:, 2] = target_width - boxes_x\n\n boxes, classes, masks = _filter_invalid_boxes(boxes, classes, masks, min_size=min_size)\n\n return im, masks, mask, boxes, classes", "def crop(masks, boxes, padding: int = 1):\n h, w, n = masks.shape\n x1, x2 = sanitize_coordinates(boxes[:, 0:1:1], boxes[:, 2:3:1], w, padding, cast=False)\n y1, y2 = sanitize_coordinates(boxes[:, 1:2:1], boxes[:, 3:4:1], h, padding, cast=False)\n\n cast = P.Cast()\n broadcast_to = P.BroadcastTo((h, w, n))\n row = broadcast_to((P.range(Tensor(0, mindspore.int32),\n Tensor(w, mindspore.int32),\n Tensor(1, mindspore.int32)).view(1, -1, 1)))\n rows = cast(row, x1.dtype)\n col = broadcast_to((P.range(Tensor(0, mindspore.int32),\n Tensor(w, mindspore.int32),\n Tensor(1, mindspore.int32)).view(-1, 1, 1)))\n cols = cast(col, x2.dtype)\n\n\n masks_left = rows >= x1.view(1, 1, -1)\n masks_right = rows < x2.view(1, 1, -1)\n masks_left = P.Cast()(masks_left, mindspore.float16)\n masks_right = P.Cast()(masks_right, mindspore.float16)\n crop_mask = masks_left * masks_right\n masks_up = cols >= y1.view(1, 1, -1)\n masks_up = P.Cast()(masks_up, mindspore.float16)\n crop_mask *= masks_up\n masks_down = cols < y2.view(1, 1, -1)\n masks_down = P.Cast()(masks_down, mindspore.float16)\n crop_mask *= masks_down\n\n return masks * crop_mask", "def crop_center(image: Matrix, crop_x: int, crop_y: int) -> Matrix:\n y, x = image.shape\n start_x = x // 2 - (crop_x // 2)\n start_y = y // 2 - (crop_y // 2)\n return image[start_y:start_y + crop_y, start_x:start_x + crop_x].copy()", "def central_crop(image, x_crop=27, y_crop=45, z_crop=27):\n image = image[x_crop:-x_crop, y_crop:-y_crop, z_crop:-z_crop, :]\n\n return image", "def 
crop_images(dataset_dir):\n data = []\n for folder in os.listdir(dataset_dir):\n path = os.path.join(dataset_dir, folder, \"*.png\")\n data.extend(glob(path))\n\n for index, filePath in enumerate(data):\n print ('{}/{}'.format(index, len(data)))\n\n img = scipy.misc.imread(filePath).astype(np.uint8)\n img = scipy.misc.imresize(img, 0.25, interp='bilinear', mode=None)\n scipy.misc.imsave('/data/vllab1/dataset/CITYSCAPES/CITY_test/fine_image/' + filePath.split('/')[-1], img)", "def bulk_crop_images(input_path, output_path, dims, extension):\n for dir_path, dir_names, filenames in os.walk(input_path):\n structure = os.path.join(output_path, dir_path[len(input_path) + 1:])\n for file in filenames:\n if file.endswith(extension):\n src = os.path.join(dir_path, file)\n width, height = Image.open(src).size\n if width > dims[0] or height > dims[1]:\n img = cv2.imread(src, 0)\n img = crop_around_center(img, dims[0], dims[1])\n dest = os.path.join(structure, file)\n cv2.imwrite(dest, img)", "def apply_center(unctr_img_dir):\n uncentered_image_names = glob(unctr_img_dir)\n DIR = Path(\".\")\n for img_name in uncentered_image_names:\n\n # normalize images\n img = ~cv2.imread(img_name, cv2.COLOR_BGR2GRAY)\n IMG = os.path.basename(img_name)\n\n h, w = img.shape\n # h = w = max(w,h) \n h = int(h//2 )\n w = int(w//2 )\n\n dst = center_image(img)\n hmargin = 30\n vmargin = 10\n dst = dst[vmargin:-vmargin, hmargin:-hmargin]\n outputPath = str((DIR/\"png_normalized\"/IMG))\n cv2.imwrite(outputPath, ~dst)", "def _images_and_boxes_preprocessing(self, imgs, boxes):\r\n # Image [0, 255] -> [0, 1].\r\n imgs = imgs.float()\r\n imgs = imgs / 255.0\r\n\r\n height, width = imgs.shape[2], imgs.shape[3]\r\n # The format of boxes is [x1, y1, x2, y2]. The input boxes are in the\r\n # range of [0, 1].\r\n boxes[:, [0, 2]] *= width\r\n boxes[:, [1, 3]] *= height\r\n boxes = transform.clip_boxes_to_image(boxes, height, width)\r\n\r\n if self._split == \"train\":\r\n # Train split\r\n imgs, boxes = transform.random_short_side_scale_jitter(\r\n imgs,\r\n min_size=self._jitter_min_scale,\r\n max_size=self._jitter_max_scale,\r\n boxes=boxes,\r\n )\r\n imgs, boxes = transform.random_crop(imgs, self._crop_size, boxes=boxes)\r\n\r\n # Random flip.\r\n imgs, boxes = transform.horizontal_flip(0.5, imgs, boxes=boxes)\r\n elif self._split == \"val\":\r\n # Val split\r\n # Resize short side to crop_size. Non-local and STRG uses 256.\r\n imgs, boxes = transform.random_short_side_scale_jitter(\r\n imgs, min_size=self._crop_size, max_size=self._crop_size, boxes=boxes\r\n )\r\n\r\n # Apply center crop for val split\r\n imgs, boxes = transform.uniform_crop(\r\n imgs, size=self._crop_size, spatial_idx=1, boxes=boxes\r\n )\r\n\r\n if self._test_force_flip:\r\n imgs, boxes = transform.horizontal_flip(1, imgs, boxes=boxes)\r\n elif self._split == \"test\":\r\n # Test split\r\n # Resize short side to crop_size. 
Non-local and STRG uses 256.\r\n imgs, boxes = transform.random_short_side_scale_jitter(\r\n imgs, min_size=self._crop_size, max_size=self._crop_size, boxes=boxes\r\n )\r\n\r\n if self._test_force_flip:\r\n imgs, boxes = transform.horizontal_flip(1, imgs, boxes=boxes)\r\n else:\r\n raise NotImplementedError(\"{} split not supported yet!\".format(self._split))\r\n\r\n # Do color augmentation (after divided by 255.0).\r\n if self._split == \"train\" and self._use_color_augmentation:\r\n if not self._pca_jitter_only:\r\n imgs = transform.color_jitter(\r\n imgs, img_brightness=0.4, img_contrast=0.4, img_saturation=0.4\r\n )\r\n\r\n imgs = transform.lighting_jitter(\r\n imgs,\r\n alphastd=0.1,\r\n eigval=np.array(self._pca_eigval).astype(np.float32),\r\n eigvec=np.array(self._pca_eigvec).astype(np.float32),\r\n )\r\n\r\n # Normalize images by mean and std.\r\n imgs = transform.color_normalization(\r\n imgs,\r\n np.array(self._data_mean, dtype=np.float32),\r\n np.array(self._data_std, dtype=np.float32),\r\n )\r\n\r\n if self._use_bgr:\r\n # Convert image format from RGB to BGR.\r\n # Note that Kinetics pre-training uses RGB!\r\n imgs = imgs[:, [2, 1, 0], ...]\r\n\r\n boxes = transform.clip_boxes_to_image(boxes, self._crop_size, self._crop_size)\r\n\r\n return imgs, boxes", "def concat_3dimages_corners(imga, imgb, xoffset=0, yoffset=0, zoffset=0,\n transpose=True, ontop=True, center_offset=True,\n adjust_z=(0, 1)):\n print(\"Concating images with reference point being the lower left corner\")\n if transpose:\n print(\"Transpose images\")\n imga = np.transpose(imga, axes=(0, 2, 1))\n imgb = np.transpose(imgb, axes=(0, 2, 1))\n\n offset = (abs(zoffset), abs(yoffset), abs(xoffset))\n max_dim = np.maximum.reduce([imga.shape, np.add(imgb.shape, offset)])\n\n # center_a = np.array(np.divide(imga.shape, 2), dtype=int)\n # center_b = np.array(np.divide(imgb.shape, 2), dtype=int)\n\n # if (max_dim == imgb.shape).all():\n # tmp = np.copy(imgb)\n # imgb = np.copy(imga)\n # imga = np.copy(tmp)\n # ontop = toggle(ontop)\n # xoffset *= -1\n # yoffset *= -1\n # zoffset *= -1\n\n # tmp_offset = np.array(offset)\n # tmp_offset[tmp_offset > 0] = 0\n # new_img = np.full(np.add(max_dim, np.abs(offset)), np.nan)\n new_img = np.full(max_dim, np.nan)\n\n Sa0 = slice(0, imga.shape[0])\n Sa1 = slice(0, imga.shape[1])\n Sa2 = slice(0, imga.shape[2])\n Sb0 = slice(abs(zoffset), abs(zoffset) + imgb.shape[0])\n Sb1 = slice(abs(yoffset), abs(yoffset) + imgb.shape[1])\n Sb2 = slice(abs(xoffset), abs(xoffset) + imgb.shape[2])\n\n xdir = np.sign(xoffset)\n ydir = np.sign(yoffset)\n zdir = np.sign(zoffset)\n\n if ydir == 0:\n ydir = 1\n if xdir == 0:\n xdir = 1\n if zdir == 0:\n zdir = 1\n\n imga = imga[::zdir, ::ydir, ::xdir]\n imgb = imgb[::zdir, ::ydir, ::xdir]\n\n if adjust_z:\n for ix in adjust_z:\n top_img = 1 * new_img[ix]\n top_img[Sa1, Sa2] = imga[ix]\n top_img[Sb1, Sb2] = imgb[ix]\n low_img = 1 * new_img[ix]\n low_img[Sb1, Sb2] = imgb[ix]\n low_img[Sa1, Sa2] = imga[ix]\n\n diff = top_img - low_img\n m = np.nanmean(diff)\n s = np.nanstd(diff)\n mask = np.abs(diff) < m + s\n diff[mask] = np.nan\n add = np.nanmean(diff)\n\n print(add)\n\n imgb[ix] -= add\n\n print(\"new_img shape: \", new_img.shape)\n\n if ontop:\n new_img[Sa0, Sa1, Sa2] = imga\n new_img[Sb0, Sb1, Sb2] = imgb\n else:\n new_img[Sb0, Sb1, Sb2] = imgb\n new_img[Sa0, Sa1, Sa2] = imga\n\n new_img\n\n if transpose:\n print(\"Transpose back\")\n return np.transpose(new_img[::zdir, ::ydir, ::xdir], axes=(0, 2, 1))\n else:\n return new_img[::zdir, ::ydir, ::xdir]", 
"def crop_around_center(image: np.ndarray, size: Union[tuple, list, np.ndarray]) -> np.ndarray:\n\n if type(size) is int and isinstance(size, str) and len(size) < 2:\n raise ValueError(\"size has to be a list or tuple or array with at least two int elements\")\n\n # For tensor processing\n image = _convert_tensor_to_numpy_if_possible(image)\n\n height = size[0]\n width = size[1]\n\n image_size = (image.shape[1], image.shape[0])\n image_center = (int(image_size[0] * 0.5), int(image_size[1] * 0.5))\n\n if width > image_size[0]:\n width = image_size[0]\n\n if height > image_size[1]:\n height = image_size[1]\n\n x1 = int(image_center[0] - width * 0.5)\n x2 = int(image_center[0] + width * 0.5)\n y1 = int(image_center[1] - height * 0.5)\n y2 = int(image_center[1] + height * 0.5)\n\n return image[y1:y2, x1:x2]", "def _crop_write_image(self, inroot, images, outroot):\n for image in images:\n inimage_path = osp.join(inroot, image)\n cvimg = cv2.imread(inimage_path)\n cvimg = cvimg[60:-30, 25:-25]\n h, w, _ = cvimg.shape\n assert h == w == 128\n outimage_path = osp.join(outroot, image)\n cv2.imwrite(outimage_path, cvimg)\n print(outimage_path)", "def _generate_crop_images(\n crop_boxes, image, points_grid, layer_idxs, target_size, original_size, input_data_format=None\n):\n cropped_images = []\n total_points_per_crop = []\n for i, crop_box in enumerate(crop_boxes):\n left, top, right, bottom = crop_box\n\n channel_dim = infer_channel_dimension_format(image, input_data_format)\n if channel_dim == ChannelDimension.LAST:\n cropped_im = image[top:bottom, left:right, :]\n else:\n cropped_im = image[:, top:bottom, left:right]\n\n cropped_images.append(cropped_im)\n\n cropped_im_size = get_image_size(cropped_im, channel_dim)\n points_scale = np.array(cropped_im_size)[None, ::-1]\n\n points = points_grid[layer_idxs[i]] * points_scale\n normalized_points = _normalize_coordinates(target_size, points, original_size)\n total_points_per_crop.append(normalized_points)\n\n return cropped_images, total_points_per_crop", "def split_image(input_image_path):\n image = cv2.imread(input_image_path)\n cv2.imshow(\"Original Image\", image)\n cv2.waitKey(0)\n\n height, width, channels = image.shape[:3]\n print(image.shape)\n\n witdthdividedby3 = width / 3\n\n # Let's get the starting pixel coordiantes (top left of cropped top)\n start_row, start_col = int(0), int(0)\n # Let's get the ending pixel coordinates (bottom right of cropped top)\n end_row, end_col = int(height), int(witdthdividedby3)\n cropped_right = image[start_row:end_row, start_col:end_col]\n cv2.imshow(\"Cropped right\", cropped_right)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n end_row, end_col = int(height), int(witdthdividedby3 * 2)\n start_row, start_col = int(0), int(witdthdividedby3)\n cropped_mid = image[start_row:end_row, start_col:end_col]\n cv2.imshow(\"Cropped mid\", cropped_mid)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n end_row, end_col = int(height), int(witdthdividedby3 * 3)\n start_row, start_col = int(0), int(witdthdividedby3 * 2)\n cropped_left = image[start_row:end_row, start_col:end_col]\n cv2.imshow(\"Cropped left\", cropped_left)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n return cropped_right, cropped_mid, cropped_left", "def _images_and_boxes_preprocessing(self, imgs, boxes, gt_boxes=None):\n # Image [0, 255] -> [0, 1].\n imgs = imgs.float()\n imgs = imgs / 255.0\n\n height, width = imgs.shape[2], imgs.shape[3]\n # The format of boxes is [x1, y1, x2, y2]. 
The input boxes are in the\n # range of [0, 1].\n # boxes[:, [0, 2]] *= width\n # boxes[:, [1, 3]] *= height\n boxes = transform.clip_boxes_to_image(boxes, height, width)\n\n if self._split == \"train\":\n # Train split\n imgs, boxes = transform.random_short_side_scale_jitter(\n imgs,\n min_size=self._jitter_min_scale,\n max_size=self._jitter_max_scale,\n boxes=boxes,\n )\n imgs, boxes = transform.random_crop(\n imgs, self._crop_size, boxes=boxes\n )\n\n # Random flip.\n imgs, boxes = transform.horizontal_flip(0.5, imgs, boxes=boxes)\n elif self._split == \"val\":\n # Val split\n # Resize short side to crop_size. Non-local and STRG uses 256.\n imgs, boxes = transform.random_short_side_scale_jitter(\n imgs,\n min_size=self._crop_size,\n max_size=self._crop_size,\n boxes=boxes,\n )\n\n # Apply center crop for val split\n imgs, boxes = transform.uniform_crop(\n imgs, size=self._crop_size, spatial_idx=1, boxes=boxes\n )\n\n if self._test_force_flip:\n imgs, boxes = transform.horizontal_flip(1, imgs, boxes=boxes)\n elif self._split == \"test\":\n # Test split\n # Resize short side to crop_size. Non-local and STRG uses 256.\n imgs, boxes = transform.random_short_side_scale_jitter(\n imgs,\n min_size=self._crop_size,\n max_size=self._crop_size,\n boxes=boxes,\n )\n\n if self._test_force_flip:\n imgs, boxes = transform.horizontal_flip(1, imgs, boxes=boxes)\n else:\n raise NotImplementedError(\n \"{} split not supported yet!\".format(self._split)\n )\n\n # Do color augmentation (after divided by 255.0).\n if self._split == \"train\" and self._use_color_augmentation:\n if not self._pca_jitter_only:\n imgs = transform.color_jitter(\n imgs,\n img_brightness=0.4,\n img_contrast=0.4,\n img_saturation=0.4,\n )\n\n imgs = transform.lighting_jitter(\n imgs,\n alphastd=0.1,\n eigval=np.array(self._pca_eigval).astype(np.float32),\n eigvec=np.array(self._pca_eigvec).astype(np.float32),\n )\n\n # Normalize images by mean and std.\n imgs = transform.color_normalization(\n imgs,\n np.array(self._data_mean, dtype=np.float32),\n np.array(self._data_std, dtype=np.float32),\n )\n\n if not self._use_bgr:\n # Convert image format from BGR to RGB.\n # Note that Kinetics pre-training uses RGB!\n imgs = imgs[:, [2, 1, 0], ...]\n\n boxes = transform.clip_boxes_to_image(\n boxes, self._crop_size, self._crop_size\n )\n\n return imgs, boxes", "def crop(self,channel,center_coord,crop_size,z_coord=None,z_size=1): \n x1=center_coord[0]-int(crop_size/2)\n x2=x1+crop_size\n y1=center_coord[1]-int(crop_size/2)\n y2=y1+crop_size\n img_crop=MicImage()\n img_crop._metaData={**self._metaData}\n img_crop.xml=self.xml\n\n\n if z_coord is not None and z_size>1:\n z1=z_coord-int(z_size/2)\n if z1<0:\n z1=0\n z2=z1+z_size\n if (z_coord is not None and z_size==1):\n z1=z_coord\n z2=z1+1\n if z_coord is None:\n z1=0\n z2=-1\n\n img_crop.pixels= self.pixels[z1:z2,x1:x2,y1:y2,channel]\n \n if img_crop.pixels.shape[0]==1:\n img_crop.pixels=np.squeeze(img_crop.pixels)\n img_crop.sumprj=np.squeeze(img_crop.pixels)\n img_crop.maxprj=np.squeeze(img_crop.pixels)\n else:\n img_crop.prj(\"max\")\n img_crop.prj(\"sum\")\n img_crop._metaData.update({\"size_x\": crop_size})\n img_crop._metaData.update({\"size_x\": crop_size})\n\n return img_crop", "def image_crop_pad_cv2(images,pos_x,pos_y,pix,final_h,final_w,padding_mode=\"cv2.BORDER_CONSTANT\"):\r\n #Convert position of cell from \"um\" to \"pixel index\"\r\n pos_x = [pos_x_/pix for pos_x_ in pos_x]\r\n pos_y = [pos_y_/pix for pos_y_ in pos_y]\r\n padding_modes = 
[\"cv2.BORDER_CONSTANT\",\"cv2.BORDER_REFLECT\",\"cv2.BORDER_REFLECT_101\",\"cv2.BORDER_REPLICATE\",\"cv2.BORDER_WRAP\"]\r\n \r\n for i in range(len(images)):\r\n image = images[i]\r\n \r\n #Compute the edge-coordinates that define the cropped image\r\n y1 = np.around(pos_y[i]-final_h/2.0) \r\n x1 = np.around(pos_x[i]-final_w/2.0) \r\n y2 = y1+final_h \r\n x2 = x1+final_w\r\n\r\n #Are these coordinates within the oringinal image?\r\n #If not, the image needs padding\r\n pad_top,pad_bottom,pad_left,pad_right = 0,0,0,0\r\n\r\n if y1<0:#Padding is required on top of image\r\n pad_top = int(abs(y1))\r\n y1 = 0 #set y1 to zero and pad pixels after cropping\r\n \r\n if y2>image.shape[0]:#Padding is required on bottom of image\r\n pad_bottom = int(y2-image.shape[0])\r\n y2 = image.shape[0]\r\n \r\n if x1<0:#Padding is required on left of image\r\n pad_left = int(abs(x1))\r\n x1 = 0\r\n \r\n if x2>image.shape[1]:#Padding is required on right of image\r\n pad_right = int(x2-image.shape[1])\r\n x2 = image.shape[1]\r\n \r\n #Crop the image\r\n temp = image[int(y1):int(y2),int(x1):int(x2)]\r\n\r\n if pad_top+pad_bottom+pad_left+pad_right>0:\r\n if padding_mode.lower()==\"delete\":\r\n temp = np.zeros_like(temp)\r\n else:\r\n #Perform all padding operations in one go\r\n if padding_mode.lower()==\"alternate\":\r\n ind = rand_state.randint(low=0,high=len(padding_modes))\r\n padding_mode = padding_modes[ind]\r\n temp = cv2.copyMakeBorder(temp, pad_top, pad_bottom, pad_left, pad_right, eval(padding_modes[ind]))\r\n else:\r\n temp = cv2.copyMakeBorder(temp, pad_top, pad_bottom, pad_left, pad_right, eval(padding_mode))\r\n \r\n images[i] = temp\r\n \r\n return images", "def doCrop(image, x, y, w, h):\n\tcrop_height = int((config.FACE_HEIGHT / float(config.FACE_WIDTH)) * w)\n\tmidy = y + h/2\n\ty1 = max(0, midy-crop_height/2)\n\ty2 = min(image.shape[0]-1, midy+crop_height/2)\n\treturn image[y1:y2, x:x+w]", "def clip(self):\n \n subprocess.call(['gdaltindex', self.extent, self.referenceImagePath])\n dataNames = sorted(glob.glob(self.fullPath + '/full*.tif'))\n splitAt = len(self.fullPath) + 1\n\n for i in range(len(dataNames)):\n x = dataNames[i]\n y = dataNames[i][:splitAt] + dataNames[i][splitAt+4:]\n subprocess.call(['gdalwarp', '-r', 'near', '-cutline', self.extent, '-crop_to_cutline', x, y, '-dstnodata', '9999'])\n \n for n in dataNames:\n os.remove(n)\n dataNames = sorted(glob.glob(self.fullPath + '/*.tif'))\n test = gdal.Open(dataNames[0]).ReadAsArray()\n logger.log('SUCCESS', 'Clipping complete! 
%d %s files were successfully clipped to the size of %s with dimensions %d rows by %d columns' % (len(dataNames), str(self.outformat), str(self.referenceImagePath), test.shape[0], test.shape[1]))", "def center_crop2fixed_pad(im, masks, mask, boxes, classes, target_width, target_height, min_size=2):\n\n h, w, c = im.shape\n ir, tr = float(h) / w, float(target_height) / target_width\n if ir > tr:\n borderw, borderh = int((h / tr - w) / 2), 0\n else:\n borderh, borderw = int((w * tr - h) / 2), 0\n\n im = cv2.copyMakeBorder(im, borderh, borderh, borderw, borderw, cv2.BORDER_CONSTANT, value=[103, 116, 123])\n mask = cv2.copyMakeBorder(mask, borderh, borderh, borderw, borderw, cv2.BORDER_CONSTANT, value=[0])\n n = masks.shape[0]\n if n > 1:\n masks = [cv2.copyMakeBorder(m, borderh, borderh, borderw, borderw, cv2.BORDER_CONSTANT, value=[0]) for m in masks]\n masks = np.asarray(masks)\n elif n == 1:\n masks = cv2.copyMakeBorder(masks.reshape([h, w]), borderh, borderh, borderw, borderw, cv2.BORDER_CONSTANT, value=[0])\n masks = masks[np.newaxis, :, :]\n\n boxes[:, 0] = boxes[:, 0] + borderw\n boxes[:, 1] = boxes[:, 1] + borderh\n boxes[:, 2] = boxes[:, 2] + borderw\n boxes[:, 3] = boxes[:, 3] + borderh\n\n scale = float(target_height) / im.shape[0]\n im = cv2.resize(im, (target_width, target_height))\n mask = cv2.resize(mask, (target_width, target_height), interpolation=cv2.INTER_NEAREST)\n\n flip = np.random.uniform() > 0.5\n if flip:\n im = cv2.flip(im, 1)\n mask = cv2.flip(mask, 1)\n\n if masks.size > 0:\n masks = np.transpose(masks, (1, 2, 0)) # to (h, w, n)\n masks = cv2.resize(masks, (target_width, target_height), interpolation=cv2.INTER_NEAREST)\n if flip:\n masks = cv2.flip(masks, 1)\n try:\n if masks.ndim > 2:\n masks = np.transpose(masks, (2, 0, 1)) # to (n, h, w)\n else:\n masks = masks.reshape((1, target_height, target_width))\n except ValueError:\n print (masks.ndim, masks.shape)\n raise\n else:\n masks = np.zeros((0, target_height, target_width), masks.dtype)\n\n # bboxes\n boxes = _offset_boxes(boxes, [target_height, target_width], scale, [0, 0], flip)\n boxes, classes, masks = _filter_invalid_boxes(boxes, classes, masks, min_size=min_size)\n return im, masks, mask, boxes, classes", "def _mosaic_combine(self, loc, center_position_xy, img_shape_wh):\n\n assert loc in ('top_left', 'top_right', 'bottom_left', 'bottom_right')\n if loc == 'top_left':\n # index0 to top left part of image\n x1, y1, x2, y2 = max(center_position_xy[0] - img_shape_wh[0], 0), \\\n max(center_position_xy[1] - img_shape_wh[1], 0), \\\n center_position_xy[0], \\\n center_position_xy[1]\n crop_coord = img_shape_wh[0] - (x2 - x1), img_shape_wh[1] - (\n y2 - y1), img_shape_wh[0], img_shape_wh[1]\n\n elif loc == 'top_right':\n # index1 to top right part of image\n x1, y1, x2, y2 = center_position_xy[0], \\\n max(center_position_xy[1] - img_shape_wh[1], 0), \\\n min(center_position_xy[0] + img_shape_wh[0],\n self.img_scale[1] * 2), \\\n center_position_xy[1]\n crop_coord = 0, img_shape_wh[1] - (y2 - y1), min(\n img_shape_wh[0], x2 - x1), img_shape_wh[1]\n\n elif loc == 'bottom_left':\n # index2 to bottom left part of image\n x1, y1, x2, y2 = max(center_position_xy[0] - img_shape_wh[0], 0), \\\n center_position_xy[1], \\\n center_position_xy[0], \\\n min(self.img_scale[0] * 2, center_position_xy[1] +\n img_shape_wh[1])\n crop_coord = img_shape_wh[0] - (x2 - x1), 0, img_shape_wh[0], min(\n y2 - y1, img_shape_wh[1])\n\n else:\n # index3 to bottom right part of image\n x1, y1, x2, y2 = center_position_xy[0], \\\n 
center_position_xy[1], \\\n min(center_position_xy[0] + img_shape_wh[0],\n self.img_scale[1] * 2), \\\n min(self.img_scale[0] * 2, center_position_xy[1] +\n img_shape_wh[1])\n crop_coord = 0, 0, min(img_shape_wh[0],\n x2 - x1), min(y2 - y1, img_shape_wh[1])\n\n paste_coord = x1, y1, x2, y2\n return paste_coord, crop_coord", "def crop_roi_image(data_dir):\n images = list()\n labels = list()\n\n csv_dir = data_dir\n images_dir = data_dir.split(\"_\")[0] + \"_png\"\n\n df = pd.read_csv('/'.join(csv_dir.split('/')[:-1]) + '/data_description.csv', header=None)\n\n for row in df.iterrows():\n # Skip normal cases.\n if str(row[1][4]) == 'nan':\n continue\n if str(row[1][4]) == '*NOT':\n continue\n\n # Process image.\n image = preprocess_image(images_dir + '/' + row[1][0] + '.png')\n\n # Abnormal case: crop around tumour.\n y2 = 0\n x2 = 0\n if row[1][2] != 'NORM':\n y1 = image.shape[1] - int(row[1][5]) - 112\n if y1 < 0:\n y1 = 0\n y2 = 224\n if y2 != 224:\n y2 = image.shape[1] - int(row[1][5]) + 112\n if y2 > image.shape[1]:\n y2 = image.shape[1]\n y1 = image.shape[1] - 224\n x1 = int(row[1][4]) - 112\n if x1 < 0:\n x1 = 0\n x2 = 224\n if x2 != 224:\n x2 = int(row[1][4]) + 112\n if x2 > image.shape[0]:\n x2 = image.shape[0]\n x1 = image.shape[0] - 224\n\n # Normal case: crop around centre of image.\n else:\n y1 = int(image.shape[1] / 2 - 112)\n y2 = int(image.shape[1] / 2 + 112)\n x1 = int(image.shape[0] / 2 - 112)\n x2 = int(image.shape[0] / 2 + 112)\n\n # Get label from CSV file.\n label = \"normal\"\n if str(row[1][3]) == 'B':\n label = \"benign\"\n elif str(row[1][3]) == 'M':\n label = \"malignant\"\n\n # Append image and label to lists.\n images.append(image[y1:y2, x1:x2, :])\n labels.append(label)\n\n return images, labels", "def Crop_Image(img, mask, x, y, width, height):\n img = img[y:y+height, x:x+width,:]\n mask = mask[y:y+height, x:x+width,:]\n return img, mask", "def center_crop_img(img, size=100):\n if img.shape[0] < size or img.shape[1] < size:\n return\n center_x, center_y = img.shape[1] // 2, img.shape[0] // 2\n half_size = size // 2\n try:\n return img[center_y - half_size: center_y + half_size, center_x - half_size: center_x + half_size]\n except:\n return img[:size, :size]", "def _generate_crop(self):\n if self.box_drawn == True:\n if (self.cd_pic_num != -1) & (self.cd_crop_num == 1):\n self.communicator.generate_crop(picture_num=self.cd_pic_num, \\\n xa=self.xa, ya=self.ya, xb=self.xb, yb=self.yb)\n else:\n print \"ERROR: can only generate a new crop from a thumbnail\"\n else:\n print \"ERROR: please select an area to generate a crop from\"", "def five_crop(img, size):\n if isinstance(size, numbers.Number):\n size = (int(size), int(size))\n else:\n assert len(size) == 2, \"Please provide only two dimensions (h, w) for size.\"\n\n # w, h = img.size\n h, w = img.shape[:2]\n crop_h, crop_w = size\n if crop_w > w or crop_h > h:\n raise ValueError(\"Requested crop size {} is bigger than input size {}\".format(size,\n (h, w)))\n # img[i:i+h, j:j+w]\n # tl = img.crop((0, 0, crop_w, crop_h))\n tl = img[0:0+crop_h, 0:0+crop_w]\n # tr = img.crop((w - crop_w, 0, w, crop_h))\n tr = img[0:0+crop_h, w-crop_w:]\n # bl = img.crop((0, h - crop_h, crop_w, h))\n bl = img[h-crop_h:, 0:0+crop_w]\n # br = img.crop((w - crop_w, h - crop_h, w, h))\n br = img[h-crop_h:,w-crop_w:]\n center = center_crop(img, (crop_h, crop_w))\n return (tl, tr, bl, br, center)", "def crop_images(row, crop_path):\n def crop(im, box, square=True):\n \"\"\" box: list, [x_left, y_bottom, x_l + w, y_b + h]\n \"\"\"\n def 
pad_square(box):\n \"\"\" If box is a rectangle, expand it to a square.\n \"\"\"\n x, y, xw, yh = box\n w = xw-x\n h = yh-y\n if w < h:\n w = h\n elif h < w:\n h = w\n return [x, y, x+w, y+h]\n if square:\n box = pad_square(box)\n x, y, xw, yh = box\n return im[y:yh, x:xw]\n im = tiffread(row['Image'])\n im_crop = crop(im, row['Cropbox'], square=True)\n crop_file = os.path.join(crop_path, row['Name'], row['UID'])\n tiffwrite(crop_file, im_crop)", "def crop_images_color(dataset_dir, is_mask=True):\n data = []\n for folder in os.listdir(dataset_dir):\n path = os.path.join(dataset_dir, folder, \"*_labelIds.png\")\n data.extend(glob(path))\n\n for index, filePath in enumerate(data):\n print ('{}/{}'.format(index, len(data)))\n\n img = scipy.misc.imread(filePath).astype(np.uint8)\n img = scipy.misc.imresize(img, 0.25, interp='bilinear', mode=None)\n if is_mask:\n mask = np.ones((img.shape[0], img.shape[1]), dtype=np.uint8) * 255\n\n idx_person = np.where(np.all(img == [220, 20, 60, 255], axis=-1))\n #idx_rider = np.where(np.all(img == [255, 0, 0, 255], axis=-1))\n #idx_void = np.where(np.all(img == [0, 0, 0, 255], axis=-1))\n\n #indices = np.concatenate((idx_person, idx_rider, idx_void), axis=1)\n indices = idx_person\n # mask[indices[0], indices[1], :] = (0, 0, 0, 255)\n mask[indices[0], indices[1]] = 0\n mask = np.reshape(mask, (256, 512))\n\n #scipy.misc.imsave('/home/andy/dataset/CITYSCAPES/CITYSCAPES_crop_random/' + filePath.split('/')[-1],\n # img[offs_h[index]:offs_h_end[index], offs_w[index]:offs_w_end[index] :])\n scipy.misc.imsave('/home/andy/dataset/CITYSCAPES/for_wonderful_chou/image/' + filePath.split('/')[-1],\n img[0:192, :])\n #break", "def center_crop(image: np.ndarray, size: Union[tuple, list, np.ndarray]) -> np.ndarray:\n\n if type(size) is int and isinstance(size, str) and len(size) < 2:\n raise ValueError(\"size has to be a list or tuple or array with at least two int elements\")\n\n # For tensor processing\n image = _convert_tensor_to_numpy_if_possible(image)\n\n # find larger ratio\n h_ratio = size[0] / image.shape[0]\n w_ratio = size[1] / image.shape[1]\n larger_ratio = h_ratio if h_ratio > w_ratio else w_ratio\n\n # resize with larger ratio\n image = cv2.resize(image, (0, 0), fx=larger_ratio, fy=larger_ratio)\n\n # crop the middle portion\n top_offset = (image.shape[0] - size[0]) // 2\n left_offset = (image.shape[1] - size[1]) // 2\n\n image = image[top_offset:top_offset + size[0], left_offset:left_offset + size[1]]\n\n return image", "def center_crop(image, model_input_image_size):\n im_size = image.get_shape().as_list()\n target_height = model_input_image_size[0]\n target_width = model_input_image_size[1]\n if len(im_size) == 3:\n return tf.image.resize_image_with_crop_or_pad(\n image,\n target_height=target_height,\n target_width=target_width)\n elif len(im_size) == 4:\n time_split_image = tf.split(image, im_size[0], axis=0)\n crops = []\n for idx in range(len(time_split_image)):\n it_crop = tf.image.resize_image_with_crop_or_pad(\n tf.squeeze(time_split_image[idx], axis=0),\n target_height=target_height,\n target_width=target_width)\n crops += [tf.expand_dims(it_crop, axis=0)]\n return tf.concat(crops, axis=0)\n else:\n raise NotImplementedError", "def crop(images, boxes, batch_inds = False, stride = 1, pooled_height = 7, pooled_width = 7, scope='ROIAlign'):\n with tf.name_scope(scope):\n boxes = boxes / (stride + 0.0)\n shape = tf.shape(images)\n boxes = tf.reshape(boxes, [-1, 2]) # to (x, y)\n x = tf.slice(boxes, [0, 0], [-1, 1])\n y = tf.slice(boxes, [0, 1], [-1, 
1])\n x = x / tf.cast(shape[2], tf.float32)\n y = y / tf.cast(shape[1], tf.float32)\n boxes = tf.concat([y, x], axis=1)\n boxes = tf.reshape(boxes, [-1, 4]) # to (y1, x1, y2, x2)\n \n if batch_inds is False:\n shape = tf.shape(boxes)\n # batch_inds = tf.zeros((shape[0], ), dtype=tf.int32, name='batch_inds')\n batch_inds = tf.zeros([shape[0]], dtype=tf.int32, name='batch_inds')\n return tf.image.crop_and_resize(images, boxes, batch_inds,\n [pooled_height, pooled_width],\n method='bilinear',\n name='Crop')", "def crop(img, i, j, h, w):\n return img.crop((j, i, j + w, i + h))", "def crop(img, i, j, h, w):\n return img.crop((j, i, j + w, i + h))", "def align_and_crop_face(self, img, rect_list, desired_width, desired_height):\n \n for j, det in enumerate(rect_list):\n shape = self.align_predictor(img, det)\n left_eye = extract_left_eye_center(shape)\n right_eye = extract_right_eye_center(shape)\n M = get_rotation_matrix(left_eye, right_eye)\n\n rotated_img = cv2.warpAffine(img, M, (img.shape[1], img.shape[0]), flags=cv2.INTER_CUBIC)\n cropped = crop_image(rotated_img, det)\n try:\n \n cropped_res = cv2.resize(cropped, (desired_width, desired_height))\n except:\n print(det)\n print(img.shape)\n cropped_res = cv2.resize(rotated_img,(desired_width, desired_height))\n cropped_img = cropped_res[:, :, ::-1]\n\n return cropped_img, left_eye, right_eye", "def faceCrop(im,x,y,w,h,m):\r\n sizeX, sizeY = im.size\r\n new_x, new_y = max(0,x-m*w), max(0,y-m*h)\r\n new_w = w + 2*m*w if sizeX > (new_x + w + 2*m*w) else sizeX - new_x\r\n new_h = h + 2*m*h if sizeY > (new_y + h + 2*m*h) else sizeY - new_y\r\n new_x,new_y,new_w,new_h = int(new_x),int(new_y),int(new_w),int(new_h)\r\n return im.crop((new_x,new_y,new_x+new_w,new_y+new_h))", "def crop(self, left=0, top=0, right=None, bottom=None,\n width=None, height=None, reset_coords=True):\n if not (right is None or width is None):\n raise TypeError('parameters right and width are exclusive each '\n 'other; use one at a time')\n elif not (bottom is None or height is None):\n raise TypeError('parameters bottom and height are exclusive each '\n 'other; use one at a time')\n def abs_(n, m, null=None):\n if n is None:\n return m if null is None else null\n elif not isinstance(n, numbers.Integral):\n raise TypeError('expected integer, not ' + repr(n))\n elif n > m:\n raise ValueError(repr(n) + ' > ' + repr(m))\n return m + n if n < 0 else n\n left = abs_(left, self.width, 0)\n top = abs_(top, self.height, 0)\n if width is None:\n right = abs_(right, self.width)\n width = right - left\n if height is None:\n bottom = abs_(bottom, self.height)\n height = bottom - top\n if width < 1:\n raise ValueError('image width cannot be zero')\n elif height < 1:\n raise ValueError('image width cannot be zero')\n elif left == top == 0 and width == self.width and height == self.height:\n return\n library.MagickCropImage(self.wand, width, height, left, top)\n self.raise_exception()\n if reset_coords:\n self.reset_coords()", "def uncrop_3D(image, Coords, originalSize):\t\n\n\n\tuncroppedImage = None\n\tif (len(image.shape)==3) & (len(Coords)==6) & (len(originalSize)==3):\n\t\tif (Coords[0]<originalSize[0]) & (Coords[1]<originalSize[0]) & (Coords[2]<originalSize[1]) & (Coords[3]<originalSize[1]) & (Coords[4]<originalSize[2]) & (Coords[5]<originalSize[2]): \n\t\t\tuncroppedImage = np.zeros(originalSize)\n\t\t\tuncroppedImage[Coords[0]:Coords[1],Coords[2]:Coords[3],Coords[4]:Coords[5]] = image\n\t\t\treturn uncroppedImage\n\t\telse:\n\t\t\treturn \"Original size is smaller than the supplied 
coordenates\"\n\t\t\treturn uncroppedImage\n\telse:\n\t\tprint \"The array, coordenates or original size do not have 3 dimensions\"\n\t\treturn uncroppedImage", "def crop(original_img, coordinates, destination):\n image = cv2.imread(original_img)\n cropped = image.copy()\n if coordinates is None or len(coordinates) == 0:\n cv2.imwrite(destination, cropped)\n else:\n cnt = np.array(coordinates)\n cropped = four_point_transform(image, cnt)\n cv2.imwrite(destination, cropped)", "def create_cropped_data(image_array: np.ndarray, crop_size: tuple, crop_center: tuple, crop_only: bool = True):\n if not crop_only:\n # check parameters\n if not isinstance(image_array, np.ndarray) or len(image_array.shape) != 2:\n raise ValueError('image_array is not a 2D numpy array')\n elif len(crop_size) != 2 or len(crop_center) != 2:\n raise ValueError('crop size or crop center tuples have invalid amount of values')\n elif crop_size[0] % 2 == 0 or crop_size[1] % 2 == 0:\n raise ValueError('crop size contains an even number')\n # check rectangle position\n min_x = crop_center[0] - crop_size[0] // 2\n max_x = crop_center[0] + crop_size[0] // 2\n min_y = crop_center[1] - crop_size[1] // 2\n max_y = crop_center[1] + crop_size[1] // 2\n if not crop_only:\n crop_margin = 20\n if not (crop_margin <= min_x and max_x < image_array.shape[0] - crop_margin and\n crop_margin <= min_y and max_y < image_array.shape[1] - crop_margin):\n raise ValueError('the crop rectangle is too close to the edges')\n if crop_only:\n # create crop array\n crop_array = np.zeros_like(image_array)\n crop_array[min_x:max_x + 1, min_y:max_y + 1] = 1\n return crop_array\n else:\n # target_array = crop region in image_array\n target_array = np.copy(image_array[min_x:max_x + 1, min_y:max_y + 1])\n # set image_array values in crop region to 0 (in-place)\n image_array[min_x:max_x + 1, min_y:max_y + 1] = 0\n return image_array, target_array", "def __crop_img(img, cx, cy, max_axis, padding=0):\n\n new_height = max_axis\n new_width = max_axis\n\n cy -= new_height // 2\n cx -= new_width // 2\n\n if (cy + new_height) > img.shape[0]:\n shift = (cy + new_height) - img.shape[0]\n cy -= shift\n\n if (cx + new_width) > img.shape[1]:\n shift = (cx + new_width) - img.shape[1]\n cx -= shift\n\n cy = max(0., cy)\n cx = max(0., cx)\n\n cx = padding if cx == 0 else cx\n cy = padding if cy == 0 else cy\n\n cropped_img = img[cy - padding:cy + new_height + padding, cx - padding:cx + new_width + padding, :]\n\n return cropped_img", "def set_crop(self):\n ratio = self.full_widget_ratio * 1\n # ratio = 1\n keepw = self.camw // ratio\n keeph = self.camh // ratio\n print(\"crop ratio %u => %u, %uw x %uh\" %\n (self.full_widget_ratio, ratio, keepw, keeph))\n\n # Divide remaining pixels between left and right\n left = right = (self.camw - keepw) // 2\n top = bottom = (self.camh - keeph) // 2\n self.roi_videocrop.set_property(\"top\", top)\n self.roi_videocrop.set_property(\"bottom\", bottom)\n self.roi_videocrop.set_property(\"left\", left)\n self.roi_videocrop.set_property(\"right\", right)\n\n finalw = self.camw - left - right\n finalh = self.camh - top - bottom\n print(\n \"cam %uw x %uh %0.1fr => crop (x2) %uw x %uh => %uw x %uh %0.1fr\" %\n (self.camw, self.camh, self.camw / self.camh, left, top, finalw,\n finalh, finalw / finalh))", "def __call__(self, results):\n image_size = results['img'].shape[:2]\n crop_size = self._get_crop_size(image_size)\n results = self._crop_data(results, crop_size, self.allow_negative_crop)\n return results", "def crop_center_3d(img, cropx, 
cropy, cropz):\n z, y, x = img.shape\n startx = x // 2 - (cropx // 2)\n starty = y // 2 - (cropy // 2)\n startz = z // 2 - (cropz // 2)\n return img[\n startz : startz + cropz, starty : starty + cropy, startx : startx + cropx\n ]", "def get_cropped_img(self, img, center_xy, target_size, crop_size,\n avg_channel):\n N, C, H, W = img.shape\n context_xmin = int(center_xy[0] - crop_size / 2)\n context_xmax = int(center_xy[0] + crop_size / 2)\n context_ymin = int(center_xy[1] - crop_size / 2)\n context_ymax = int(center_xy[1] + crop_size / 2)\n\n left_pad = max(0, -context_xmin)\n top_pad = max(0, -context_ymin)\n right_pad = max(0, context_xmax - W)\n bottom_pad = max(0, context_ymax - H)\n\n context_xmin += left_pad\n context_xmax += left_pad\n context_ymin += top_pad\n context_ymax += top_pad\n\n avg_channel = avg_channel[:, None, None]\n if any([top_pad, bottom_pad, left_pad, right_pad]):\n new_img = img.new_zeros(N, C, H + top_pad + bottom_pad,\n W + left_pad + right_pad)\n new_img[..., top_pad:top_pad + H, left_pad:left_pad + W] = img\n if top_pad:\n new_img[..., :top_pad, left_pad:left_pad + W] = avg_channel\n if bottom_pad:\n new_img[..., H + top_pad:, left_pad:left_pad + W] = avg_channel\n if left_pad:\n new_img[..., :left_pad] = avg_channel\n if right_pad:\n new_img[..., W + left_pad:] = avg_channel\n crop_img = new_img[..., context_ymin:context_ymax + 1,\n context_xmin:context_xmax + 1]\n else:\n crop_img = img[..., context_ymin:context_ymax + 1,\n context_xmin:context_xmax + 1]\n\n crop_img = torch.nn.functional.interpolate(\n crop_img,\n size=(target_size, target_size),\n mode='bilinear',\n align_corners=False)\n return crop_img", "def load_images_from_folder(folder, n_cases,patch_size, mask_path, mask_type, mask_name,normalize=False, imrotate=False):\n\n# # Initialize the arrays:\n# if imrotate: # number of images is 4 * n_im\n# bigy = np.empty((n_im * 4, 64, 64))\n# bigx = np.empty((n_im * 4, 64, 64, 2))\n# else:\n# bigy = np.empty((n_im, 64, 64))\n# bigx = np.empty((n_im, 64, 64, 2))\n\n# im = 0 # image counter\n bigy = []\n filenames = os.listdir(folder)\n\n for filename in filenames[n_cases[0]:n_cases[1]]:\n if not filename.startswith('.'):\n temp = loadmat(os.path.join(folder, filename))['res']\n print temp.shape\n # Clean the STONE sense recon data\n row, col = temp.shape\n temp = np.reshape(temp, (row, col, -1))\n #valid_mask = (np.abs(np.squeeze(temp[int(row/2), int(col/2), :])) != 0)\n #final_images = temp[:,:,valid_mask]\n final_images = temp\n \n# # Resize images\n #final_images = np.abs(final_images)\n final_images_resized = np.zeros((patch_size,patch_size,final_images.shape[2]))\n for i in range(final_images.shape[2]):\n final_images_resized[:,:,i] = cv2.resize(final_images[:,:,i], (patch_size,patch_size))\n \n# # Only take a small part of the data\n# final_images = final_images[140:180,140:180,:]\n \n# # Convert to abs values\n# final_images = np.abs(final_images)\n# \n# # Normalize based on single patient case\n# final_images = (final_images - np.mean(final_images)) / np.std(final_images)\n \n# bigy_temp = cv2.imread(os.path.join(folder, filename),\n# cv2.IMREAD_GRAYSCALE)\n \n \n bigy.append(final_images_resized)\n \n bigy = np.asarray(bigy)\n cases, row, col, imgs = bigy.shape\n bigy = np.transpose(np.reshape(np.transpose(bigy, (1,2,3,0)), (row, col, -1)), (2,0,1))\n \n # convert to k-space\n imgs, row, col = bigy.shape\n bigx = np.empty((imgs, row, col, 2))\n mask = 
read_mask(mask_path=mask_path,mask_type=mask_type,mask_name=mask_name,patch_size=patch_size,show_image=False)\n for i in range(imgs):\n bigx[i, :, :, :] = create_x(np.squeeze(bigy[i,:,:]),mask)\n \n # convert bigx from complex to abs values\n bigy = np.abs(bigy)\n \n# im += 1\n# if imrotate:\n# for angle in [90, 180, 270]:\n# bigy_rot = im_rotate(bigy_temp, angle)\n# bigx_rot = create_x(bigy_rot, normalize)\n# bigy[im, :, :] = bigy_rot\n# bigx[im, :, :, :] = bigx_rot\n# im += 1\n\n# if imrotate:\n# if im > (n_im * 4 - 1): # how many images to load\n# break\n# else:\n# if im > (n_im - 1): # how many images to load\n# break\n\n# if normalize:\n# bigx = (bigx - np.amin(bigx)) / (np.amax(bigx) - np.amin(bigx))\n\n return bigx, bigy", "def crop_around_center(image, width, height):\r\n\r\n image_size = (image.shape[1], image.shape[0])\r\n image_center = (int(image_size[0] * 0.5), int(image_size[1] * 0.5))\r\n\r\n if(width > image_size[0]):\r\n width = image_size[0]\r\n\r\n if(height > image_size[1]):\r\n height = image_size[1]\r\n\r\n x1 = int(image_center[0] - width * 0.5)\r\n x2 = int(image_center[0] + width * 0.5)\r\n y1 = int(image_center[1] - height * 0.5)\r\n y2 = int(image_center[1] + height * 0.5)\r\n\r\n return image[y1:y2, x1:x2]", "def cut_image(im):\n width, height = im.size\n # Three pictures in a row\n item_width = int(width / 3)\n box_list = []\n for i in range(0, 3):\n for j in range(0, 3):\n box = (j * item_width, i * item_width, (j + 1) * item_width, (i + 1) * item_width)\n box_list.append(box)\n image_list = [im.crop(box) for box in box_list]\n return image_list", "def display_cropped_img(i):\n image = PIL.Image.open(testing_img_paths[i])\n image = image.crop(box=(313,99,825,611))\n image = image.resize((256,256))\n display(image)", "def resizeContours(self):\n scale = 500/self.images.shape[1]\n print('Scaling images by {} for display'.format(scale))\n self.lumenCopy = (self.lumen[0][:], self.lumen[1][:])\n self.plaqueCopy = (self.plaque[0][:], self.plaque[1][:])\n self.stentCopy = (self.stent[0][:], self.stent[1][:])\n self.lumen = self.resize(self.lumen, scale)\n self.plaque = self.resize(self.plaque, scale)\n self.stent = self.resize(self.stent, scale)", "def five_crop(img, size):\n if isinstance(size, numbers.Number):\n size = (int(size), int(size))\n else:\n assert len(size) == 2, \"Please provide only two dimensions (h, w) for size.\"\n\n image_height, image_width = img.shape[:2]\n crop_height, crop_width = size\n if crop_width > image_width or crop_height > image_height:\n msg = \"Requested crop size {} is bigger than input size {}\"\n raise ValueError(msg.format(size, (image_height, image_width)))\n\n tl = crop(img, 0, 0, crop_height, crop_width)\n tr = crop(img, 0, image_width - crop_width, crop_height, crop_width)\n bl = crop(img, image_height - crop_height, 0, crop_height, crop_width)\n br = crop(img, image_height - crop_height, image_width - crop_width, crop_height, crop_width)\n center = center_crop(img, (crop_height, crop_width))\n return (tl, tr, bl, br, center)", "def crop(self, *args, **kwargs):\n return _image.image_crop(self, *args, **kwargs)", "def slide_crop(img, img_meta, model_cfg, save_dir):\r\n ori_shape = img_meta[0]['ori_shape']\r\n stem = Path(img_meta[0]['ori_filename']).stem\r\n save_dir = Path(save_dir)\r\n save_dir.mkdir(parents=True, exist_ok=True)\r\n\r\n h_stride, w_stride = model_cfg.test_cfg.stride\r\n h_crop, w_crop = model_cfg.test_cfg.crop_size\r\n batch_size, _, h_img, w_img = img.size()\r\n h_grids = max(h_img - h_crop + h_stride - 1, 
0) // h_stride + 1\r\n w_grids = max(w_img - w_crop + w_stride - 1, 0) // w_stride + 1\r\n\r\n cnt = 0\r\n for h_idx in range(h_grids):\r\n for w_idx in range(w_grids):\r\n y1 = h_idx * h_stride\r\n x1 = w_idx * w_stride\r\n y2 = min(y1 + h_crop, h_img)\r\n x2 = min(x1 + w_crop, w_img)\r\n y1 = max(y2 - h_crop, 0)\r\n x1 = max(x2 - w_crop, 0)\r\n crop_img = img[:, :, y1:y2, x1:x2]\r\n \r\n cnt += 1\r\n msg1 = f'{x1},{y1},{x2},{y2}'\r\n msg2 = f'{ori_shape[0]},{ori_shape[1]},{ori_shape[2]}'\r\n msg3 = f'{batch_size},{h_img},{w_img}'\r\n bin_path = save_dir/f'{stem}-{\"-\".join([msg1, msg2, msg3])}.bin'\r\n crop_img.numpy().astype(np.float32).tofile(bin_path)\r\n\r\n return cnt", "def cifar_image_augmentation(images):\n images = tf.image.resize_image_with_crop_or_pad(images, 40, 40)\n images = tf.random_crop(images, [32, 32, 3])\n images = tf.image.random_flip_left_right(images)\n return images", "def _build_crop_fn(self, img_shape, crop_modes):\n h = img_shape[0]\n w = img_shape[1]\n\n w_crop = int(w * self.CROP_RATIO)\n h_crop = int(h * self.CROP_RATIO)\n\n top_pads = {\n Crop.TOP: 0,\n Crop.CENTER: int((h - h_crop) / 2),\n Crop.BOTTOM: h - h_crop\n }\n left_pads = {\n Crop.LEFT: 0,\n Crop.CENTER: int((w - self.CROP_RATIO) / 2),\n Crop.RIGHT: w - w_crop\n }\n\n def crop(image, directory):\n for crop_mode in crop_modes:\n top_pad = top_pads[crop_mode.vertical]\n left_pad = left_pads[crop_mode.horizontal]\n fname = self.name_generator.generate_aug_name(\n original=image.name,\n aug_name=\"{}_{}\".format(crop_mode.vertical, crop_mode.horizontal)\n )\n fpath = os.path.join(directory, fname)\n\n crop = image.x[top_pad:top_pad + h_crop, left_pad:left_pad + w_crop]\n crop = cv2.resize(crop, (w, h))\n cv2.imwrite(fpath, crop)\n\n return crop", "def rotate_crop_scale(self, lines):\n return None", "def __call__(self, img):\n image_width, image_height = img.size\n image_short = min(image_width, image_height)\n\n crop_size = float(self.imgsize) / (self.imgsize + 32) * image_short\n\n crop_height, crop_width = crop_size, crop_size\n crop_top = int(round((image_height - crop_height) / 2.))\n crop_left = int(round((image_width - crop_width) / 2.))\n return img.crop((crop_left, crop_top, crop_left + crop_width, crop_top + crop_height))", "def __call__(self, img):\n image_width, image_height = img.size\n image_short = min(image_width, image_height)\n\n crop_size = float(self.imgsize) / (self.imgsize + 32) * image_short\n\n crop_height, crop_width = crop_size, crop_size\n crop_top = int(round((image_height - crop_height) / 2.))\n crop_left = int(round((image_width - crop_width) / 2.))\n return img.crop((crop_left, crop_top, crop_left + crop_width, crop_top + crop_height))", "def _get_crops(im):\n # Convert to grayscale\n im = color.rgb2gray(im)\n\n # Reshape im (Rescale the input image, make it perfect square)\n target_size = int(np.mean(im.shape))\n im = transform.resize(im, (target_size,)*2)\n \n # Size of the square\n sq_size = target_size / 9.0\n # Get the centers\n centers = np.linspace(0,target_size,9,endpoint=False) + sq_size / 2.0\n \n # Crop\n r = 0.71\n crop_size = int(round((sq_size * r) * 0.5))\n\n cropped_ims = []\n for i in range(9):\n for j in range(9):\n v1, v2 = centers[i] - crop_size, centers[i] + crop_size\n h1, h2 = centers[j] - crop_size, centers[j] + crop_size\n v1, v2, h1, h2 = int(round(v1)), int(round(v2)), int(round(h1)), int(round(h2)) \n cropped = im[v1:v2, h1:h2]\n \n # Process cropped_im\n cropped = 1.0 - cropped # At this point, digit is white, background is black\n \n # Fix 
the cropped image (Place the digit to the center)\n cropped = _fix_crop(cropped)\n \n # Append\n cropped_ims.append(cropped)\n \n return cropped_ims", "def crop(img: torch.Tensor, new_size: Union[int, Tuple[int, int]]) ->torch.Tensor:\n new_size = to_tuple(new_size)\n return F.center_crop(img, output_size=new_size)", "def crop (*args, **kwargs):\n return compute('crop', inputs=list(args), args=kwargs)", "def crop_center(image, cropx, cropy):\n x,y = image.shape\n startx = x//2 - cropx//2\n starty = y//2 - cropy//2 \n return image[startx:startx+cropx, starty:starty+cropy]", "def crop_image(input_image, output_image, start_x, start_y, width, height):\n box = (start_x, start_y, start_x + width, start_y + height)\n output_img = img.crop(box)\n output_img.save(output_image +\".png\")", "def center_crop(img, size, channels_last: bool = False):\n if channels_last:\n # NHWC\n h, w = img.shape[-3:-1]\n else:\n # NCHW\n h, w = img.shape[-2:]\n\n if isinstance(size, numbers.Number):\n size = (int(size), int(size))\n assert len(size) == 2, \"size should be (h,w) you wish to resize to\"\n cropx, cropy = size\n\n startx = w // 2 - (cropx // 2)\n starty = h // 2 - (cropy // 2)\n if channels_last:\n return img[..., starty : starty + cropy, startx : startx + cropx, :]\n else:\n return img[..., starty : starty + cropy, startx : startx + cropx]", "def _scale_and_crop(self, img, seg, crop_size):\n h, w = img.shape[0], img.shape[1]\n # if train:\n # # random scale\n # scale = random.random() + 0.5 # 0.5-1.5\n # scale = max(scale, 1. * crop_size / (min(h, w) - 1)) # ??\n # else:\n # # scale to crop size\n # scale = 1. * crop_size / (min(h, w) - 1)\n scale = crop_size / min(h, w)\n if scale > 1:\n print('scale: ', scale)\n img = transform.rescale(img, scale, mode='reflect', order=1) # order 1 is bilinear\n seg = transform.rescale(seg.astype(np.float), scale, mode='reflect', order=0) # order 0 is nearest neighbor\n\n h_s, w_s = img.shape[0], seg.shape[1]\n if self.validation or self.testing:\n # center crop\n x1 = (w_s - crop_size) // 2\n y1 = (h_s - crop_size) // 2\n else:\n # random crop\n x1 = random.randint(0, w_s - crop_size)\n y1 = random.randint(0, h_s - crop_size)\n\n img_crop = img[y1: y1 + crop_size, x1: x1 + crop_size, :]\n seg_crop = seg[y1: y1 + crop_size, x1: x1 + crop_size]\n return img_crop, seg_crop", "def center_crop(img, output_size):\n if isinstance(output_size, numbers.Number):\n output_size = (int(output_size), int(output_size))\n image_height, image_width = img.shape[:2]\n crop_height, crop_width = output_size\n crop_top = int(round((image_height - crop_height) / 2.))\n crop_left = int(round((image_width - crop_width) / 2.))\n return crop(img, crop_top, crop_left, crop_height, crop_width)", "def cut4(image):\r\n i, j = image.shape\r\n a1 = image[:i // 2, :j // 2]\r\n a2 = image[i // 2:, :j // 2]\r\n a3 = image[:i // 2, j // 2:]\r\n a4 = image[i // 2:, j // 2:]\r\n return a1, a2, a3, a4", "def crop_around_center(image, width, height):\n\n image_size = (image.shape[1], image.shape[0])\n image_center = (int(image_size[0] * 0.5), int(image_size[1] * 0.5))\n\n if(width > image_size[0]):\n width = image_size[0]\n\n if(height > image_size[1]):\n height = image_size[1]\n\n x1 = int(image_center[0] - width * 0.5)\n x2 = int(image_center[0] + width * 0.5)\n y1 = int(image_center[1] - height * 0.5)\n y2 = int(image_center[1] + height * 0.5)\n\n return image[y1:y2, x1:x2]", "def crop_around_center(image, width, height):\n\n image_size = (image.shape[1], image.shape[0])\n image_center = 
(int(image_size[0] * 0.5), int(image_size[1] * 0.5))\n\n if(width > image_size[0]):\n width = image_size[0]\n\n if(height > image_size[1]):\n height = image_size[1]\n\n x1 = int(image_center[0] - width * 0.5)\n x2 = int(image_center[0] + width * 0.5)\n y1 = int(image_center[1] - height * 0.5)\n y2 = int(image_center[1] + height * 0.5)\n\n return image[y1:y2, x1:x2]", "def crop_around_center(image, width, height):\n\n image_size = (image.shape[1], image.shape[0])\n image_center = (int(image_size[0] * 0.5), int(image_size[1] * 0.5))\n\n if(width > image_size[0]):\n width = image_size[0]\n\n if(height > image_size[1]):\n height = image_size[1]\n\n x1 = int(image_center[0] - width * 0.5)\n x2 = int(image_center[0] + width * 0.5)\n y1 = int(image_center[1] - height * 0.5)\n y2 = int(image_center[1] + height * 0.5)\n\n return image[y1:y2, x1:x2]", "def crop_around_center(image, width, height):\n\n image_size = (image.shape[1], image.shape[0])\n image_center = (int(image_size[0] * 0.5), int(image_size[1] * 0.5))\n\n if (width > image_size[0]):\n width = image_size[0]\n\n if (height > image_size[1]):\n height = image_size[1]\n\n x1 = int(image_center[0] - width * 0.5)\n x2 = int(image_center[0] + width * 0.5)\n y1 = int(image_center[1] - height * 0.5)\n y2 = int(image_center[1] + height * 0.5)\n\n return image[y1:y2, x1:x2]", "def crop_image(filename, n):\n image = SimpleImage(filename)\n width = image.width\n new_width = width - (2 * n)\n height = image.height\n new_height = height - (2 * n)\n image_crop_width = SimpleImage.blank(new_width, height)\n for y in range(height):\n for x in range(new_width):\n pixel = image.get_pixel((x + n), y)\n image_crop_width.set_pixel(x, y, pixel)\n image_crop_width.show()\n\n image_crop_height = SimpleImage.blank(width, new_height)\n for y in range(new_height):\n for x in range(width):\n pixel = image.get_pixel(x, y + n)\n image_crop_height.set_pixel(x, y, pixel)\n image_crop_height.show()\n\n image_crop_width_height = SimpleImage.blank(new_width, new_height)\n for y in range(new_height):\n for x in range(new_width):\n pixel = image.get_pixel(x + n, y + n)\n image_crop_width_height.set_pixel(x, y, pixel)\n image_crop_width_height.show()", "def crop(self, coords):\n pass", "def crop_image(grayscale_image, raft_center, width):\n top_row = int(raft_center[1] - width / 2)\n # note that y corresponds to rows, and is directed from top to bottom in scikit-image\n bottom_row = int(raft_center[1] + width / 2)\n\n left_column = int(raft_center[0] - width / 2)\n right_column = int(raft_center[0] + width / 2)\n\n raft_image = grayscale_image[top_row:bottom_row, left_column:right_column]\n return raft_image", "def crop(self, xdiv, ydiv, img, bBoxes=None):\n xstride = img.shape[1] // xdiv\n ystride = img.shape[0] // ydiv\n\n widthLimits = np.zeros((xdiv+1,), dtype=np.int32)\n heightLimits = np.zeros((ydiv+1), dtype=np.int32)\n croppedImages = [[] for _ in range(xdiv*ydiv)]\n croppedBoxes = [[] for _ in range(xdiv*ydiv)]\n index = 0\n for x in range(0, img.shape[1]+1, xstride):\n widthLimits[index] = x\n index += 1\n index = 0\n for y in range(0, img.shape[0]+1, ystride):\n heightLimits[index] = y\n index+=1\n index = 0\n for i in range(len(widthLimits)-1):\n for j in range(len(heightLimits)-1):\n croppedImages[index] = img[heightLimits[j]:heightLimits[j+1], widthLimits[i]:widthLimits[i+1]]\n index += 1\n if bBoxes:\n for box in bBoxes:\n index = 0\n for i in range(len(widthLimits)-1):\n for j in range(len(heightLimits)-1):\n if box[0] >= widthLimits[i] and box[2] < 
widthLimits[i+1] \\\n and box[1] >= heightLimits[j] and box[3] < heightLimits[j+1]:\n box[0] -= widthLimits[i]\n box[2] -= widthLimits[i]\n box[1] -= heightLimits[j]\n box[3] -= heightLimits[j]\n croppedBoxes[index].append(box)\n index += 1\n return croppedImages, croppedBoxes", "def _crop_pool_layer(bottom, rois, max_pool=True):\n # code modified from\n # https://github.com/ruotianluo/pytorch-faster-rcnn\n # implement it using stn\n # box to affine\n # input (x1,y1,x2,y2)\n rois = rois.detach()\n batch_size = bottom.size(0)\n D = bottom.size(1)\n H = bottom.size(2)\n W = bottom.size(3)\n roi_per_batch = rois.size(0) / batch_size\n x1 = rois[:, 1::4] / 16.0\n y1 = rois[:, 2::4] / 16.0\n x2 = rois[:, 3::4] / 16.0\n y2 = rois[:, 4::4] / 16.0\n\n height = bottom.size(2)\n width = bottom.size(3)\n\n # affine theta\n zero = Variable(rois.data.new(rois.size(0), 1).zero_())\n theta = torch.cat([ \\\n (x2 - x1) / (width - 1),\n zero,\n (x1 + x2 - width + 1) / (width - 1),\n zero,\n (y2 - y1) / (height - 1),\n (y1 + y2 - height + 1) / (height - 1)], 1).view(-1, 2, 3)\n\n if max_pool:\n pre_pool_size = cfg.POOLING_SIZE * 2\n grid = F.affine_grid(theta, torch.Size((rois.size(0), 1, pre_pool_size, pre_pool_size)))\n bottom = bottom.view(1, batch_size, D, H, W).contiguous().expand(roi_per_batch, batch_size, D, H, W) \\\n .contiguous().view(-1, D, H, W)\n crops = F.grid_sample(bottom, grid)\n crops = F.max_pool2d(crops, 2, 2)\n else:\n grid = F.affine_grid(theta, torch.Size((rois.size(0), 1, cfg.POOLING_SIZE, cfg.POOLING_SIZE)))\n bottom = bottom.view(1, batch_size, D, H, W).contiguous().expand(roi_per_batch, batch_size, D, H, W) \\\n .contiguous().view(-1, D, H, W)\n crops = F.grid_sample(bottom, grid)\n\n return crops, grid", "def crop_images(directory, selection):\n for root, dirs, files in os.walk(directory):\n for file in files:\n path = os.path.join(root, file)\n if os.path.splitext(file)[1] == '.tiff':\n crop(selection, path, path)", "def _prep_im_for_blob(self, im, pixel_means, bbox):\n im = im.astype(np.float32, copy=False)\n im -= pixel_means\n im_shape = im.shape\n\n # crop version 2\n x, y, w, h = bbox\n crop_img, crop_w, crop_h = None, None, None\n if (x, y, w, h) == (0, 0, im.shape[1]-1, im.shape[0]-1):\n crop_img = im[:,:,:]\n crop_w = w\n crop_h = h\n else:\n # 1. random shifted image'\n # crop_x = np.random.randint(x)\n # crop_w = np.random.randint(x+w, im_shape[1]-1) - crop_x\n # crop_y = np.random.randint(y)\n # crop_h = np.random.randint(y+h, im_shape[0]-1) - crop_y\n # crop_img = im[crop_y:crop_y+crop_h, crop_x:crop_x+crop_w, :]\n # 2. 
original image\n crop_img = im[y:y+h, x:x+w, :]\n crop_w, crop_h = w, h\n\n im_scale_x = float(self._width) / float(crop_w)\n im_scale_y = float(self._height ) / float(crop_h)\n crop_img = cv2.resize(crop_img, None, None, fx=im_scale_x, fy=im_scale_y,\n interpolation=cv2.INTER_LINEAR)\n\n return crop_img", "def crop_around_center(image, width, height):\n\n image_size = (image.shape[1], image.shape[0])\n image_center = (int(image_size[0] * 0.5), int(image_size[1] * 0.5))\n\n if width > image_size[0]:\n width = image_size[0]\n\n if height > image_size[1]:\n height = image_size[1]\n\n x1 = int(image_center[0] - width * 0.5)\n x2 = int(image_center[0] + width * 0.5)\n y1 = int(image_center[1] - height * 0.5)\n y2 = int(image_center[1] + height * 0.5)\n\n return image[y1:y2, x1:x2]", "def imgProcessing(self):\n if (self.image_width > 320):\n self.cv_image = imutils.resize(self.cv_image, width = 320)\n else:\n pass\n\n \"\"\" optional -- image-mirrored \"\"\"\n # self.cv_image = cv2.flip(self.cv_image, 1)", "def cropping_center(x, crop_shape, batch=False):\n orig_shape = x.shape\n if not batch:\n h0 = int((orig_shape[0] - crop_shape[0]) * 0.5)\n w0 = int((orig_shape[1] - crop_shape[1]) * 0.5)\n x = x[h0 : h0 + crop_shape[0], w0 : w0 + crop_shape[1]]\n else:\n h0 = int((orig_shape[1] - crop_shape[0]) * 0.5)\n w0 = int((orig_shape[2] - crop_shape[1]) * 0.5)\n x = x[:, h0 : h0 + crop_shape[0], w0 : w0 + crop_shape[1]]\n return x", "def crop_from_dets(\n img, \n bboxes, \n target_height, \n target_width,\n extra_zoom\n):\n\n imght = img.size(1)\n imgwidth = img.size(2)\n tmp_img = img\n # normalization (per-channel)\n tmp_img[0].add_(-0.406)\n tmp_img[1].add_(-0.457)\n tmp_img[2].add_(-0.480)\n \n crops = []\n bboxes_zoomed = []\n for box in bboxes:\n upLeft = torch.Tensor(\n (float(box[0]), float(box[1])))\n bottomRight = torch.Tensor(\n (float(box[2]), float(box[3])))\n\n ht = bottomRight[1] - upLeft[1]\n width = bottomRight[0] - upLeft[0]\n if width > 100:\n scaleRate = 0.2\n else:\n scaleRate = 0.3\n\n # zooming the predicted bounding box\n upLeft[0] = max(0, upLeft[0] - width * scaleRate / 2)\n upLeft[1] = max(0, upLeft[1] - ht * scaleRate / 2)\n bottomRight[0] = max(\n min(imgwidth - 1, bottomRight[0] + width * scaleRate / 2), upLeft[0] + 5)\n bottomRight[1] = max(\n min(imght - 1, bottomRight[1] + ht * scaleRate / 2), upLeft[1] + 5)\n \n # ADD EXTRA EXPANSION BECAUSE OF ARMS OUT OF THE BOX !!!\n # i.e. 
shift x-coordinate of the box corner to right or to left\n if extra_zoom == 'right_cam':\n bottomRight[0] += min(bottomRight[0]-upLeft[0], imgwidth-bottomRight[0])\n elif extra_zoom == 'left_cam':\n upLeft[0] -= min(upLeft[0], bottomRight[0]-upLeft[0])\n \n crops.append(cropBox(tmp_img, upLeft, bottomRight, target_height, target_width)[None,...])\n bboxes_zoomed.append(torch.cat((upLeft, bottomRight))[None,...])\n \n crops = torch.cat(crops, dim=0)\n bboxes_zoomed = torch.cat(bboxes_zoomed)\n \n return crops, bboxes_zoomed", "def center_crop(img, output_size, data_format='CHW'):\n _assert_image_tensor(img, data_format)\n\n if isinstance(output_size, numbers.Number):\n output_size = (int(output_size), int(output_size))\n\n image_width, image_height = _get_image_size(img, data_format)\n crop_height, crop_width = output_size\n crop_top = int(round((image_height - crop_height) / 2.0))\n crop_left = int(round((image_width - crop_width) / 2.0))\n return crop(\n img,\n crop_top,\n crop_left,\n crop_height,\n crop_width,\n data_format=data_format,\n )", "def pre_process_data(input_path: list, cuts: int, shape: int = 32, normalize: bool = True) -> list:\n images = []\n images_uncut = []\n for files_path in input_path:\n\n files = os.listdir(files_path) # TODO paths\n for f in files:\n file_path = f'{files_path}/{f}'\n im_uncut = cv2.imread(file_path)\n im_uncut = cv2.cvtColor(im_uncut, cv2.COLOR_RGB2GRAY)\n images_uncut.append(cv2.resize(im_uncut, (shape * cuts, shape * cuts)))\n x = np.array(images_uncut)\n\n if normalize:\n x_mean = np.mean(x, axis=(0, 1, 2))\n x_std = np.std(x, axis=(0, 1, 2))\n x = (x - x_mean) / (x_std + 1e-9)\n\n for im in x:\n height = im.shape[0]\n width = im.shape[1]\n frac_h = height // cuts\n frac_w = width // cuts\n i = 0\n image = []\n for h in range(cuts):\n for w in range(cuts):\n crop = im[h * frac_h:(h + 1) * frac_h, w * frac_w:(w + 1) * frac_w]\n crop_rehaped = cv2.resize(crop, (shape, shape))\n image.append([crop_rehaped, i, number_to_angle(i, cuts), neighbours(i, cuts)])\n i = i + 1\n images.append(image)\n # return np.array(images) # todo back to array\n return images", "def _central_crop(image, crop_size):\r\n shape = tf.shape(input=image)\r\n height, width = shape[0], shape[1]\r\n\r\n amount_to_be_cropped_h = (height - crop_size[0])\r\n crop_top = amount_to_be_cropped_h // 2\r\n amount_to_be_cropped_w = (width - crop_size[1])\r\n crop_left = amount_to_be_cropped_w // 2\r\n return tf.slice(\r\n image, [crop_top, crop_left, 0], [crop_size[0], crop_size[1], -1])", "def center_crop_to_smallest(x, y):\n smallest_width = min(x.shape[-1], y.shape[-1])\n smallest_height = min(x.shape[-2], y.shape[-2])\n x = center_crop(x, (smallest_height, smallest_width))\n y = center_crop(y, (smallest_height, smallest_width))\n return x, y", "def crop_view(data: np.ndarray, crop: Union[float, Tuple[float, float, float]], center_crop: bool = True):\n if type(crop) == float or type(crop) == int:\n if crop > 0.99999:\n return data\n icropx = 1 - crop\n icropy = 1 - crop\n icropz = 1 - crop\n else:\n icropx = 1 - crop[0]\n icropy = 1 - crop[1]\n icropz = 1 - crop[2]\n\n w, h, l = data.shape\n\n if center_crop:\n view = data[int(w / 2 * icropx):int(-w / 2 * icropx),\n int(h / 2 * icropy):int(-h / 2 * icropy),\n int(l / 2 * icropz):int(-l / 2 * icropz)]\n else:\n view = data[:int(w * (1 - icropx)), :int(h * (1 - icropy)), :int(l * (1 - icropz))]\n\n return view", "def crop_and_resize(image, boxes, size):\n box_ind = keras.backend.zeros_like(boxes, tensorflow.int32)\n box_ind = 
box_ind[..., 0]\n box_ind = keras.backend.reshape(box_ind, [-1])\n\n boxes = keras.backend.reshape(boxes, [-1, 4])\n\n return tensorflow.image.crop_and_resize(image, boxes, box_ind, size)" ]
[ "0.68269926", "0.65620327", "0.648815", "0.6427035", "0.6379702", "0.63226426", "0.62055635", "0.6204814", "0.6204427", "0.6082633", "0.604973", "0.60170174", "0.59973186", "0.5994196", "0.595956", "0.5947782", "0.59313524", "0.5928219", "0.59176385", "0.58877224", "0.58855003", "0.5849222", "0.5798105", "0.5782022", "0.5777495", "0.57657385", "0.57480437", "0.57449156", "0.57393473", "0.57242936", "0.57241464", "0.5715269", "0.5715243", "0.57149017", "0.5711867", "0.5711182", "0.57083285", "0.57048804", "0.5687255", "0.5663963", "0.56559885", "0.56527996", "0.56472534", "0.5643198", "0.56310713", "0.56310713", "0.56300426", "0.5609485", "0.5586932", "0.5585017", "0.558475", "0.55734587", "0.5568193", "0.5566251", "0.5559263", "0.55566114", "0.5556485", "0.5547433", "0.55431145", "0.5530204", "0.5527507", "0.5527492", "0.5525673", "0.55159163", "0.5513559", "0.55031043", "0.54947037", "0.5492765", "0.5491257", "0.5491257", "0.5487603", "0.54874504", "0.54836226", "0.5482619", "0.5480442", "0.54730916", "0.54682326", "0.54586756", "0.5457995", "0.5455843", "0.5455843", "0.5455843", "0.5452818", "0.54514503", "0.54495525", "0.5441773", "0.5440836", "0.54353595", "0.5434102", "0.5432285", "0.5430459", "0.5428591", "0.5424423", "0.5423377", "0.5418601", "0.54169214", "0.54136336", "0.54094684", "0.5405171", "0.5400667" ]
0.5979108
14
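The negatives collected in the record above are mostly variations on the same centre-crop pattern: compute top/left offsets from the image centre, then slice. As a reference for that shared pattern — not part of any dataset entry, with an illustrative function name and assumed shapes — a minimal NumPy sketch:

import numpy as np

def center_crop(img, crop_h, crop_w):
    # Crop a (H, W, ...) array to (crop_h, crop_w, ...) around its centre.
    h, w = img.shape[:2]
    top = max((h - crop_h) // 2, 0)
    left = max((w - crop_w) // 2, 0)
    return img[top:top + crop_h, left:left + crop_w]

# Usage with an assumed 480x640 RGB frame:
patch = center_crop(np.zeros((480, 640, 3)), 256, 256)
assert patch.shape[:2] == (256, 256)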
Helper to hold params and allow func like the original.
def __init__(self, graph, weights, input_tensor_name=None, output_tensor_name=None): self.sess = tf.Session() new_saver = tf.train.import_meta_graph(graph) new_saver.restore(self.sess, weights) get_tensor = tf.get_default_graph().get_tensor_by_name # Get the initial place holder, else default if input_tensor_name: self.placeholder = get_tensor(input_tensor_name) else: self.placeholder = get_tensor('Placeholder:0') if output_tensor_name: self.softmax = get_tensor(output_tensor_name) else: self.softmax = get_tensor('Softmax:0') # Save trainables into params trainable_params = tf.trainable_variables() layers = {} params = {} def add_to_layer(name): try: layers[name] = get_tensor("{}:0".format(name)) except KeyError: try: layers[name] = get_tensor("{}/Relu:0".format(name)) except KeyError: print("Activation Not Found.") pass for v in trainable_params: if 'weight' in v.name: name = v.name.split('/')[0] params[name] = v add_to_layer(name) # Pooling layers usually don't have a nice way of gathering. for n in tf.get_default_graph().as_graph_def().node: if 'pool' in n.name: v = get_tensor("{}:0".format(n.name)) name = n.name.split('/')[0] params[name] = v add_to_layer(name) # Get trainable params - 1 holds locations the other is a dummy script self.params = {} self._params = params self.layers = layers # Save empty dict into blobs self.blobs = {}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fn(*args, **kwargs):\n pass", "def params(funcarglist):\n def wrapper(function):\n function.funcarglist = funcarglist\n return function\n return wrapper", "def my_func(a, b):", "def wrapper(*args):", "def dummy_fn(self, *args, **kwargs):", "def test_param_of_func(self):\n source = \"\"\"\n def foo(x, y):\n return x + y\n \"\"\"\n target = \"\"\"\n def foo(x_new, y_new):\n return x + y\n \"\"\"\n self._check_compatibility(source, target)", "def _wrapper(func, args):\n return func(*args)", "def _helper_parameters(func, args=(), kwargs=None, onlykeys=False, onlyused=False):\n if kwargs is None:\n kwargs = {}\n # params = list(inspect.signature(self.__init__).parameters.keys())\n params = inspect.getargspec(func).args[1:] # TODO replace deprecated getargspec to work with py2 and py3, perhaps by getfullargspec\n\n if onlykeys and not onlyused: # only add to keywords\n covered = 0 # simulate no args\n else:\n covered = len(args)\n\n if onlyused and onlykeys: # only add modified by user\n adds = [(True if i < covered or key in kwargs else False) for i, key in\n enumerate(params)]\n # add keys from args\n for i, val in enumerate(args):\n kwargs[params[i]] = val\n elif onlyused:\n adds = [(True if i >= covered and key in kwargs else False) for i, key\n in\n enumerate(params)]\n else:\n adds = [(True if i >= covered else False) for i, key in\n enumerate(params)]\n return adds, params, kwargs", "def test_named_params(self):\n varargs = ()\n kwargs = {'arg1' : \"arg1_val\", 'default' : \"default_val\"}\n method = getattr(self.foo,'f_onearg_and_default')\n var_dict = reassign_function_arguments(method, varargs, kwargs)\n self.assertEquals(kwargs, var_dict)", "def check_params(params: dict) -> Callable:\n\n def _decorator(test_case: Callable) -> Callable:\n @wraps(test_case)\n def _wrapper(self):\n for param_name in params:\n for param_value in params[param_name]:\n test_case(self, param_name, param_value)\n\n return _wrapper\n\n return _decorator", "def foo(x, y):", "def foo(x, y):", "def foo(x, y):", "def make_safe_f(f, allowed_params):\n def inner(*args, **kwargs):\n if kwargs:\n new_kwargs = {}\n for k, v in kwargs.items():\n if k in allowed_params:\n new_kwargs[k] = v\n return f(*args, **new_kwargs)\n return f(*args, **kwargs)\n return inner", "def func():", "def my_func(self, a, b, c, d=None):\n return True", "def set_params(self, *arg):\n pass", "def function(args):\n pass", "def make_safe_f(f, allowed_params):\r\n def inner(*args, **kwargs):\r\n if kwargs:\r\n new_kwargs = {}\r\n for k, v in kwargs.items():\r\n if k in allowed_params:\r\n new_kwargs[k] = v\r\n return f(*args, **new_kwargs)\r\n return f(*args, **kwargs)\r\n return inner", "def test_onearg_and_keyword(self):\n varargs = (12,)\n kwargs = {'default' : 13}\n method = getattr(self.foo,'f_onearg_and_default')\n var_dict = reassign_function_arguments(method, varargs, kwargs)\n self.assert_(var_dict['arg1'] == 12)\n self.assert_(var_dict['default'] == 13)\n self.assert_(len(var_dict) == 2)", "def f(a, b):", "def params(cls):\n def method_decorator(method):\n @wraps(method)\n def wrapper(self, *args):\n return method(self, *map(cls, args))\n return wrapper\n return method_decorator", "def dummy_func(*args, **kwargs):\r\n pass", "def params(self):\n pass", "def set_params(self, params):", "def apply(self, func):\r\n return func(**self.kwargs)", "def __call__(self, param, xyz=False):\n pass", "def test_no_mutually_exclusive_args_provided(self):\n _func = mutually_exclusive_parameters('arg1', 'arg2')(undecorated_func)\n 
self.assertEqual(_func(), 'foo')\n self.assertEqual(_func(arg3='hello'), 'foo')", "def varfunc(self, fields=[]):\n self.func_arguments = fields", "def check_params(self):\n raise NotImplementedError", "def set_func_args(self, *args, **kwargs):\n self._func_args = args \n self._func_kw_args = kwargs", "def params():\n raise NotImplementedError", "def _check_params(self):\n pass", "def _setup_params(self,**params):\n ### a parameter might be passed in for one of the extra_pos;\n ### if a key in the params dict is not a *parameter* of this\n ### PO, then try it on the extra_pos\n for n,p in params.items():\n if n not in self.params():\n self.set_parameter_value(n,p)\n del params[n]\n\n Parameterized._setup_params(self,**params)", "def formParameters(unaryFunctions, binaryFunctions):", "def apply(self, func, *args, **kwargs):\n pass", "def test_one_mutually_exclusive_arg_provided(self):\n _func = mutually_exclusive_parameters('arg1', 'arg2')(undecorated_func)\n self.assertEqual(_func('hello'), 'foo')\n self.assertEqual(_func(arg1='hello'), 'foo')\n self.assertEqual(_func(arg2='hello'), 'foo')", "def test_onearg(self):\n varargs = (12,)\n kwargs = {}\n method = getattr(self.foo,'f_onearg')\n var_dict = reassign_function_arguments(method, varargs, kwargs)\n self.assert_(var_dict['arg1'] == 12)\n self.assert_(len(var_dict) == 1)", "def func(arg1, arg2):\n\n return arg", "def __call__(self, *ar, **kw):\n\t\tkw = {**self.default_kw, **kw} # add any default keywords\n\t\tkw = {k:v for k,v in kw.items() if self.is_kwarg_valid(k)} # remove non valid keywords (keywords that are not in base func)\n\n\t\t# selectively get the kwargs according to the user\n\t\tif self.ignore_kw == \"ALL\":\n\t\t\tkw = {}\n\t\telif type(self.ignore_kw) == list:\n\t\t\tkw = {k:v for k,v in kw.items() if not k in self.ignore_kw}\n\t\telse:\n\t\t\traise Exception(\"self.ignore_kw must be list or ALL, but is:\", self.ignore_kw)\n\t\t\n\n\t\tassert self.check(ar, is_check_verbose=True), \"Checks have failed on given parameters %s for %s\"%(ar, self.__class__.__name__)\n\t\treturn self.base_func(*self.additional_check(ar), **kw)", "def apply(self, func, *args):\n pass", "def test_single_keyword_arg_provided(self):\n _func = required_parameters('arg1')(undecorated_func)\n self.assertEqual(_func(arg1='hello'), 'foo')", "def test_onearg_and_default(self):\n varargs = (12,)\n kwargs = {}\n method = getattr(self.foo,'f_onearg_and_default')\n var_dict = reassign_function_arguments(method, varargs, kwargs)\n self.assert_(var_dict['arg1'] == 12)\n self.assert_(var_dict['default'] == 1)\n self.assert_(len(var_dict) == 2)\n var_dict = reassign_function_arguments(method, (12, 13), kwargs)\n self.assert_(var_dict['arg1'] == 12)\n self.assert_(var_dict['default'] == 13)\n self.assert_(len(var_dict) == 2)", "def fn():", "def get_parameters(func, args=(), kwargs=None, onlykeys=False, onlyused=False,\n default=None):\n # check what parameters to add\n adds, params, kwargs = _helper_parameters(func=func, args=args, kwargs=kwargs,\n onlykeys=onlykeys, onlyused=onlyused)\n for add, key in zip(adds, params):\n if add and key not in kwargs:\n kwargs[key] = default\n\n if onlykeys:\n return kwargs\n return args, kwargs", "def fun_par_dict(fun: Callable, *args):\n if len(args) > 0:\n return fun(*args[:-1], **args[-1])\n else:\n return fun()", "def test_single_positional_arg_provided(self):\n _func = required_parameters('arg1')(undecorated_func)\n self.assertEqual(_func('hello'), 'foo')", "def dparam_partial(inst_func, *args):\r\n def param_func(param, 
inst_func, args):\r\n inst_func.im_self._set_params(param)\r\n return inst_func(*args)\r\n return functools.partial(param_func, inst_func=inst_func, args=args)", "def funcname(params):\n # function body\n pass", "def test_make_safe_f(self):\r\n def f(x, y):\r\n return x * y\r\n self.assertEqual(f(3, 4), 12)\r\n g = make_safe_f(f, ['x', 'y'])\r\n self.assertEqual(g(3, 4), 12)\r\n self.assertEqual(g(x=3, y=4, z=10, xxx=11), 12)", "def test_noarg(self):\n varargs = ()\n kwargs = {}\n method = getattr(self.foo,'f_noarg')\n var_dict = reassign_function_arguments(method, varargs, kwargs)\n self.assert_(var_dict == {})", "def f(self,x,*params):\n raise NotImplementedError", "def prepost_hook_too_many_parameters(self, param) -> None:\n pass", "def __init__(**params):", "def example_function(a, b, c=\"c\"):\n print(a)\n print(b)\n print(c)", "def test_method_with_direct_params_order(self, session_scope_param, class_scope_param, function_scope_param):\n assert session_scope_param or class_scope_param or function_scope_param", "def ok_func(a=\"a\", b=\"b\"):\n return a + b", "def _func2_undecorated(arg1=None, arg2=None, arg3=None):\n pass", "def call_orig_func(func, *args, **kwargs):\n return func(*args, **kwargs)", "def Params(req, cmd=None):\n\tif req == 'POST':\n\t\treturn putFunc(\"Params\", cmd)\n\tif req == 'GET':\n\t\treturn getFunc(req, \"Params\")", "def preprocess_func(cls, func):\n pass", "def doubler_correct(f):\n def g(*args, **kwargs):\n \"\"\"whatever arguments g is supplied, pass them through to f\"\"\"\n print \"*args :\" \n print args\n print \"**kwargs :\" \n print kwargs\n return 2 * f(*args, **kwargs)\n\n return g", "def define_parameters(self):", "def f_noarg(self) :\n pass", "def _create_param_dict(self, func_args):\n for i, a in enumerate(func_args):\n self.fn.args[i].name = str(a)\n self.param_dict[a] = self.fn.args[i]", "def test_validate_params(mocker, params):\n validate_params(**params)", "def func(input, keyword=None):\r\n pass", "def wrapper(*args, **kwargs):\n return func(*args, **kwargs)", "def wrapper(*args, **kwargs):\n return func(*args, **kwargs)", "def initialize_params(self, params):\n pass", "def dummy(*args, **kwargs):\r\n pass", "def my_func(a,b,c):\n \n return a + b + c", "def _arg_swapper(op):\n\n def op_swapped(a, b, *args, **kwargs):\n return op(b, a, *args, **kwargs)\n\n return op_swapped", "def test_require_at_least_one_and_several_provided(self):\n _func = at_least_one_of('arg1', 'arg2')(undecorated_func)\n self.assertEqual(_func('ahoy', 'there'), 'foo')\n self.assertEqual(_func(arg1='ahoy', arg2='there'), 'foo')\n self.assertEqual(_func('ahoy', arg2='there', arg3='matey'), 'foo')", "def inject_and_run(self,func,**kw):\r\n args=inspect.getargspec(func).args\r\n if 'request' in args:\r\n kw['request']=self.request\r\n if 'user' in args:\r\n user=self._get_user()\r\n if user:\r\n kw['user']=user\r\n else:\r\n raise UserWarning,'function need login ,but you are not login'\r\n \r\n return func(**kw)", "def set_params(self, **kwargs):\n ...", "def test_wrapper_with_params():\n my_method = SGMethod(\"test\")\n par = my_method.create_parameter(\"par1\")\n other_method = SGMethod(\"other\")\n par1 = other_method.create_parameter(\"par1\")\n \n my_method.calls(other_method)\n my_method.check_call_validity();\n \n assert other_method == my_method.method_called\n assert len(my_method.args) == 1\n assert par == my_method.args[0]", "def _eval_params(trial, params: Dict[str, Any]) -> Dict[str, Any]:\n prepared = dict()\n for arg, value in params.items():\n if 
isinstance(value, dict):\n # Extract method.\n name = list(value.keys())[0]\n # Add prefix.\n method = \"suggest_\" + name\n # Get method kwargs.\n kwargs = value[name]\n # Add name arg.\n kwargs.update({\"name\": arg})\n # Evaluate method.\n value = getattr(trial, method)(**kwargs)\n prepared.update({arg: value})\n return prepared", "def edit_parameters(parameterized,with_apply=True,**params):\n if not with_apply:\n pf_class = ParametersFrame\n else:\n pf_class = ParametersFrameWithApply\n\n return pf_class(T.Toplevel(),parameterized,**params)", "def wrap_gate(fn):\n return lambda parms: fn(**parms) if len(parms) > 0 else fn", "def apply(self, *args: _Data) -> _Data:", "def arg_use():\n def args_use(*args):\n # The different of args and *args.\n print(\"*args value is {}\".format(*args)) \n print(\"args value is {}\".format(args))\n for arg in args:\n print(arg)\n def kwargs_use(*args, **kwargs):\n # *kwargs get kwargs key(s)\n print(\"*kwargs value is %s\", *kwargs)\n print(\"kwargs value is %s, type is %s\" %(kwargs, type(kwargs)))\n for kwarg in kwargs:\n print(kwarg)\n arg_str = \"abc\"\n arg_list = [1, 2, 3]\n arg_dict = {'name': \"Cai\", 'age': 24}\n args_use(arg_str, arg_list)\n kwargs_use(arg_str, arg_dict, user='CAI', id=23)\n kwargs_use(arg_str, **{'name': \"Cai\", 'age': 24}, user='CAI', id=23)", "def test_parameterless_calls(self):\n for attr in dir(api):\n func = getattr(api, attr)\n if callable(func): \n spec = inspect.getargspec(func)\n if not spec.args and not spec.varargs and not spec.keywords and not spec.defaults:\n func()", "def get_arguments(self, args=(), kwargs=None, onlykeys=False, onlyused=False,\n func=None):\n if func is None:\n func = self.__init__\n\n # check what parameters to add\n adds, params, kwargs = _helper_parameters(func=func, args=args, kwargs=kwargs,\n onlykeys=onlykeys, onlyused=onlyused)\n\n _map_parameters = getattr(self, \"_map_parameters\", None)\n for add, key in zip(adds, params):\n if add and key not in kwargs:\n try:\n if _map_parameters is not None and key in _map_parameters:\n mapped_key = _map_parameters[key]\n # if mapped_key is None then it means variable is not\n # assigned in the __init__ of the instance so ignore it\n if mapped_key is not None:\n kwargs[key] = getattr(self, mapped_key)\n else:\n kwargs[key] = getattr(self, key)\n except AttributeError:\n e, msg, traceback = sys.exc_info()\n msg.args = (\n msg.args[0] + \". 
Review @copy_support decorator or \"\n \"BaseCopySupporter class for more info.\",)\n raise_(e, msg, traceback)\n\n if onlykeys:\n return kwargs\n return args, kwargs", "def func(*args, **kwargs): # pragma: no cover\n raise NotImplementedError(\"{name} not ported from upstream\"\n .format(name=name))", "def param(*args, **kwargs):\n p = Param(*args, **kwargs)\n\n def decorator(func):\n func.param = p\n return func\n\n return decorator", "def cast_arguments(cast_dict):\n\n def decorator(func):\n def wrapper(request):\n request_params = get_dict_from_request(request)\n request_params = request_params.copy()\n for param in cast_dict:\n if param not in request_params:\n continue\n try:\n request_params[param] = cast_dict[param](\n request_params[param])\n except (ValueError, TypeError) as e:\n return APIInvalidArgumentResponse(error_msg=str(e))\n setattr(request, request.method, request_params)\n return func(request)\n\n return wrapper\n\n return decorator", "def test_require_at_least_one_and_one_provided(self):\n _func = at_least_one_of('arg1', 'arg2')(undecorated_func)\n self.assertEqual(_func('ahoy'), 'foo')\n self.assertEqual(_func(arg2='ahoy'), 'foo')", "def foo_do(a):\n print \"doing foo with arg\", a", "def test_twoargs(self):\n varargs = (12, 13)\n kwargs = {}\n method = getattr(self.foo,'f_twoargs')\n var_dict = reassign_function_arguments(method, varargs, kwargs)\n self.assert_(var_dict['arg1'] == 12)\n self.assert_(var_dict['arg2'] == 13)\n self.assert_(len(var_dict) == 2)", "def test_keyword(self):\n varargs = ()\n kwargs = {'default' : 12}\n method = getattr(self.foo,'f_default')\n var_dict = reassign_function_arguments(method, varargs, kwargs)\n self.assert_(var_dict['default'] == 12)\n self.assert_(len(var_dict) == 1)", "def add_to_apply_calls(self, func, *args, **kwargs):\n pass", "def given_func(arg_1, arg_2, arg_3, arg_4):\n return arg_1 + arg_2 + arg_3 + arg_4", "def func(a,b):\n a=b\n return a", "def place_holder(*args):\n return args", "def conf_passer(func):\n def inner(self, *a, **kw):\n return func(self.conf, *a, **kw)\n return inner", "def __init__(self, func, *args, **kwargs):\n self._func = func\n self._args = args\n self._kwargs = kwargs\n self._fully_bound = None", "def set_params(self, **params):\n return super().set_params(**params)", "def floatArguments(func):\n\n def inner_func(*args):\n args = map(float, args)\n return func(*args)\n\n return inner_func", "def func1 (arg1, arg2):\n arg1 = arg2\n return arg1", "def _validate_params(self):\n raise NotImplementedError('Must be implemented in subclasses.')" ]
[ "0.692556", "0.6727447", "0.67098534", "0.658326", "0.6408254", "0.63891876", "0.6376225", "0.6366123", "0.62743974", "0.62322086", "0.6189551", "0.6189551", "0.6189551", "0.6188234", "0.6184675", "0.6144916", "0.61128443", "0.6085491", "0.6061988", "0.6006739", "0.5996577", "0.59640527", "0.5959001", "0.59584266", "0.59564143", "0.59174436", "0.5889633", "0.58750486", "0.5865363", "0.585904", "0.5857036", "0.58495766", "0.58477974", "0.58405495", "0.5833955", "0.58210874", "0.57943285", "0.5791914", "0.5783111", "0.578244", "0.5779651", "0.5774888", "0.5770835", "0.5767492", "0.5745881", "0.57430065", "0.57367545", "0.5727332", "0.5717485", "0.5712134", "0.57118523", "0.5710935", "0.5710388", "0.5693058", "0.569164", "0.56716865", "0.56672525", "0.5663448", "0.56570476", "0.56512386", "0.56491", "0.5648398", "0.56443745", "0.5633225", "0.5623374", "0.5622718", "0.5618723", "0.56167644", "0.56167644", "0.5616359", "0.5601466", "0.5599654", "0.559926", "0.55976063", "0.5597488", "0.5594379", "0.5590402", "0.55852264", "0.5581776", "0.5578978", "0.55682516", "0.5560512", "0.55547905", "0.55277926", "0.5526704", "0.55185956", "0.55184346", "0.5515752", "0.55153704", "0.55108017", "0.5502293", "0.55005664", "0.54948545", "0.5493936", "0.5493886", "0.54902214", "0.548707", "0.54725707", "0.54639053", "0.5452555", "0.5452532" ]
0.0
-1
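The negatives for the record above revolve around inspecting function signatures and reassigning positional and keyword arguments onto parameter names. A minimal standard-library sketch of that pattern (the function and argument names are hypothetical, not drawn from the dataset):

import inspect

def bind_arguments(func, *args, **kwargs):
    # Map the supplied args/kwargs onto func's parameter names, filling in defaults.
    bound = inspect.signature(func).bind_partial(*args, **kwargs)
    bound.apply_defaults()
    return dict(bound.arguments)

def example(a, b, c=3):
    return a + b + c

# Usage: positional values are reassigned to their parameter names.
assert bind_arguments(example, 1, 2) == {"a": 1, "b": 2, "c": 3}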
FUNCTION BY EILEEN Loads a data file saved by relacs. Returns a tuple of dictionaries containing the data and the header information
def load(filename): with open(filename, 'r') as fid: L = [l.lstrip().rstrip() for l in fid.readlines()] ret = [] dat = {} X = [] keyon = False currkey = None for l in L: # if empty line and we have data recorded if (not l or l.startswith('#')) and len(X) > 0: keyon = False currkey = None dat['data'] = np.array(X) ret.append(dat) X = [] dat = {} if '---' in l: continue if l.startswith('#'): if ":" in l: tmp = [e.rstrip().lstrip() for e in l[1:].split(':')] if currkey is None: dat[tmp[0]] = tmp[1] else: dat[currkey][tmp[0]] = tmp[1] elif "=" in l: tmp = [e.rstrip().lstrip() for e in l[1:].split('=')] if currkey is None: dat[tmp[0]] = tmp[1] else: dat[currkey][tmp[0]] = tmp[1] elif l[1:].lower().startswith('key'): dat['key'] = [] keyon = True elif keyon: dat['key'].append(tuple([e.lstrip().rstrip() for e in l[1:].split()])) else: currkey = l[1:].rstrip().lstrip() dat[currkey] = {} elif l: # if l != '' keyon = False currkey = None X.append( [float(e) for e in l.split()]) if len(X) > 0: dat['data'] = np.array(X) else: dat['data'] = [] ret.append(dat) return tuple(ret)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_data(self) -> None:", "def load_data(self):", "def load_data():\n\n server_node = load_nodes(SERVER_NODE_INFILE)\n road_node = load_nodes(ROAD_NODE_INFILE)\n road_segment_point = load_nodes(ROAD_SEGMENT_POINT_INFILE)\n\n return server_node, road_node, road_segment_point", "def getHeaderDict(self):\r\n #put the headers into a dict\r\n \r\n print(\"opening \",self.filename)\r\n with open(self.filename, 'r') as readfile:\r\n headers = readfile.readline()\r\n firstrow = readfile.readline()\r\n if not firstrow:\r\n print(\"first line after headers is blank\")\r\n self.loadDictRow(keystring=headers)\r\n else: #assume first row after headers is test router\r\n print(\"load test router row\") \r\n self.loadDictRow(keystring = headers, valuestring = firstrow) \r\n \r\n # check for headers\r\n miscount=0\r\n for key in self.dataheader:\r\n if not key in self.objdict:\r\n print(\"missing key !\", key)\r\n miscount += 1\r\n\r\n if miscount == 0:\r\n print(\"all Columns found. Thank you.\")\r\n # elif (miscount == 11) and (\"IPADDRESS\" in ):\r\n # print(\"Found IP Address column. program will add additional columns\")\r\n elif miscount > 11:\r\n print(\"Could not locate Header Row\")\r\n elif miscount > 0:\r\n print(\"some columns missing, will add additional columns\")\r\n \r\n \r\n #end file check on filename \r", "def load_data(filename):\n hkas = HKArchiveScanner()\n hkas.process_file(filename)\n cat = hkas.finalize()\n fields, timelines = cat.get_data(['position'], short_match=True)\n return fields, timelines", "def load_data():\r\n print ('Loadng all the file one time......')\r\n if not os.path.exists('cifar.pkl'):\r\n set_data()\r\n with open('cifar.pkl', 'rb') as cifar_pickle:\r\n data = six.moves.cPickle.load(cifar_pickle)\r\n return data", "def read_data(filename):\n # Store debug mode\n debug = params.debug\n params.debug = None\n\n # Initialize dictionary\n header_dict = {}\n\n headername = filename + \".hdr\"\n\n with open(headername, \"r\") as f:\n # Replace characters for easier parsing\n hdata = f.read()\n hdata = hdata.replace(\",\\n\", \",\")\n hdata = hdata.replace(\"\\n,\", \",\")\n hdata = hdata.replace(\"{\\n\", \"{\")\n hdata = hdata.replace(\"\\n}\", \"}\")\n hdata = hdata.replace(\" \\n \", \"\")\n hdata = hdata.replace(\";\", \"\")\n hdata = hdata.split(\"\\n\")\n\n # Loop through and create a dictionary from the header file\n for i, string in enumerate(hdata):\n if ' = ' in string:\n header_data = string.split(\" = \")\n header_dict.update({header_data[0].rstrip(): header_data[1].rstrip()})\n elif ' : ' in string:\n header_data = string.split(\" : \")\n header_dict.update({header_data[0].rstrip(): header_data[1].rstrip()})\n\n # Reformat wavelengths\n header_dict[\"wavelength\"] = header_dict[\"wavelength\"].replace(\"{\", \"\")\n header_dict[\"wavelength\"] = header_dict[\"wavelength\"].replace(\"}\", \"\")\n header_dict[\"wavelength\"] = header_dict[\"wavelength\"].replace(\" \", \"\")\n header_dict[\"wavelength\"] = header_dict[\"wavelength\"].split(\",\")\n\n # Create dictionary of wavelengths\n wavelength_dict = {}\n for j, wavelength in enumerate(header_dict[\"wavelength\"]):\n wavelength_dict.update({float(wavelength): float(j)})\n\n # Replace datatype ID number with the numpy datatype\n dtype_dict = {\"1\": np.uint8, \"2\": np.int16, \"3\": np.int32, \"4\": np.float32, \"5\": np.float64, \"6\": np.complex64,\n \"9\": np.complex128, \"12\": np.uint16, \"13\": np.uint32, \"14\": np.uint64, \"15\": np.uint64}\n header_dict[\"data type\"] = 
dtype_dict[header_dict[\"data type\"]]\n\n # Read in the data from the file\n raw_data = np.fromfile(filename, header_dict[\"data type\"], -1)\n\n # Reshape the raw data into a datacube array\n array_data = raw_data.reshape(int(header_dict[\"lines\"]),\n int(header_dict[\"bands\"]),\n int(header_dict[\"samples\"])).transpose((0, 2, 1))\n\n if \"default bands\" in header_dict:\n header_dict[\"default bands\"] = header_dict[\"default bands\"].replace(\"{\", \"\")\n header_dict[\"default bands\"] = header_dict[\"default bands\"].replace(\"}\", \"\")\n default_bands = header_dict[\"default bands\"].split(\",\")\n\n pseudo_rgb = cv2.merge((array_data[:, :, int(default_bands[0])],\n array_data[:, :, int(default_bands[1])],\n array_data[:, :, int(default_bands[2])]))\n\n else:\n max_wavelength = max([float(i) for i in wavelength_dict.keys()])\n min_wavelength = min([float(i) for i in wavelength_dict.keys()])\n # Check range of available wavelength\n if max_wavelength >= 635 and min_wavelength <= 490:\n id_red = _find_closest(np.array([float(i) for i in wavelength_dict.keys()]), 710)\n id_green = _find_closest(np.array([float(i) for i in wavelength_dict.keys()]), 540)\n id_blue = _find_closest(np.array([float(i) for i in wavelength_dict.keys()]), 480)\n\n pseudo_rgb = cv2.merge((array_data[:, :, [id_blue]],\n array_data[:, :, [id_green]],\n array_data[:, :, [id_red]]))\n else:\n # Otherwise take 3 wavelengths, first, middle and last available wavelength\n id_red = int(header_dict[\"bands\"]) - 1\n id_green = int(id_red / 2)\n pseudo_rgb = cv2.merge((array_data[:, :, [0]],\n array_data[:, :, [id_green]],\n array_data[:, :, [id_red]]))\n\n # Gamma correct pseudo_rgb image\n pseudo_rgb = pseudo_rgb ** (1 / 2.2)\n # Scale each of the channels up to 255\n pseudo_rgb = cv2.merge((rescale(pseudo_rgb[:, :, 0]),\n rescale(pseudo_rgb[:, :, 1]),\n rescale(pseudo_rgb[:, :, 2])))\n\n max_wl = float(str(header_dict[\"wavelength\"][-1]).rstrip())\n min_wl = float(str(header_dict[\"wavelength\"][0]).rstrip())\n\n # Create an instance of the spectral_data class\n spectral_array = Spectral_data(array_data=array_data, max_wavelength=max_wl,\n min_wavelength=min_wl, d_type=header_dict[\"data type\"],\n wavelength_dict=wavelength_dict, samples=int(header_dict[\"samples\"]),\n lines=int(header_dict[\"lines\"]), interleave=header_dict[\"interleave\"],\n wavelength_units=header_dict[\"wavelength units\"], array_type=\"datacube\",\n pseudo_rgb=pseudo_rgb, filename=filename)\n\n # Reset debug mode\n params.debug = debug\n\n if params.debug == \"plot\":\n # Gamma correct pseudo_rgb image\n plot_image(pseudo_rgb)\n elif params.debug == \"print\":\n print_image(pseudo_rgb, os.path.join(params.debug_outdir, str(params.device) + \"_pseudo_rgb.png\"))\n\n return spectral_array", "def load(self):\n canSave = self.canSave\n #--Header\n inPath = os.path.join(self.fileInfo.dir,self.fileInfo.name)\n ins = Tes3Reader(self.fileInfo.name,file(inPath,'rb'))\n (name,size,delFlag,recFlag) = ins.unpack('4s3i',16,'REC_HEAD')\n self.tes3 = Tes3(name,size,delFlag,recFlag,ins,True)\n #--Raw data read\n while not ins.atEnd():\n #--Get record info and handle it\n (name,size,delFlag,recFlag) = ins.unpackRecHeader()\n #--LEVC?\n if name == 'LEVC':\n levc = Levc(name,size,delFlag,recFlag,ins,True)\n self.levcs[levc.id] = levc\n if canSave: self.records.append(levc)\n #print ' Added:',levc.id\n elif name == 'LEVI':\n levi = Levi(name,size,delFlag,recFlag,ins,True)\n self.levis[levi.id] = levi\n if canSave: self.records.append(levi)\n #print ' 
Added:',levi.id\n #--Other\n elif canSave:\n record = Record(name,size,delFlag,recFlag,ins)\n self.records.append(record)\n else:\n ins.seek(size,1,'Record')\n #--Done Reading\n ins.close()", "def load_data(filename) :\r\n data = Data()\r\n data.load(filename)\r\n return data", "def read_file(filename):\n # Read in as nested dictionary\n # hipparcos_data = {'(star catalog number':\n # { 'parallax' : ... , 'apparent_magnitude' : ... , 'blue_minus_visual' : ... },\n # ... }\n\n return hipparcos_data", "def load(self):\n #print self.fileInfo.name\n progress = self.progress\n filePath = os.path.join(self.fileInfo.dir,self.fileInfo.name)\n self.fileSize = os.path.getsize(filePath)\n #--Localize\n cells = self.cells\n records = self.records\n canSave = self.canSave\n skipObjRecords = self.skipObjRecords\n contTypes = set(['CREC','CNTC','NPCC'])\n levTypes = set(('LEVC','LEVI'))\n debrisIds = self.debrisIds\n debrisTypes = set(debrisIds.keys())\n #--Header\n inPath = os.path.join(self.fileInfo.dir,self.fileInfo.name)\n ins = Tes3Reader(self.fileInfo.name,file(inPath,'rb'))\n (name,size,delFlag,recFlag) = ins.unpackRecHeader()\n self.tes3 = Tes3(name,size,delFlag,recFlag,ins,True)\n if not canSave: del self.tes3.others[:]\n #--Progress info\n progress = self.progress\n progress(0.0,'Loading '+self.fileInfo.name)\n #--Raw data read\n while not ins.atEnd():\n #--Get record info and handle it\n (name,size,delFlag,recFlag) = ins.unpackRecHeader()\n #print \"%s [%d]\" % (name,size)\n #--CELL?\n if name == 'CELL':\n record = Cell(name,size,delFlag,recFlag,ins,0,skipObjRecords)\n cells.append(record)\n if canSave: records.append(record)\n #--Contents\n elif canSave and name in contTypes:\n if name == 'CREC':\n record = Crec(name,size,delFlag,recFlag,ins,True)\n elif name == 'CNTC':\n record = Cntc(name,size,delFlag,recFlag,ins,True)\n else:\n record = Npcc(name,size,delFlag,recFlag,ins,True)\n self.conts.append(record)\n self.conts_id[record.getId()] = record\n records.append(record)\n #--File Map\n elif name == 'FMAP':\n record = Fmap(name,size,delFlag,recFlag,ins)\n self.fmap = record\n records.append(record)\n #--Landscapes\n elif name == 'LAND':\n record = Land(name,size,delFlag,recFlag,ins)\n self.lands[record.getId()] = record\n records.append(record)\n #--Scripts\n elif canSave and name == 'SCPT':\n record = Scpt(name,size,delFlag,recFlag,ins,True)\n records.append(record)\n if record.getRef():\n self.refs_scpt[record] = record.getRef()\n #--Save debris info?\n elif name in debrisTypes:\n record = Record(name,size,delFlag,recFlag,ins)\n id = record.getId()\n if id:\n debrisIds[name].append(id.lower())\n if canSave:\n records.append(record)\n #--Skip Non-cell?\n elif not canSave:\n ins.seek(size,1,name)\n #--Keep non-cell?\n else:\n records.append(Record(name,size,delFlag,recFlag,ins))\n #--Done Reading\n ins.close()\n #--Analyze Cells\n cntCells = 0\n progress.setMax(len(self.cells))\n for cell in self.cells:\n cell.load(None,1)\n self.cells_id[cell.getId()] = cell\n if not canSave:\n cell.data = None #--Free some memory\n #--Progress\n cntCells += 1\n progress(cntCells)\n #--Scripts\n if self.refs_scpt:\n self.updateScptRefs()", "def _read_file_definition(self):\n row_count = 0\n #\n # THIS METHOD ASSUMES A 14 ROW HEADER\n # If the number of header row lines in the glider ASCII input file changes from 14,\n # this method will NOT WORK\n num_hdr_lines = 14\n\n header_pattern = r'(.*): (.*)$'\n header_re = re.compile(header_pattern)\n\n line = self._stream_handle.readline()\n\n while line and 
row_count < num_hdr_lines:\n\n match = header_re.match(line)\n\n if match:\n key = match.group(1)\n value = match.group(2)\n value = value.strip()\n\n # update num_hdr_lines based on the header info.\n if key == 'num_ascii_tags':\n # this key has a required value of 14, otherwise we don't know how to parse the file\n if int(value) != num_hdr_lines:\n raise DatasetParserException(\"Header must be %d rows, but it is %s\" % (num_hdr_lines, value))\n\n elif key == 'num_label_lines':\n # this key has a required value of 3, otherwise we don't know how to parse the file\n if int(value) != 3:\n raise DatasetParserException(\"There must be 3 Label lines from the header for this parser\")\n\n elif key == 'sensors_per_cycle':\n # save for future use\n self._header_dict[key] = int(value)\n\n elif key in ['filename_label', 'mission_name', 'fileopen_time']:\n # create a dictionary of these 3 key/value pairs strings from\n # the header rows that need to be saved for future use\n self._header_dict[key] = value\n\n else:\n log.warn(\"Failed to parse header row: %s.\", line)\n\n row_count += 1\n # only read the header lines in this method so make sure we stop\n if row_count < num_hdr_lines:\n line = self._stream_handle.readline()\n\n if row_count < num_hdr_lines:\n log.error('Not enough data lines for a full header')\n raise DatasetParserException('Not enough data lines for a full header')", "def _read(self):\n # initializng data dictionary\n self.data={}\n\n f = FortranFile(self.filename)\n # Default omnivor binary header\n self.data['MK'] = f.readInts('i')\n self.data['itime'] = f.readInts('i')\n self.data['version'] = f.readString()\n self.data['file_id'] = f.readInts('i')\n self.data['sversion'] = f.readString()\n # Velocity field\n self.data['stype'] = f.readString()\n self.data['is_grid'] = f.readInts('i')\n nCPs = f.readInts('i')\n self.data['nCPs'] = nCPs\n if self.data['MK'] == 8:\n real_char='d'\n else:\n real_char='f'\n if self.data['is_grid']:\n #print('File is a velocity grid file')\n n1 = f.readInts('i')\n n2 = f.readInts('i')\n n3 = f.readInts('i')\n self.data['n1'] = n1\n self.data['n2'] = n2\n self.data['n3'] = n3\n self.data['is_straight'] = f.readInts('i')\n self.data['v1'] = f.readReals(real_char)\n self.data['v2'] = f.readReals(real_char)\n self.data['v3'] = f.readReals(real_char)\n\n CPs_raw = f.readReals(real_char)\n Utot_raw = f.readReals(real_char)\n CPs = np.reshape(CPs_raw,(3,nCPs),order = 'F')\n Utot = np.reshape(Utot_raw,(3,nCPs),order = 'F')\n\n acc=-1\n CPsTab = np.zeros((3, n1,n2,n3))\n UtotTab = np.zeros((3, n1,n2,n3))\n # Reshaping the nasty way (this is natural order). 
\n for i in range(0,n1):\n for j in range(0,n2):\n for k in range(0,n3):\n acc=acc+1\n CPsTab[0:3,i,j,k] = CPs[0:3,acc]\n UtotTab[0:3,i,j,k] = Utot[0:3,acc]\n\n self.data['CPs'] = CPs\n self.data['CPsTab'] = CPsTab\n self.data['Utot'] = Utot\n self.data['UtotTab'] = UtotTab", "def loadData():\n project_dir = \"/home/c/chandanchowdhury/Documents/CIS-833/CSSearch/indexer/\"\n\n index_file = \"index_file.pkl\"\n link_file = \"link_file.pkl\"\n\n index_data = loadPickle(project_dir+index_file)\n link_data = loadPickle(project_dir+link_file)\n\n return index_data, link_data", "def load_data(self):\n logging.debug('Loading data from file ({})...'.format(self.file_name))\n parsed_data = list()\n with open(self.file_name) as file_data:\n for line in file_data.readlines():\n temp = dict()\n if 'JD' in line:\n continue\n line = line.split()\n temp['ts'], temp['mag'], temp['dif'] = float(line[0][:14]), float(line[1]), float(line[2])\n temp['f_mag'] = self.kalman_filter(temp['mag'])\n temp['dt'] = self.jd_to_datetime(temp['ts'])\n temp['dt_cor'] = self.jd_to_datetime(temp['ts'] - TIME_CRT)\n parsed_data.append(temp)\n logging.debug(' {} records loaded.'.format(len(parsed_data)))\n logging.debug(parsed_data[0])\n self.data_stream = parsed_data", "def get_file_data(filename):", "def read_data(self):\n if not self.header['data included']:\n pass\n elif self.header['file type'] in (21, 26):\n self._isotope_data()\n if os.path.exists(self.filename + '_txt'):\n self._isotope_txt_data()\n elif self.header['file type'] == 22:\n # line scan types, no ImageHeader\n warnings.warn('No data read for line scan, fix')\n pass\n elif self.header['file type'] in (31, 35):\n self._beamstability_data()\n else:\n self._image_data()", "def read_reference_data():\n return {f:read_local_file(f) for f in os.listdir(DATA_DIR)}", "def readFrom(self,fn):\n hdrs = {}\n try:\n f = open(fn+\".headers\",\"tr\")\n for l in f:\n if l[-1:]==\"\\n\":\n l = l[:-1]\n i = l.find(\": \")\n if -1!=i:\n hdrs[l[:i]] = l[i+2:]\n f.close()\n except (Exception,Error) as err:\n log(\"readFrom: header: error: \"+str(err))\n try:\n f2 = open(fn,\"br\")\n data = f2.read()\n f2.close()\n except (Exception,Error) as err:\n log(\"readFrom: body: error: \"+str(err))\n return (hdrs,data)", "def _load(self):\n # Extract the ASCII header (5 first lines)\n with open(self._xst_bin, 'rb') as f:\n header = list(islice(f, 0, 5))\n assert header[0] == b'HeaderStart\\n',\\\n 'Wrong header start'\n assert header[-1] == b'HeaderStop\\n',\\\n 'Wrong header stop'\n header = [s.decode('utf-8') for s in header]\n hd_size = sum([len(s) for s in header])\n\n # Parse informations into a metadata dictionnary\n keys = ['freq', 'ma', 'accu']\n search = ['Freq.List', 'Mr.List', 'accumulation']\n types = ['float64', 'int', 'int']\n for key, word, typ in zip(keys, search, types):\n for h in header:\n if word in h:\n self.meta[key] = np.array(\n h.split('=')[1].split(','),\n dtype=typ\n )\n\n # Deduce the dtype for decoding\n n_ma = self.meta['ma'].size\n n_sb = self.meta['freq'].size\n dtype = np.dtype(\n [('jd', 'float64'),\n ('data', 'complex64', (n_sb, n_ma*n_ma*2 + n_ma))]\n )\n\n # Decoding the binary file\n tmp = np.memmap(\n filename=self._xst_bin,\n dtype='int8',\n mode='r',\n offset=hd_size\n )\n decoded = tmp.view(dtype)\n\n self.data = decoded['data'] / self.meta['accu']\n self.time = Time(decoded['jd'], format='jd', precision=0)\n\n return", "def readHeader(self, filename):\n f = Data.Usrxxx.readHeader(self, filename)\n# self.sayHeader()\n \n while True:\n data = 
fortran.read(f)\n if data is None: break\n size = len(data)\n# print(\"size: \", size)\n\n if size == 14 and data[:10] == \"STATISTICS\":\n self.statpos = f.tell()\n for det in self.detector:\n data = Data.unpackArray(fortran.read(f))\n det.total = data[0]\n det.totalerror = data[1]\n# for j in range(6):\n# fortran.skip(f)\n break\n\n if size != 50: raise IOError(\"Invalid USRTRACK/USRCOLL file\")\n\n header = struct.unpack(\"=i10siiififfif\", data)\n\n det = Data.Detector()\n det.nb = header[0]\n det.name = header[1].strip() # titutc - track/coll name\n det.type = header[2] # itustc - type of binning: 1 - linear energy etc\n det.dist = header[3] # idustc = distribution to be scored\n det.reg = header[4] # nrustc = region\n det.volume = header[5] # vusrtc = volume (cm**3) of the detector\n det.lowneu = header[6] # llnutc = low energy neutron flag\n det.elow = header[7] # etclow = minimum energy [GeV]\n det.ehigh = header[8] # etchgh = maximum energy [GeV]\n det.ne = header[9] # netcbn = number of energy intervals\n det.de = header[10] # detcbn = energy bin width\n\n self.detector.append(det)\n\n if det.lowneu:\n data = fortran.read(f)\n det.ngroup = struct.unpack(\"=i\",data[:4])[0]\n det.egroup = struct.unpack(\"=%df\"%(det.ngroup+1), data[4:])\n print(\"Low energy neutrons scored with %d groups\" % det.ngroup)\n else:\n\t\tdet.ngroup = 0\n\t\tdet.egroup = []\n\n\t size = (det.ngroup+det.ne) * 4\n\t if size != fortran.skip(f):\n\t\traise IOError(\"Invalid USRTRACK file\")\n f.close()", "def _main_header(self, hdr):\n d = {}\n # Called readDefAnalysis in OpenMIMS\n d['sample type'], d['data included'], d['sample x'], d['sample y'], \\\n d['analysis type'], d['user name'], d['sample z'], date, time = \\\n unpack(self._bo + '4i 32s 16s i 12x 16s 16s', hdr.read(112))\n\n d['data included'] = bool(d['data included'])\n d['user name'] = self._cleanup_string(d['user name'])\n d['analysis type'] = self._cleanup_string(d['analysis type']).lower()\n date = self._cleanup_string(date)\n time = self._cleanup_string(time)\n d['date'] = self._cleanup_date(date + ' ' + time)\n\n if self.header['file type'] in (27, 29, 39):\n # Called MaskImage/readMaskIm in OpenMIMS\n d['original filename'], d['analysis duration'], d['frames'], \\\n d['scan type'], d['magnification'], d['size type'], \\\n d['size detector'], d['beam blanking'], d['presputtering'], \\\n d['presputtering duration'] = \\\n unpack(self._bo + '16s 3i 3h 2x 3i', hdr.read(48))\n\n d['AutoCal'] = self._autocal(hdr)\n d['HVControl'] = {}\n d['HVControl']['hvcontrol enabled'] = False\n\n elif self.header['file type'] in (22, 41):\n # Called MaskSampleStageImage/readMaskIss in OpenMIMS\n d['original filename'], d['analysis duration'], d['scan type'], \\\n d['steps'], d['step size x'], d['step size y'], d['step size?'], \\\n d['step waittime'], d['frames'], d['beam blanking'], \\\n d['presputtering'], d['presputtering duration'] = \\\n unpack(self._bo + '16s 6i d 4i', hdr.read(64))\n\n d['scan type'] = _stage_scan_types.get(d['scan type'], str(d['scan type']))\n\n d['AutoCal'] = self._autocal(hdr)\n d['HVControl'] = self._hvcontrol(hdr)\n # Don't know if this unused byte needs to go after HVControl or after SigRef.\n hdr.seek(4, 1)\n\n elif self.header['file type'] in (21, 26):\n # Not in OpenMIMS\n # this bit same as image, 1 extra unused/unknown\n d['original filename'], d['analysis duration'], d['frames'], \\\n d['scan type'], d['magnification'], d['size type'], \\\n d['size detector'], d['beam blanking'], d['presputtering'], \\\n 
d['presputtering duration'] = \\\n unpack(self._bo + '16s 4x 3i 3h 2x 3i', hdr.read(52))\n\n # this bit same as stage scan\n d['AutoCal'] = self._autocal(hdr)\n d['HVControl'] = self._hvcontrol(hdr)\n\n # 24 bytes unknown, not sure if they go here or before AutoCal\n hdr.seek(24, 1)\n\n elif self.header['file type'] == 31:\n # Don't know if this is correct, all 0s anyway\n d['original filename'], d['scan type'], \\\n d['beam blanking'], d['presputtering'] = \\\n unpack(self._bo + '16s 3i 4x', hdr.read(32))\n\n elif self.header['file type'] == 35:\n d['original filename'], d['scan type'], d['analysis duration'], \\\n d['frames'], d['beam blanking'], d['presputtering'] = \\\n unpack(self._bo + '16s 5i 40x', hdr.read(76))\n\n d['AutoCal'] = self._autocal(hdr)\n d['HVControl'] = self._hvcontrol(hdr)\n\n else:\n raise TypeError('What type of image are you? {}'.format(self.header['file type']))\n\n # Continue main header for all types\n d['SigRef'] = self._sigref(hdr)\n d['masses'] = unpack(self._bo + 'i', hdr.read(4))[0]\n\n # scan type is set for stage scan analysis, set others\n if isinstance(d['scan type'], int):\n if d['scan type'] == 0:\n d['scan type'] = ''\n else:\n d['scan type'] = str(d['scan type'])\n\n d['beam blanking'] = bool(d['beam blanking'])\n d['presputtering'] = bool(d['presputtering'])\n d['original filename'] = self._cleanup_string(d['original filename'])\n\n if self.header['file type'] in (21, 26, 27, 29, 35, 39):\n if self.header['file version'] >= 4108:\n n = 60\n else:\n n = 10\n elif self.header['file type'] in (22, 31, 40, 41):\n n = 20\n else:\n n = 0\n\n # Not sure what this is, memory pointers? Not needed.\n # d['mass table ptr'] = unpack(self._bo + 2*n*'h', hdr.read(n*4))\n hdr.seek(n*4, 1)\n\n if self.header['file type'] in (21, 22, 26, 40, 41, 35):\n hdr.seek(4, 1) # 4 bytes unused\n\n # Mass table, dict by species label.\n d['MassTable'] = collections.OrderedDict()\n for m in range(d['masses']):\n mi = {}\n mi['trolley index'], unknown, mi['mass'], mi['matrix or trace'], \\\n mi['detector'], mi['wait time'], mi['frame count time'] = \\\n unpack(self._bo + '2i d 2i 2d', hdr.read(40))\n\n if self.header['file type'] == 31:\n if d['analysis type'].endswith('trolley step scan'):\n # start and end are in mm, step is in μm; convert to mm\n mi['radius start'], mi['radius end'], \\\n mi['radius step'], mi['b field bits'] = \\\n unpack(self._bo + '3d i', hdr.read(28))\n mi['radius step'] /= 1000\n else:\n mi['voltage start'], mi['voltage end'], \\\n mi['voltage step'], mi['b field bits'] = \\\n unpack(self._bo + '3d i', hdr.read(28))\n else:\n mi['offset'], mi['b field bits'] = unpack(self._bo + '2i', hdr.read(8))\n\n mi.update(self._species(hdr))\n\n if self.header['file type'] == 31:\n hdr.seek(4, 1)\n\n # Add correction controls, my own addition.\n mi['background corrected'] = False\n mi['deadtime corrected'] = False\n mi['yield corrected'] = False\n\n label = mi.pop('label')\n # This is true for NS50L and file version 4108.\n # Anywhere else different?\n # Maybe confirm this with the Trolleys dict,\n # there is an Esi trolley.\n if mi['trolley index'] == 8:\n label = 'SE'\n\n d['MassTable'][label] = mi\n\n # Create a few convenient lists\n d['label list'] = tuple(d['MassTable'].keys())\n d['label list fmt'] = tuple(format_species(m) for m in d['label list'])\n d['mass list'] = tuple(d['MassTable'][m]['mass'] for m in d['label list'])\n\n return d", "def getBMData(filename):\n\n data = {}\n f = open(filename)\n line = f.readline() \n data['name'], data['gender'], 
data['age'] = [], [], []\n data['division'], data['country'], data['time'] = [], [], []\n while line != '':\n split = line.split(',')\n data['name'].append(split[0])\n data['gender'].append(split[1])\n data['age'].append(int(split[2]))\n data['division'].append(int(split[3]))\n data['country'].append(split[4]) \n data['time'].append(float(split[5][:-1])) #remove \\n\n line = f.readline()\n f.close()\n return data", "def read(file_path: str) -> dict:\n\n if not os.path.isfile(file_path):\n raise FileNotFoundError(\"The file `%s` must exist and be a BLM file\" % file_path)\n\n file_contents = open(file_path, 'r').read()\n headers = parse_headers(file_contents)\n definitions = parse_definitions(headers, file_contents)\n data = parse_data(headers, definitions, file_contents)\n\n return {'headers': headers, 'definitions': definitions, 'data': data}", "def _load(self):\n\n # number of non-data header details at top of data file\n header = 1\n\n # open file\n weatherData = []\n with open(self.wfile) as myfile:\n if (self.lines > 0):\n weatherData = [next(myfile) for x in xrange(self.lines + header)]\n else:\n weatherData = myfile.readlines()\n\n # get data stream from first line\n streamHeader = weatherData.pop(0).rstrip()\n if (streamHeader == 'FULL'):\n self.dataStream = 0\n elif (streamHeader == 'ADVANCED'):\n self.dataStream = 1\n elif (streamHeader == 'BASIC'):\n self.dataStream = 2\n else:\n print \"Error: unecognised data stream from file %s\" % (self.wfile)\n return -1\n\n # read data\n inputData = []\n for line in weatherData:\n entries = line.split()\n inputData.append(entries)\n\n # copy all into np array\n self.data = np.array(inputData)\n\n return 0", "def load_rivfile(filename=None): \n\n data={}\n\n if filename==None:\n print('load_rivfile requires a filename to load.')\n return\n try:\n fp=open(filename,'r')\n except IOError:\n print('load_rivfile: invalid filename.')\n return data\n \n data['RIVER_NAME']=''\n data['RIVER_GRID_LOCATION']=0\n data['RIVER_VERTICAL_DISTRIBUTION']=''\n\n\n for line in fp:\n if line.strip().startswith('RIVER_NAME'):\n data['RIVER_NAME']=np.append(data['RIVER_NAME'],line[line.find('\"')+1:line.rfind('\"')])\n if line.strip().startswith('RIVER_GRID_LOCATION'):\n data['RIVER_GRID_LOCATION']=np.append(data['RIVER_GRID_LOCATION'],int(line[line.find('=')+1:line.rfind(',')]))\n if line.strip().startswith('RIVER_VERTICAL_DISTRIBUTION'):\n data['RIVER_VERTICAL_DISTRIBUTION']=np.append(data['RIVER_VERTICAL_DISTRIBUTION'],line[line.find('\"')+1:line.rfind('\"')])\n\n data['RIVER_NAME']=np.delete(data['RIVER_NAME'],0)\n data['RIVER_GRID_LOCATION']=np.delete(data['RIVER_GRID_LOCATION'],0)\n data['RIVER_VERTICAL_DISTRIBUTION']=np.delete(data['RIVER_VERTICAL_DISTRIBUTION'],0)\n\n \n return data", "def read_ldat_header(cls, headerpath):\n # TODO extract CalTable info.\n if os.path.isdir(headerpath):\n files = os.listdir(headerpath)\n headerfiles = [f for f in files if f.endswith('.h')]\n headerfile = os.path.join(headerpath, headerfiles.pop())\n else:\n headerfile = headerpath\n stnid = None\n starttime = None\n headerversion = 0\n with open(headerfile, 'r') as hf:\n for hline in hf:\n if \"Header version\" in hline:\n headerversion = hline.split()[-1]\n beamctl_line = \"\"\n contents = {}\n datatype = None\n with open(headerfile, 'r') as hf:\n if headerversion == '1':\n rspctl_lines = []\n for line in hf:\n if \"Observer\" in line:\n _label, _observer = line.split('=')\n if \"Project\" in line:\n _label, _project = line.split('=')\n if \"DataType\" in line:\n _label, 
datatype = line.split('=')\n if \"StationID\" in line:\n _label, stnid = line.split('=')\n stnid = stnid.strip()\n if \"StartTime\" in line:\n _label, starttime = line.split('=')\n starttime = starttime.strip()\n if \"beamctl\" in line:\n # HACK\n beamctl_line = line\n if \"rspctl\" in line:\n rspctl_lines.append(line)\n elif headerversion == '2':\n contents = yaml.safe_load(hf)\n _observer = contents['Observer']\n _project = contents['Project']\n datatype = contents['DataType']\n stnid = contents['StationID']\n starttime = contents['StartTime']\n beamctl_line = contents['BeamctlCmds']\n rspctl_lines = contents['RspctlCmds'].split('\\n')\n else:\n # headerversion == '4':\n contents = yaml.safe_load(hf)\n datatype = contents['ldat_type']\n filenametime = contents['filenametime']\n stnid = contents['station_id']\n rcusetup_cmds = contents['rcusetup_cmds']\n beamctl_cmds = contents['beamctl_cmds']\n rspctl_cmds = contents['rspctl_cmds']\n if 'caltabinfos' in contents:\n caltabinfos = contents['caltabinfos']\n else:\n caltabinfos = []\n if 'septonconf' in contents:\n septonconf = contents['septonconf']\n else:\n septonconf = None\n obsinfo = cls(datatype, stnid, rcusetup_cmds, beamctl_cmds, rspctl_cmds,\n caltabinfos=caltabinfos, septonconf=septonconf)\n obsinfo.filenametime = filenametime\n return obsinfo", "def read_parsed_data(parsed_filename_path, parsed_topology_data_path):\n with open(parsed_filename_path, 'rb') as f:\n file_name = pk.load(f)\n with open(parsed_topology_data_path, 'rb') as f:\n topology_info = pk.load(f)\n return file_name, topology_info", "def read_header(infile):\n h = dict()\n fid = open(infile, 'r+b')\n h['filename'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 20))\n h['parent_filename'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 20))\n h['comments1'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 80))\n h['comments2'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 80))\n h['energy_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['config_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['file_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['trans_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['scan_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['data_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['date_modified'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 16))\n h['frequency'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['mat_velocity'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['num_pts'] = np.fromfile(fid, dtype = np.int32, count = 1)\n h['num_polarization_channels'] =np.fromfile(fid, dtype = np.int16,count = 1)\n h['spare00'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['adc_min_voltage'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['adc_max_voltage'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['band_width'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['spare01'] = np.fromfile(fid, dtype = np.int16, count = 5)\n h['polarization_type'] = np.fromfile(fid, dtype = np.int16, count = 4)\n h['record_header_size'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['word_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['word_precision'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['min_data_value'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['max_data_value'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['avg_data_value'] = np.fromfile(fid, dtype = np.float32, count = 1)\n 
h['data_scale_factor'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['data_units'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['surf_removal'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['edge_weighting'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['x_units'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['y_units'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['z_units'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['t_units'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['spare02'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['x_return_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_return_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_return_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['scan_orientation'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['scan_direction'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['data_storage_order'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['scanner_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['x_inc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_inc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_inc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['t_inc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['num_x_pts'] = np.fromfile(fid, dtype = np.int32, count = 1)\n h['num_y_pts'] = np.fromfile(fid, dtype = np.int32, count = 1)\n h['num_z_pts'] = np.fromfile(fid, dtype = np.int32, count = 1)\n h['num_t_pts'] = np.fromfile(fid, dtype = np.int32, count = 1)\n h['x_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['x_acc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_acc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_acc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['x_motor_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_motor_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_motor_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['x_encoder_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_encoder_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_encoder_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['date_processed'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 8))\n h['time_processed'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 8))\n h['depth_recon'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['x_max_travel'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_max_travel'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['elevation_offset_angle'] = np.fromfile(fid,dtype = np.float32, count = 1)\n h['roll_offset_angle'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_max_travel'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['azimuth_offset_angle'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['adc_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['spare06'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['scanner_radius'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['x_offset'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_offset'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_offset'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['t_delay'] = np.fromfile(fid, dtype = np.float32, count = 1)\n 
h['range_gate_start'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['range_gate_end'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['ahis_software_version'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['spare_end'] = np.fromfile(fid, dtype = np.float32, count = 10)\n return h", "def read(filename):\n with open(filename, 'rb') as filehandle:\n header = read_header(filehandle)\n data = read_data(header, filehandle, filename)\n return (data, header)", "def load(self):\n\n super(DatasetLoader_XRite2016, self).sync()\n\n keys = (\n 'ColorChecker24 - After November 2014',\n 'ColorChecker24 - Before November 2014',\n 'ColorCheckerSG - After November 2014',\n 'ColorCheckerSG - Before November 2014',\n )\n filenames = (\n 'ColorChecker24_After_Nov2014.txt',\n 'ColorChecker24_Before_Nov2014.txt',\n 'ColorCheckerSG_After_Nov2014.txt',\n 'ColorCheckerSG_Before_Nov2014.txt',\n )\n\n # TODO: Implement support for \"CGATS\" file format in \"Colour\":\n # https://github.com/colour-science/colour/issues/354\n illuminant = (\n CCS_ILLUMINANTS['CIE 1931 2 Degree Standard Observer']['ICC D50'])\n\n self._content = OrderedDict()\n for key, filename in zip(keys, filenames):\n directory = os.path.splitext(filename)[0]\n path = os.path.join(self.record.repository, 'dataset', directory,\n filename)\n\n with codecs.open(path, encoding='utf-8') as xrite_file:\n samples = []\n is_data = False\n lines = filter(\n None, (line.strip() for line in xrite_file.readlines()))\n for line in lines:\n if line == 'END_DATA':\n is_data = False\n\n if is_data:\n tokens = line.split()\n samples.append([\n tokens[0],\n [\n float(value.replace(',', '.'))\n for value in tokens[1:]\n ],\n ])\n\n if line == 'BEGIN_DATA':\n is_data = True\n\n i, j = (6, 4) if len(samples) == 24 else (14, 10)\n samples = np.array(samples)\n samples = np.transpose(samples.reshape([i, j, 2]), [1, 0, 2])\n keys, values = zip(*samples.reshape([-1, 2]))\n values = XYZ_to_xyY(Lab_to_XYZ(values, illuminant))\n self._content[key] = ColourChecker(key,\n OrderedDict(zip(keys, values)),\n illuminant)\n\n return self._content", "def getData(dataset,\n description,\n comparisonsType='comparisons'):\n # print(dataset, description, comparisonsType)\n start = timeit.default_timer()\n folder = os.path.join(os.path.expanduser('~'),\n 'LMDBs',\n dataset)\n filename = description + '.npz'\n full_path = os.path.join(folder, filename)\n if not os.path.isfile(full_path):\n raise IOError(\"File {} could not be found.\".format(full_path))\n\n if os.stat(full_path).st_size > 1024*1024*100:\n print(\"Loading file\", full_path, 'takes some time.')\n # flush the stdout (write content to file in cluster)\n # to immediately see output\n sys.stdout.flush()\n\n successfully_read_data = False\n counter = 0\n data = {}\n while counter < 10 and not successfully_read_data:\n try:\n with np.load(full_path) as npzFile:\n # for kind in ['images', 'comparisons', 'augmented']:\n # data[kind] = npzFile[kind]\n\n for kind in ['images', 'comparisons']:\n data[kind] = npzFile[kind]\n # data['description'] = \"{}_{}\".format(dataset, description)\n if comparisonsType == 'augmented':\n data['augmented'] = npzFile['augmented']\n if dataset == 'sintel' or dataset == 'mixed':\n data['albedos'] = npzFile['albedos']\n successfully_read_data = True\n except MemoryError:\n sec = np.random.rand() * 60 # try again up to a min later\n print(\"Reading of data was not successfull, trying again in\",\n sec, \"seconds\")\n sleep(sec)\n data = {}\n counter += 1\n stop = 
timeit.default_timer()\n print(\"Time needed to load data\", description,\n \"from dataset\", dataset,\n \"is:\", stop-start, \"seconds.\")\n # flush the stdout (write content to file in cluster) for debugging\n sys.stdout.flush()\n return data", "def load_data(filename: str) -> Tuple[np.ndarray, np.ndarray]:", "def _read_data(self):", "def read_file(self):\n # This is quite ugly but works for now.\n self.header = read_csv(self.file_name, delim_whitespace=True,\n header=TrackData.header_line,\n nrows=1).to_dict(orient='index')[0]\n self.data = read_csv(self.file_name, delim_whitespace=True, \n header=TrackData.data_line)", "def get_data(fname: str) -> dict:\n with open(fname) as f:\n return [rec.split() for rec in f.read().split(\"\\n\\n\")]", "def readfile(filename):\n import ROOT\n f = ROOT.TFile(filename)\n keys = f.GetListOfKeys()\n\n extract = lambda _type: filter(lambda x: x.GetClassName() == _type,keys)\n builddict = lambda _type: dict(map(lambda x: (x.GetName(),f.Get(x.GetName())),\n extract(_type)))\n\n # Retrieve all the stuff\n obsdict = builddict('RooRealVar')\n data = builddict('RooDataSet')\n datahists = builddict('RooDataHist')\n data.update(datahists)\n modeldict = builddict('RooRealPdf')\n\n databkgdict = dict(filter(lambda (x,y): x.find('dvbkg') == 0, data.iteritems()))\n datasigdict = dict(filter(lambda (x,y): x.find('dvsig') == 0, data.iteritems()))\n\n return f,obsdict,modeldict,databkgdict,datasigdict", "def load_data():\n dictionary = corpora.Dictionary.load(app.config['DICTIONARY'])\n matrix = similarities.MatrixSimilarity.load(app.config['MATRIX'])\n model = models.LsiModel.load(app.config['MODEL'])\n df = pd.read_pickle(app.config['DATA_FRAME'])\n return Data(matrix=matrix, model=model, dictionary=dictionary, data_frame=df)", "def read(self):\r\n entById = {}\r\n entsByName = {}\r\n header = 'HEADER '\r\n readheader = False\r\n for line in self.file:\r\n e = self.parseLine(line)\r\n if e:\r\n entById[int(e[\"id\"])] = e\r\n ids = e.get(e[\"name\"],[])\r\n ids.append(e[\"id\"])\r\n entsByName[e[\"name\"]] = list(set(ids))\r\n elif 'HEADER' in line:\r\n readheader = True\r\n elif readheader:\r\n if 'ENDSEC' in line:\r\n readheader = False\r\n else:\r\n header += line\r\n \r\n return [entById, entsByName, header]", "def Load(self):\n\t\tfile = open(self.fileName, 'r')\n\t\tself.hdr = file.readline().split('\\n')[0].split(',')\n\t\t\n\t\tfor line in file.readlines():\n\t\t\ttokens = line.split('\\n')[0].split(',')\n\t\t\tif int(tokens[1]) == 0:\n\t\t\t\tself.h0.append(tokens[0])\n\t\t\telse:\n\t\t\t\tself.h1.append(tokens[0])\n\t\tfile.close()\n\t\tself.numH1 = len(self.h1)\n\t\tself.numH0 = len(self.h0)", "def _read_header(\n self, header, filename, run_check_acceptability=True, background_lsts=True\n ):\n # get telescope information\n latitude = header[\"latitude\"][()]\n longitude = header[\"longitude\"][()]\n altitude = header[\"altitude\"][()]\n self.telescope_location_lat_lon_alt_degrees = (latitude, longitude, altitude)\n self.instrument = header[\"instrument\"][()].tobytes().decode(\"utf8\")\n self.telescope_name = header[\"telescope_name\"][()].tobytes().decode(\"utf8\")\n\n # get source information\n self.object_name = header[\"object_name\"][()].tobytes().decode(\"utf8\")\n\n # set history appropriately\n self.history = header[\"history\"][()].tobytes().decode(\"utf8\")\n if not uvutils._check_history_version(self.history, self.pyuvdata_version_str):\n self.history += self.pyuvdata_version_str\n\n # check for vis_units\n if \"vis_units\" in header:\n 
self.vis_units = header[\"vis_units\"][()].tobytes().decode(\"utf8\")\n else:\n # default to uncalibrated data\n self.vis_units = \"UNCALIB\"\n\n # check for optional values\n if \"dut1\" in header:\n self.dut1 = float(header[\"dut1\"][()])\n if \"earth_omega\" in header:\n self.earth_omega = float(header[\"earth_omega\"][()])\n if \"gst0\" in header:\n self.gst0 = float(header[\"gst0\"][()])\n if \"rdate\" in header:\n self.rdate = header[\"rdate\"][()].tobytes().decode(\"utf8\")\n if \"timesys\" in header:\n self.timesys = header[\"timesys\"][()].tobytes().decode(\"utf8\")\n if \"x_orientation\" in header:\n self.x_orientation = header[\"x_orientation\"][()].tobytes().decode(\"utf8\")\n if \"blt_order\" in header:\n blt_order_str = header[\"blt_order\"][()].tobytes().decode(\"utf8\")\n self.blt_order = tuple(blt_order_str.split(\", \"))\n if self.blt_order == (\"bda\",):\n self._blt_order.form = (1,)\n\n if \"antenna_diameters\" in header:\n self.antenna_diameters = header[\"antenna_diameters\"][()]\n if \"uvplane_reference_time\" in header:\n self.uvplane_reference_time = int(header[\"uvplane_reference_time\"][()])\n if \"eq_coeffs\" in header:\n self.eq_coeffs = header[\"eq_coeffs\"][()]\n if \"eq_coeffs_convention\" in header:\n self.eq_coeffs_convention = (\n header[\"eq_coeffs_convention\"][()].tobytes().decode(\"utf8\")\n )\n\n # check for phasing information\n self.phase_type = header[\"phase_type\"][()].tobytes().decode(\"utf8\")\n if self.phase_type == \"phased\":\n self._set_phased()\n self.phase_center_ra = float(header[\"phase_center_ra\"][()])\n self.phase_center_dec = float(header[\"phase_center_dec\"][()])\n self.phase_center_epoch = float(header[\"phase_center_epoch\"][()])\n if \"phase_center_frame\" in header:\n self.phase_center_frame = (\n header[\"phase_center_frame\"][()].tobytes().decode(\"utf8\")\n )\n elif self.phase_type == \"drift\":\n self._set_drift()\n else:\n self._set_unknown_phase_type()\n\n # get antenna arrays\n # cast to native python int type\n self.Nants_data = int(header[\"Nants_data\"][()])\n self.Nants_telescope = int(header[\"Nants_telescope\"][()])\n self.ant_1_array = header[\"ant_1_array\"][:]\n self.ant_2_array = header[\"ant_2_array\"][:]\n self.antenna_names = [\n n.tobytes().decode(\"utf8\") for n in header[\"antenna_names\"][:]\n ]\n self.antenna_numbers = header[\"antenna_numbers\"][:]\n self.antenna_positions = header[\"antenna_positions\"][:]\n\n # set telescope params\n try:\n self.set_telescope_params()\n except ValueError as ve:\n warnings.warn(str(ve))\n\n # get baseline array\n self.baseline_array = self.antnums_to_baseline(\n self.ant_1_array, self.ant_2_array\n )\n self.Nbls = len(np.unique(self.baseline_array))\n\n # get uvw array\n self.uvw_array = header[\"uvw_array\"][:, :]\n\n # get time information\n self.time_array = header[\"time_array\"][:]\n integration_time = header[\"integration_time\"]\n self.integration_time = integration_time[:]\n proc = None\n if \"lst_array\" in header:\n self.lst_array = header[\"lst_array\"][:]\n # check that lst_array in file is self-consistent\n if run_check_acceptability:\n (\n latitude,\n longitude,\n altitude,\n ) = self.telescope_location_lat_lon_alt_degrees\n lst_array = uvutils.get_lst_for_time(\n self.time_array, latitude, longitude, altitude\n )\n if not np.all(\n np.isclose(\n self.lst_array,\n lst_array,\n rtol=self._lst_array.tols[0],\n atol=self._lst_array.tols[1],\n )\n ):\n warnings.warn(\n \"LST values stored in {file} are not self-consistent \"\n \"with time_array and 
telescope location. Consider \"\n \"recomputing with utils.get_lst_for_time.\".format(file=filename)\n )\n else:\n # compute lst_array from time_array and telescope location\n proc = self.set_lsts_from_time_array(background=background_lsts)\n\n # get frequency information\n self.freq_array = header[\"freq_array\"][:, :]\n self.channel_width = float(header[\"channel_width\"][()])\n self.spw_array = header[\"spw_array\"][:]\n\n # get polarization information\n self.polarization_array = header[\"polarization_array\"][:]\n\n # get data shapes\n self.Nfreqs = int(header[\"Nfreqs\"][()])\n self.Npols = int(header[\"Npols\"][()])\n self.Ntimes = int(header[\"Ntimes\"][()])\n self.Nblts = int(header[\"Nblts\"][()])\n self.Nspws = int(header[\"Nspws\"][()])\n\n # get extra_keywords\n if \"extra_keywords\" in header:\n self.extra_keywords = {}\n for key in header[\"extra_keywords\"].keys():\n if header[\"extra_keywords\"][key].dtype.type in (np.string_, np.object_):\n self.extra_keywords[key] = (\n header[\"extra_keywords\"][key][()].tobytes().decode(\"utf8\")\n )\n else:\n self.extra_keywords[key] = header[\"extra_keywords\"][key][()]\n\n if proc is not None:\n # if lsts are in the background wait for them to return\n proc.join()\n\n return", "def dataLoad():\n try:\n try: #Python3\n f = open(__file__ + \".csv\",\"rt\")\n except: #Python2\n f = open(__file__ + \".csv\",\"rb\")\n data = f.read().split(',')\n entryCol.entry0.delete(0,END)\n entryCol.entry0.insert(0,data[0])\n entryCol.entry1.delete(0,END)\n entryCol.entry1.insert(0,data[1])\n entryCol.entry2.delete(0,END)\n entryCol.entry2.insert(0,data[2])\n entryCol.entry3.delete(0,END)\n entryCol.entry3.insert(0,data[3])\n botWind.writeN(\"DataLoad: File\")\n except:\n botWind.writeN(\"DataLoad: Default\")", "def _read_rhessi_spec_file(spec_file):\n rdict = {}\n with fits.open(spec_file) as hdul:\n for i in range(4):\n rdict[str(i)] = [hdul[i].header, hdul[i].data]\n return rdict", "def load_data(self):\n x_vector = pickle.load(open(self.file_stem + \"x.pickle\", \"rb\"))\n ode_sols = pickle.load(open(self.file_stem + \"sols.pickle\", \"rb\"))\n forcings = pickle.load(open(self.file_stem + \"fs.pickle\", \"rb\"))\n sl_coeffs = pickle.load(open(self.file_stem + \"coeffs.pickle\", \"rb\"))\n\n return x_vector, ode_sols, forcings, sl_coeffs", "def DataLoader():\n #importing data\n House_Prices_Uncleaned = pd.read_csv(\"zillow_data/Zip_zhvi_uc_sfrcondo_tier_0.33_0.67_sm_sa_mon.csv\")\n #Cleaning house prices data\n\n House_Prices=pd.DataFrame(House_Prices_Uncleaned[\"RegionName\"][House_Prices_Uncleaned[\"CountyName\"]==\"New York County\"])\n\n House_Prices[\"Price\"]=pd.DataFrame(House_Prices_Uncleaned[\"2020-09-30\"])\n\n House_Rent_Uncleaned= pd.read_csv(\"zillow_data/Zip_ZORI_AllHomesPlusMultifamily_SSA.csv\")\n\n #Cleaning house rent data\n House_Rent=pd.DataFrame(House_Rent_Uncleaned[\"RegionName\"])\n House_Rent[\"Rent\"]=pd.DataFrame(House_Rent_Uncleaned[\"2020-09\"])\n\n return House_Prices, House_Rent", "def _readheader(lines):\n hdrdict = {}\n #input list of 26 lines of header\n #station and channel\n line = lines[5]\n parts = line.strip().split()\n fname = parts[1]\n fparts = fname.split('_')\n hdrdict['station'] = fparts[-2]+'_'+fparts[-1]\n\n #the \"Component\" lines look like either: Component S00W, Component S90E, Component Up\n compstr = lines[12].strip().split()[1]\n hdrdict['channel'] = get_comp_name(compstr)\n\n #instrument\n hdrdict['instrument'] = lines[3].split()[1].strip()\n \n #location string\n line = lines[6]\n 
hdrdict['location'] = line.strip()\n #event origin, buffer start year/month\n line = lines[16]\n parts = line.strip().split()\n bufyear = int(parts[8])\n bufmonth = int(parts[9])\n #epicentral location, buffer start day/hour\n line = lines[17]\n parts = line.strip().split()\n bufday = int(parts[8])\n bufhour = int(parts[9])\n #numpoints, buffer start min/sec\n line = lines[19]\n parts = line.strip().split()\n hdrdict['npts'] = int(parts[0])\n bufmin = int(parts[8])\n millisec = int(parts[9])\n bufsec = int(millisec/1000)\n bufmicrosec = int(np.round(millisec/1000.0 - bufsec))\n hdrdict['starttime'] = UTCDateTime(datetime(bufyear,bufmonth,bufday,bufhour,bufmin,bufsec,bufmicrosec))\n #part C\n #frequency, calibration value and some other stuff we don't care about\n line = lines[20]\n parts = line.strip().split()\n hdrdict['sampling_rate'] = float(parts[0])\n hdrdict['delta'] = 1.0/hdrdict['sampling_rate']\n hdrdict['calib'] = float(parts[7])\n #site location info, this time in dd\n line = lines[21]\n parts = line.strip().split()\n hdrdict['lat'] = float(parts[0]) * -1\n hdrdict['lon'] = float(parts[1])\n hdrdict['height'] = 0.0\n #duration\n line = lines[22]\n parts = line.strip().split()\n hdrdict['duration'] = float(parts[0])\n hdrdict['endtime'] = hdrdict['starttime'] + hdrdict['duration']\n #max acceleration - good for sanity check\n line = lines[23]\n parts = line.strip().split()\n hdrdict['maxacc'] = float(parts[0])\n hdrdict['network'] = 'NZ'\n hdrdict['units'] = 'acc'\n return hdrdict", "def load_coverage_data(lad_id):\n path = os.path.join(\n DATA_RAW, 'ofcom_2018', '201809_mobile_laua_r02.csv'\n )\n\n with open(path, 'r') as source:\n reader = csv.DictReader(source)\n for line in reader:\n if line['laua'] == lad_id:\n return {\n 'lad_id': line['laua'],\n 'lad_name': line['laua_name'],\n '4G_geo_out_0': line['4G_geo_out_0'],\n '4G_geo_out_1': line['4G_geo_out_1'],\n '4G_geo_out_2': line['4G_geo_out_2'],\n '4G_geo_out_3': line['4G_geo_out_3'],\n '4G_geo_out_4': line['4G_geo_out_4'],\n }", "def _load_obcfile(casename=None): \n\n data={}\n\n if casename==None:\n print('_load_obcfile requires a filename to load.')\n return\n try:\n fp=open(casename+'_obc.dat','r')\n except IOError:\n print('_load_obcfile: invalid case name.')\n return data\n\n obc_str=fp.readline().split('=')\n obc_num=int(obc_str[1])\n t_data1=np.genfromtxt(casename+'_obc.dat',skip_header=1)\n fp.close()\n\n data['obcf_num']=obc_num\n data['obcf_numbers']=t_data1[:,0]\n data['obcf_nodes']=t_data1[:,1]\n data['obcf_value']=t_data1[:,2]\n\n \n return data", "def __loadFromFile(self):\n try:\n f=open(self.__fileR, \"r\")\n line =f.readline().strip()\n rez=[]\n while line!=\"\":\n attrs=line.split(\",\")\n rt=Rent(attrs[0], attrs[1], attrs[2], attrs[3])\n rez.append(rt)\n line=f.readline().strip()\n f.close()\n return rez\n #the file cannot be reached\n except IOError:\n return None", "def load(filename, h5dictKey=None):\n \n\n if not os.path.exists(filename):\n raise IOError(\"File not found :( \\n %s\" % filename)\n\n try:\n \"loading from a joblib file here\"\n mydict = dict(joblib.load(filename))\n data = mydict.pop(\"data\")\n return data\n\n except:\n pass\n \n \"checking for a text file\"\n data_file = open(filename)\n line0 = data_file.readline()\n try:\n N = int(line0)\n except (ValueError, UnicodeDecodeError):\n raise TypeError(\"Cannot read text file... 
reading pickle file\")\n # data = Cload(filename, center=False)\n data = [list(map(float, i.split())) for i in data_file.readlines()]\n\n if len(data) != N:\n raise ValueError(\"N does not correspond to the number of lines!\")\n return np.array(data)\n\n \n\n #try:\n # data = loadJson(filename)\n # return data[\"data\"]\n #except:\n # print(\"Could not load json\")\n # pass\n\n #h5dict loading deleted", "def load_data(self):\n \n # only loader implemented so far !\n try:\n _ascii_array = Utilities.load_ascii(filename=self.filename, sep='')\n start_row = TOF._first_line_number_with_real_data(_ascii_array[0, 0])\n\n _tof_column = _ascii_array[start_row:, 0]\n\n if not TOF._is_this_numeric(_tof_column[0]):\n start_row += 1\n\n _tof_column = _ascii_array[start_row:, 0]\n _counts_column = _ascii_array[start_row:, 1]\n\n self.tof_array = _tof_column\n self.counts_array = _counts_column\n return\n\n except IndexError:\n pass # try another format\n\n try:\n _ascii_array = Utilities.load_ascii(filename=self.filename, sep=',')\n start_row = TOF._first_line_number_with_real_data(_ascii_array[0, 0])\n\n _tof_column = _ascii_array[start_row:, 0] # first row must be excluded in this format\n\n if not TOF._is_this_numeric(_tof_column[0]):\n start_row += 1\n\n _tof_column = _ascii_array[start_row:, 0]\n _counts_column = _ascii_array[start_row:, 1]\n\n self.tof_array = _tof_column\n self.counts_array = _counts_column\n return\n\n except IndexError:\n raise IndexError(\"Format not implemented!\")", "def Load_File(filename):\n with open(filename) as file:\n data = file.readlines()\n return data", "def get_BM_data(filename):\n\n data = {}\n with open(filename, 'r') as f:\n f.readline() #discard first line\n line = f.readline()\n for k in ('name', 'gender', 'age', 'division',\n 'country', 'time'):\n data[k] = []\n while line != '':\n split = line.split(',')\n data['name'].append(split[0])\n data['gender'].append(split[1])\n data['age'].append(int(split[2]))\n data['division'].append(int(split[3]))\n data['country'].append(split[4]) \n data['time'].append(float(split[5][:-1])) #remove \\n\n line = f.readline()\n return data", "def load_data():\n try:\n loader.download()\n load_table_data()\n status = 'loaded'\n except Exception as ex:\n log.log_traceback(ex)\n status = 'failed'\n return flask.jsonify({'status': status})", "async def load(self, file: IO) -> dict:", "def load_file(msl_data_path, filename):\n with open(msl_data_path + filename, 'rb') as (file_):\n file_content = file_.read()\n file_.close()\n return file_content", "def load_data():\n\t\t# load the data\n\t\tDATPATH = \"../data/\"\n\t\t#fnino = DATPATH + \"nino3.csv\" # 1871-2000\n\t\tfnino = DATPATH + \"tas_Amon_IPSL-CM5A-LR_past1000_r1i1p1_0850_1850_nino3_tseries.csv\" # 1871-2016\n\t\t#fnino = DATPATH + \"nino34.long.data\"\n\t\t#nc_data_nino3 = netCDF4.Dataset(fnino)\n\t\t#nino3_load = nc_data_nino3.variables['tas'][:]\n\t\t#dnino = nino3_load.flatten()\n\n\t\tdnino = np.genfromtxt(fnino, delimiter=\",\", dtype=float).flatten()\n\t\t#fismr = DATPATH + \"ismr.csv\" # 1871-2000\n\t\t#fismr = DATPATH + \"psl_Amon_IPSL-CM5A-LR_past1000_r1i1p1_0850_1850_1_india_goswami_2002_tseries.csv\" # 1871-2016\n\t\tfismr = DATPATH + \"pr_Amon_IPSL-CM5A-LR_past1000_r1i1p1_0850_1850_goswami_india_tseries.csv\" # 1871-2016\n\t\tdismr = np.genfromtxt(fismr, delimiter=\",\", dtype=float).flatten()\n\t\t#fvolc = DATPATH + \"robock.txt\" # 1871-2000\n\t\tfvolc = DATPATH + \"sigl.txt\" # 1871-2016\n\t\tdvolc = np.genfromtxt(fvolc, delimiter=\",\", 
dtype=float).flatten()\n\n\t\tfvolc_source = DATPATH + \"volc_source_850_1850.csv\" # 1871-2016\n\t\tdvolc_source = np.genfromtxt(fvolc_source, delimiter=\",\", dtype=float).flatten()\n\t\t# simple check for data consistency\n\t\tassert dnino.shape == dismr.shape, \"Data sets are unequal!\"\n\t\tassert int(dismr.shape[0]/12) == dvolc.shape[0], \"Data sets are unequal\"\n\t\treturn dnino, dismr, dvolc, dvolc_source", "def load_clips():\n try:\n with open(DATA_FILE, 'r') as f:\n return msgpack.unpack(f, encoding='utf-8')\n except IOError:\n return {}", "def load(datastream):", "def dat_reader(fpath, fname):\n\n header = []\n data = []\n with open(fpath + fname + '.dat', 'rb') as file:\n for row in file:\n string_row = row.decode('iso-8859-1')\n if string_row[0] == 'C':\n header.append(string_row)\n else:\n data.append(string_row)\n\n return [header, data]", "def header(fpath):\n # If you want to change something, instead of overwriting a bug, add a new\n # key with the desired functionallity. This way, prior code doesn't break.\n # One can be very waste full with this function as it is fast anyways.\n\n\n ret = {}\n with open(fpath) as f:\n for line in f:\n if line[0] is not \"#\":\n break\n # Strip comment marker\n line = line[2:]\n name, value = line.split(\"=\")\n # Strip newline\n ret[name] = value[:-1]\n\n # To have some compatibility between spe veronica and viktor files,\n # we further unify some of the namings\n ret['gain'] = ret.get('Gain')\n\n exp_time = ret.get('ExposureTime [s]')\n if exp_time:\n ret['exposure_time'] = datetime.timedelta(seconds=float(exp_time))\n\n hbin = ret.get('HBin')\n if hbin:\n ret['hbin'] = {'ON': True}.get(value, False)\n\n cw = ret.get('Central-Wavelength')\n if cw:\n ret['central_wl'] = float(cw)\n\n vis_wl = ret.get('vis-Wavelength')\n if vis_wl:\n ret['vis_wl'] = float(vis_wl)\n\n syringe_pos = ret.get('Syringe Pos')\n if syringe_pos:\n ret['syringe_pos'] = int(syringe_pos)\n\n cursor = ret.get(\"Cursor\")\n if cursor:\n ret['cursor'] = tuple([int(elm) for elm in cursor.split('\\t')])\n\n x_mirror = ret.get('x-mirror')\n if x_mirror:\n ret['x_mirror'] = {'ON': True}.get(x_mirror, False)\n\n calib_coeff = ret.get('calib Coeff')\n if calib_coeff:\n ret['calib Coeff'] = tuple([float(elm) for elm in calib_coeff.split('\\t')])\n # Index 0 is actually central_wl during calibration,\n ret['calib_central_wl'] = ret['calib Coeff'][0]\n\n\n # For np.poly1d the calibration coefficents need to be in decreasing\n # order and no zero values are not allowed\n _cc = np.array(ret['calib Coeff'][1:])\n ret['calib_coeff'] = _cc[np.nonzero(_cc)][::-1]\n\n scan_start_time = ret.get('Scan Start time')\n if scan_start_time:\n ret['date'] = datetime.datetime.strptime(scan_start_time, '%d.%m.%Y %H:%M:%S')\n\n scan_stop_time = ret.get('Scan Stop time')\n if scan_stop_time:\n ret['date_stop'] = datetime.datetime.strptime(scan_stop_time, '%d.%m.%Y %H:%M:%S')\n\n timedelay = ret.get('Timedelay')\n if timedelay:\n ret['timedelay'] = np.array([int(elm) for elm in timedelay.split('\\t')])\n\n timedelay_pos= ret.get('Timedelay Pos')\n if timedelay_pos:\n ret['timedel_pos'] = np.array([int(elm) for elm in timedelay_pos.split('\\t')])\n\n return ret", "def DataLoad(filename,path,keys_to_search=['dstr','ORT'],text_file = False):\n\n OUTPUT_DICT = {}\n\n if not text_file:\n\n rdr = EafReader(filename, path, text_file=False)\n\n for key in keys_to_search:\n\n annot, annot_df = rdr.parser(key)\n DF = rdr.dataframe_creator(annot, annot_df, annot_type=key)\n\n OUTPUT_DICT[key.lower()] = 
DF\n else:\n\n\n\n for key in keys_to_search:\n\n rdr = EafReader(filename+'_'+key.lower()+'.txt', path, text_file=True)\n DF = rdr.csv_reader()\n\n OUTPUT_DICT[key.lower()] = DF\n\n return OUTPUT_DICT", "def _read_hdr_file(ktlx_file):\r\n with open(ktlx_file, 'rb') as f:\r\n\r\n hdr = {}\r\n assert f.tell() == 0\r\n\r\n hdr['file_guid'] = hexlify(f.read(16))\r\n hdr['file_schema'], = unpack('<H', f.read(2))\r\n if not hdr['file_schema'] in (1, 3, 7, 8, 9):\r\n raise NotImplementedError('Reading header not implemented for ' +\r\n 'file_schema ' + str(hdr['file_schema']))\r\n\r\n hdr['base_schema'], = unpack('<H', f.read(2))\r\n if not hdr['base_schema'] == 1: # p.3: base_schema 0 is rare, I think\r\n raise NotImplementedError('Reading header not implemented for ' +\r\n 'base_schema ' + str(hdr['base_schema']))\r\n\r\n hdr['creation_time'] = datetime.fromtimestamp(unpack('<i',\r\n f.read(4))[0])\r\n hdr['patient_id'], = unpack('<i', f.read(4))\r\n hdr['study_id'], = unpack('<i', f.read(4))\r\n hdr['pat_last_name'] = _make_str(unpack('c' * 80, f.read(80)))\r\n hdr['pat_first_name'] = _make_str(unpack('c' * 80, f.read(80)))\r\n hdr['pat_middle_name'] = _make_str(unpack('c' * 80, f.read(80)))\r\n hdr['patient_id'] = _make_str(unpack('c' * 80, f.read(80)))\r\n assert f.tell() == 352\r\n\r\n if hdr['file_schema'] >= 7:\r\n hdr['sample_freq'], = unpack('<d', f.read(8))\r\n n_chan, = unpack('<i', f.read(4))\r\n hdr['num_channels'] = n_chan\r\n hdr['deltabits'], = unpack('<i', f.read(4))\r\n hdr['phys_chan'] = unpack('<' + 'i' * hdr['num_channels'],\r\n f.read(hdr['num_channels'] * 4))\r\n\r\n f.seek(4464)\r\n hdr['headbox_type'] = unpack('<' + 'i' * 4, f.read(16))\r\n hdr['headbox_sn'] = unpack('<' + 'i' * 4, f.read(16))\r\n hdr['headbox_sw_version'] = _make_str(unpack('c' * 40, f.read(40)))\r\n hdr['dsp_hw_version'] = _make_str(unpack('c' * 10, f.read(10)))\r\n hdr['dsp_sw_version'] = _make_str(unpack('c' * 10, f.read(10)))\r\n hdr['discardbits'], = unpack('<i', f.read(4))\r\n\r\n if hdr['file_schema'] >= 8:\r\n hdr['shorted'] = unpack('<' + 'h' * 1024, f.read(2048))[:n_chan]\r\n hdr['frequency_factor'] = unpack('<' + 'h' * 1024,\r\n f.read(2048))[:n_chan]\r\n return hdr", "def load_stationfile(filename=None):\n \n data={} \n\n if filename==None:\n print('load_stationfile requires a filename to load.')\n return\n try:\n fp=open(filename,'r')\n except IOError:\n print('load_stationfile: invalid filename.')\n return data\n\n headerstr=fp.readline()\n data_str=np.genfromtxt(filename,skip_header=1,dtype=str)\n fp.close()\n\n data['header']=headerstr\n data['station_num']=data_str[:,0].astype(np.int32)\n data['cell']=data_str[:,3].astype(np.int32)\n data['x']=data_str[:,1].astype(np.float64)\n data['y']=data_str[:,2].astype(np.float64)\n data['h']=data_str[:,4].astype(np.float64)\n data['station_name'] = data_str[:,5]\n \n return data", "def setup_data( self ):\n with open( f'{project_dir}/data/05_source_key_data.json', 'r', encoding='utf-8' ) as f:\n dct = json.loads( f.read() )\n with open( f'{project_dir}/data/04_snapshot_open_textbook.json', 'r', encoding='utf-8' ) as f:\n lst = json.loads( f.read() )\n return ( dct, lst )", "def load(self, filename):\n # XXX Hay que comprobar los datos leidos y lanzar excepcion\n f = open(filename)\n prelaciones = []\n asig = []\n rec = []\n l = f.readline()\n while l:\n # Activities and following activities\n if l[0:21] == 'PRECEDENCE RELATIONS:':\n f.readline()\n l = f.readline()\n while l[0] != '*':\n data = l.split()\n prel = (data[0], data[3:])\n 
prelaciones.append(prel)\n l = f.readline()\n\n # Activity duration and resource units needed\n if l[0] == '-':\n l = f.readline()\n while l[0] != '*':\n asig.append(l.split())\n l = f.readline()\n\n # Name, type and unit of resources\n if l[0:22] == 'RESOURCEAVAILABILITIES':\n l = f.readline()\n while l[0] != '*':\n rec.append(l.split())\n l = f.readline()\n\n l = f.readline()\n \n # Create data structure\n cont = 1\n activities = []\n for prelacion in prelaciones:\n activities.append([cont, prelacion[0], prelacion[1], '', '', '', '', '', ('Beta')])\n cont += 1 \n\n # Update activities duration\n for n in range(len(asig)): \n activities[n][6] = float(asig[n][2])\n\n # Update resources\n i = 1\n m = 0\n resources = []\n if len(rec) < 2:\n raise InvalidFileFormatException()\n\n for n in range(len(rec[1])):\n # Renewable\n if rec[0][m]=='R' or rec[0][m][0]=='R':\n if rec[0][m]=='R':\n row=[rec[0][m]+rec[0][i], 'Renewable', '', rec[1][n]] \n m+=2\n else:\n row=[rec[0][m], 'Renewable', '', rec[1][n]] \n m+=1 \n # Non Renewable\n elif rec[0][m]=='N' or rec[0][m][0]=='N':\n if rec[0][m]=='N':\n row=[rec[0][m]+rec[0][i], 'Non renewable', rec[1][n], '']\n m+=2\n else:\n row=[rec[0][m], 'Non renewable', rec[1][n], ''] \n m+=1\n # Double constrained\n elif rec[0][m]=='D' or rec[0][m][0]=='D':\n if rec[0][m]=='D':\n row=[rec[0][m]+rec[0][i], 'Double constrained', rec[1][n], rec[1][n]]\n m+=2\n else:\n row=[rec[0][m], 'Double constrained', rec[1][n], rec[1][n]] \n m+=1\n \n resources.append(row)\n i += 2\n # Note: Unlimited resources are not present on PSPLIB projects and so \n # not taken into account here\n\n # Resources needed per activity\n asignation = []\n for n in range(len(asig)): \n for m in range(3, 3+len(rec[1])): #len(self.rec[1]): number of resources \n if asig[n][m] != '0': #unused resources are not shown\n i = m-3\n row = [asig[n][0], resources[i][0], asig[n][m]] \n asignation.append(row)\n \n return (activities, [], resources, asignation)", "def read_cli(fname):\n \n meta = {}\n data = None\n header = []\n\n meta['fname'] = fname\n meta['id'] = ''.join([L for L in fname if L in '0123456789'])\n \n fid = open(fname, 'r')\n meta['CLIGEN Version'] = fid.readline().strip()\n fid.readline()\n meta['Station'] = ' '.join(fid.readline().strip().split())\n\n fid.readline()\n line = fid.readline().strip().split()\n meta['Latitude'] = float(line[0])\n meta['Longitude'] = float(line[1])\n meta['Elevation'] = float(line[2])\n meta['Obs. 
Years'] = float(line[3])\n meta['Beginning Year'] = float(line[4])\n meta['Years Simulated'] = float(line[5])\n meta['Command Line'] = ' '.join(line[6:])\n\n fid.readline()\n meta['Observed monthly ave max temperature (C)'] = \\\n list(map(float, fid.readline().split()))\n\n fid.readline()\n meta['Observed monthly ave min temperature (C)'] = \\\n list(map(float, fid.readline().split()))\n\n fid.readline()\n meta['Observed monthly ave solar radiation (Langleys/day)'] = \\\n list(map(float, fid.readline().split()))\n\n fid.readline()\n meta['Observed monthly ave precipitation (mm)'] = \\\n list(map(float, fid.readline().split()))\n\n header = fid.readline().strip().split()\n \n fid.readline()\n\n _data = []\n for line in fid.readlines():\n cells = line.split()\n\n if len(cells) != len(header):\n break\n\n _data.append([float(c) for c in cells])\n \n data = {}\n for h,v in zip(header, zip(*_data)):\n data[h] = v\n\n del _data\n del header\n\n return (meta,data)", "def load_data_part(fname):\n if \"_data\" not in fname:\n return None\n # Read data\n data = pd.read_csv(fname)\n # events file\n events_fname = fname.replace('_data', '_events')\n # read event file\n labels = pd.read_csv(events_fname)\n clean = data.drop(['id'], axis=1) # remove id\n labels = labels.drop(['id'], axis=1) # remove id\n return clean, labels", "def read_data(columns, types = {}, filename= \"data/wxobs20170821.txt\"):\n #Initialize my data variable\n data = {}\n for column in columns:\n data[column] = []\n\n with open(filename, \"r\") as datafile:\n # read first three line (header)\n for _ in range(3):\n #print(_)\n datafile.readline()\n\n\n # Read and parse the rest of the file\n for line in datafile:\n split_line = line.split()\n for column in columns:\n i = columns[column]\n t = types.get(column, str)\n value = t(split_line[i])\n data[column].append(value)\n\n return data", "def Load_File(filename):\n with open(filename) as file:\n data = file.readlines()\n print(\"Finished loading\")\n\n return data", "def _read_file(self):\n\n with open(self.file_name, 'rb') as f:\n new_test = struct.unpack('<l', f.read(8)[4:])[0]\n f.close()\n\n with open(self.file_name, 'rb') as f:\n old_test = struct.unpack('<h', f.read(6)[4:])[0]\n f.close()\n\n with open(self.file_name, 'rb') as f:\n other_test = struct.unpack('<l', f.read(20)[16:])[0]\n f.close()\n\n open_file = open(self.file_name, 'rb')\n\n if (other_test==202):\n raw = open_file.read(1236)[11:]\n self.model = '202'\n elif ((not new_test==102) and old_test==102):\n raw = open_file.read(1133)\n self.model = '102old'\n elif (new_test==102 and old_test==102):\n raw = open_file.read(1224)\n self.model = '102new'\n\n self.header = DpHeader(raw, self.model)\n\n self.data = DpData(open_file, \n self.model, \n self.header.interferogram_size, \n self.header.number_of_coadds, \n 2048*self.header.zero_fill,\n self.header.laser_wavelength_microns, \n self.header.dispersion_constant_xm,\n self.header.dispersion_constant_xb)\n\n open_file.close()", "def _read_header(self):\n f = self._open(self.filename, 'rb')\n idx = 0\n header = b''\n # reading the header \n while idx < 13: \n header += f.readline().rstrip() # removes the \"\\n\\r\" at the end\n idx += 1\n # \"magically\" compute the data offset\n try:\n self._offset_auto = ord(header[2]) + 1856\n except:\n self._offset_auto = header[2] + 1856\n\n\n\n header = header[:self._offset_auto+300] # add an extra random header for offset\n header = re.sub(r'(?P<section>\\[[^\\]]+\\])', '\\n\\g<section>', header.decode('latin1'))\n header = 
header.splitlines()[1:]\n self.header = dict([self._header_sect2dict(line) for line in header])\n self.shape = np.array(self.header['Acquisition']['areGRBScan'].split(',')[-2:]).astype(np.int)\n f.close()\n\n offset_list = {'auto': self._offset_auto,\n 'from_end': -np.prod(self.shape)*self._nbytes,\n 'from_end_4k': - np.prod(self.shape)*self._nbytes - 4092}\n\n if self._offset_input in offset_list:\n\n self._offset_data = offset_list[self._offset_input]\n if self._offset_input.startswith('from_end'):\n # set the flag to seek from the end of the file.\n self._offset_whence = 2\n elif type(self._offset_input) is int:\n self._offset_data = self._offset_input\n else:\n raise ValueError\n\n \n\n return self.header", "def read_data(self):\n print 'Reading Data ...'\n fname = self.wpath + 'Data/' + self.city[2] + '-' + self.application + '.csv.bz2'\n self.dataset = loadtxt(fname, skiprows=1,\n dtype=[('lat', 'f8'), ('lng', 'f8'), ('time', 'i4'), ('user', 'S20')],\n usecols=(0, 1, 2, 3), delimiter=';', comments='#')", "def _load_depfile(casename=None):\n\n data={}\n \n if casename==None:\n print('_load_depfile requires a filename to load.')\n return\n try:\n fp=open(casename+'_dep.dat','r')\n except IOError:\n print('_load_depfile: invalid case name.')\n return data\n\n dep_str=fp.readline().split('=')\n dep_num=int(dep_str[1])\n t_data1=np.genfromtxt(casename+'_dep.dat',skip_header=1)\n fp.close()\n\n data['dep_num']=dep_num\n data['x']=t_data1[:,0]\n data['y']=t_data1[:,1]\n data['h']=t_data1[:,2]\n data['nodexy']=t_data1[:,0:2]\n \n return data", "def read_header(fid):\r\n\r\n # Check 'magic number' at beginning of file to make sure this is an Intan\r\n # Technologies RHD2000 data file.\r\n magic_number, = struct.unpack('<I', fid.read(4)) \r\n if magic_number != int('c6912702', 16): raise Exception('Unrecognized file type.')\r\n\r\n header = {}\r\n # Read version number.\r\n version = {}\r\n (version['major'], version['minor']) = struct.unpack('<hh', fid.read(4)) \r\n header['version'] = version\r\n\r\n print('')\r\n print('Reading Intan Technologies RHD2000 Data File, Version {}.{}'.format(version['major'], version['minor']))\r\n print('')\r\n\r\n freq = {}\r\n\r\n # Read information of sampling rate and amplifier frequency settings.\r\n header['sample_rate'], = struct.unpack('<f', fid.read(4))\r\n (freq['dsp_enabled'], freq['actual_dsp_cutoff_frequency'], freq['actual_lower_bandwidth'], freq['actual_upper_bandwidth'], \r\n freq['desired_dsp_cutoff_frequency'], freq['desired_lower_bandwidth'], freq['desired_upper_bandwidth']) = struct.unpack('<hffffff', fid.read(26))\r\n\r\n\r\n # This tells us if a software 50/60 Hz notch filter was enabled during\r\n # the data acquisition.\r\n notch_filter_mode, = struct.unpack('<h', fid.read(2))\r\n header['notch_filter_frequency'] = 0\r\n if notch_filter_mode == 1:\r\n header['notch_filter_frequency'] = 50\r\n elif notch_filter_mode == 2:\r\n header['notch_filter_frequency'] = 60\r\n freq['notch_filter_frequency'] = header['notch_filter_frequency']\r\n\r\n (freq['desired_impedance_test_frequency'], freq['actual_impedance_test_frequency']) = struct.unpack('<ff', fid.read(8))\r\n\r\n note1 = read_qstring(fid)\r\n note2 = read_qstring(fid)\r\n note3 = read_qstring(fid)\r\n header['notes'] = { 'note1' : note1, 'note2' : note2, 'note3' : note3}\r\n\r\n # If data file is from GUI v1.1 or later, see if temperature sensor data was saved.\r\n header['num_temp_sensor_channels'] = 0\r\n if (version['major'] == 1 and version['minor'] >= 1) or (version['major'] > 1) 
:\r\n header['num_temp_sensor_channels'], = struct.unpack('<h', fid.read(2))\r\n \r\n # If data file is from GUI v1.3 or later, load eval board mode.\r\n header['eval_board_mode'] = 0\r\n if ((version['major'] == 1) and (version['minor'] >= 3)) or (version['major'] > 1) :\r\n header['eval_board_mode'], = struct.unpack('<h', fid.read(2))\r\n \r\n \r\n header['num_samples_per_data_block'] = 60\r\n # If data file is from v2.0 or later (Intan Recording Controller), load name of digital reference channel\r\n if (version['major'] > 1):\r\n header['reference_channel'] = read_qstring(fid)\r\n header['num_samples_per_data_block'] = 128\r\n\r\n # Place frequency-related information in data structure. (Note: much of this structure is set above)\r\n freq['amplifier_sample_rate'] = header['sample_rate']\r\n freq['aux_input_sample_rate'] = header['sample_rate'] / 4\r\n freq['supply_voltage_sample_rate'] = header['sample_rate'] / header['num_samples_per_data_block']\r\n freq['board_adc_sample_rate'] = header['sample_rate']\r\n freq['board_dig_in_sample_rate'] = header['sample_rate']\r\n\r\n header['frequency_parameters'] = freq\r\n\r\n # Create structure arrays for each type of data channel.\r\n header['spike_triggers'] = []\r\n header['amplifier_channels'] = []\r\n header['aux_input_channels'] = []\r\n header['supply_voltage_channels'] = []\r\n header['board_adc_channels'] = []\r\n header['board_dig_in_channels'] = []\r\n header['board_dig_out_channels'] = []\r\n\r\n # Read signal summary from data file header.\r\n\r\n number_of_signal_groups, = struct.unpack('<h', fid.read(2))\r\n print('n signal groups {}'.format(number_of_signal_groups))\r\n\r\n for signal_group in range(1, number_of_signal_groups + 1):\r\n signal_group_name = read_qstring(fid)\r\n signal_group_prefix = read_qstring(fid)\r\n (signal_group_enabled, signal_group_num_channels, signal_group_num_amp_channels) = struct.unpack('<hhh', fid.read(6))\r\n\r\n if (signal_group_num_channels > 0) and (signal_group_enabled > 0):\r\n for signal_channel in range(0, signal_group_num_channels):\r\n new_channel = {'port_name' : signal_group_name, 'port_prefix' : signal_group_prefix, 'port_number' : signal_group}\r\n new_channel['native_channel_name'] = read_qstring(fid)\r\n new_channel['custom_channel_name'] = read_qstring(fid)\r\n (new_channel['native_order'], new_channel['custom_order'], signal_type, channel_enabled, new_channel['chip_channel'], new_channel['board_stream']) = struct.unpack('<hhhhhh', fid.read(12))\r\n new_trigger_channel = {}\r\n (new_trigger_channel['voltage_trigger_mode'], new_trigger_channel['voltage_threshold'], new_trigger_channel['digital_trigger_channel'], new_trigger_channel['digital_edge_polarity']) = struct.unpack('<hhhh', fid.read(8))\r\n (new_channel['electrode_impedance_magnitude'], new_channel['electrode_impedance_phase']) = struct.unpack('<ff', fid.read(8))\r\n\r\n if channel_enabled:\r\n if signal_type == 0:\r\n header['amplifier_channels'].append(new_channel)\r\n header['spike_triggers'].append(new_trigger_channel)\r\n elif signal_type == 1:\r\n header['aux_input_channels'].append(new_channel)\r\n elif signal_type == 2:\r\n header['supply_voltage_channels'].append(new_channel)\r\n elif signal_type == 3:\r\n header['board_adc_channels'].append(new_channel)\r\n elif signal_type == 4:\r\n header['board_dig_in_channels'].append(new_channel)\r\n elif signal_type == 5:\r\n header['board_dig_out_channels'].append(new_channel)\r\n else:\r\n raise Exception('Unknown channel type.')\r\n \r\n # Summarize contents of data 
file.\r\n header['num_amplifier_channels'] = len(header['amplifier_channels'])\r\n header['num_aux_input_channels'] = len(header['aux_input_channels'])\r\n header['num_supply_voltage_channels'] = len(header['supply_voltage_channels'])\r\n header['num_board_adc_channels'] = len(header['board_adc_channels'])\r\n header['num_board_dig_in_channels'] = len(header['board_dig_in_channels'])\r\n header['num_board_dig_out_channels'] = len(header['board_dig_out_channels'])\r\n\r\n return header", "def _load_grdfile(casename=None):\n \n data={} \n\n if casename==None:\n print('_load_grdfile requires a filename to load.')\n return\n try:\n fp=open(casename+'_grd.dat','r')\n except IOError:\n print('_load_grdfiles: invalid case name.')\n return data\n\n nodes_str=fp.readline().split('=')\n elements_str=fp.readline().split('=')\n nnodes=int(nodes_str[1])\n nele=int(elements_str[1])\n t_data1=np.genfromtxt(casename+'_grd.dat',skip_header=2, skip_footer=nnodes,dtype='int64')\n t_data2=np.genfromtxt(casename+'_grd.dat',skip_header=2+nele,dtype='float64')\n fp.close()\n\n data['nnodes']=nnodes\n data['nele']=nele\n data['nodexy']=t_data2[:,1:3]\n data['x']=t_data2[:,1]\n data['y']=t_data2[:,2]\n data['nv']=t_data1[:,1:4].astype(int)-1\n data['trigridxy'] = mplt.Triangulation(data['x'], data['y'],data['nv'])\n \n return data", "def _init_dataset():\n global _residues\n if _residues is not None:\n # Database is already initialized\n return\n\n # Residuue data is taken from\n # ftp://ftp.wwpdb.org/pub/pdb/data/monomers/components.cif\n # (2019/01/27)\n _info_dir = dirname(realpath(__file__))\n with open(join(_info_dir, \"residues.msgpack\"), \"rb\") as file:\n _residues = msgpack.unpack(\n file, use_list=False, raw=False\n )", "def _load_llfiles(casename=None):\n\n data={}\n \n if casename==None:\n print('_load_llfiles requires a filename to load.')\n return\n try:\n fp=open(casename+'_long.dat','r')\n except IOError:\n print('_load_llfiles: long file is invalid.')\n return data\n\n lon=np.genfromtxt(casename+'_long.dat')\n fp.close()\n\n try:\n fp=open(casename+'_lat.dat','r')\n except IOError:\n print('_load_llfiles: lat file is invalid.')\n return data\n\n lat=np.genfromtxt(casename+'_lat.dat')\n fp.close()\n\n data['nodell']=np.vstack([lon,lat]).T\n data['lat']=lat\n data['lon']=lon\n \n return data", "def ReadData( fName = '/tmp/chartdata' ):\n blocks = common.ReadDataFromFile( fName )\n\n return blocks", "def _load_dict(infile):\n\n # read the data into a list\n data = []\n\n # open the file\n f = open(infile)\n\n for line in f:\n # ignore hashed lines\n if not line.startswith('#') and not line.startswith('@'):\n\n # mind to strip newlines\n data.append(line.strip('\\n\\r').split('\\t'))\n \n # create the dictionary in which the data will be stored\n d = {}\n\n # check for first line, if a local ID is given in the header (or simply\n # \"ID\"), take this line as the ID, otherwise create it\n if data[0][0].lower() in ['local_id','localid']:\n local_id = True\n else:\n local_id = False\n\n # iterate over data and fill the dictionary (a bit inefficient, but enough\n # for the moment)\n i = 1\n for line in data[1:]:\n if local_id:\n d[int(line[0])] = line[1:]\n else:\n d[i] = line\n i += 1\n\n # assign the header to d[0]\n if local_id:\n d[0] = [x.lower() for x in data[0][1:]]\n else:\n d[0] = [x.lower() for x in data[0]]\n\n # return the stuff\n return d", "def getHeaders(self):\n\n\t\tself.line = self.mctalFile.readline().split()\n\n\t\tif len(self.line) == 7:\n\t\t\tself.header.kod = 
self.line[0]\n\t\t\tself.header.ver = self.line[1]\n\t\t\tpID_date = self.line[2]\n\t\t\tself.header.probid = np.append(self.header.probid, pID_date)\n\t\t\tpID_time = self.line[3]\n\t\t\tself.header.probid = np.append(self.header.probid, pID_time)\n\t\t\tself.header.knod = int(self.line[4])\n\t\t\tself.header.nps = int(self.line[5])\n\t\t\tself.header.rnr = int(self.line[6])\n\t\telif len(self.line) == 3:\n\t\t\tself.header.knod = int(self.line[0])\n\t\t\tself.header.nps = int(self.line[1])\n\t\t\tself.header.rnr = int(self.line[2])\n\t\t\t\n\n\t\tself.header.title = self.mctalFile.readline().strip()\n\n\t\tself.line = self.mctalFile.readline().split()\n\n\t\tself.header.ntal = int(self.line[1])\n\n\t\tif self.header.ntal == 0:\n\t\t\tprint >> sys.stderr, \"\\n \\033[1;31mNo tallies in this MCTAL file. Exiting.\\033[0m\\n\"\n\t\t\tsys.exit(1)\n\n\t\tif len(self.line) == 4:\n\t\t\tself.header.npert = int(self.line[3])\n\t\t\tprint >> sys.stderr, \"\\n \\033[1;31mMCTAL file with perturbation card. Not supported. Exiting.\\033[0m\\n\"\n\t\t\tsys.exit(1)\n\n\t\tself.line = self.mctalFile.readline().split()\n\n\t\twhile self.line[0].lower() != \"tally\":\n\t\t\tfor l in self.line: self.header.ntals = np.append(self.header.ntals,int(l))\n\t\t\tself.line = self.mctalFile.readline().split()", "def getLoadData(self):\n\n\t\t# Variables\n\t\turl = 'http://mis.nyiso.com/public/dss/nyiso_loads.csv' # Url with the data\n\t\tresponse = urllib2.urlopen(url) # Reading url\n\t\tload_data = csv.reader(response) # Convering data to csv format\n\t\tyear = self.helper.getYear() # Current Year\n\t\thourly_loads = [] # Stores the loads per hour\n\t\tdaily_loads = {} # Stores the loads per hour of a given day\n\t\tmonthly_loads = {} # Stores the loads per day of a given month\n\t\tyearly_loads = {} # Stores the monthly loads in a year\n\n\t\t# Converting data from csv to dictionary\n\t\tfor row in load_data:\n\n\t\t\t# Ignoring first row\n\t\t\tif row[1] != \"Month\" and row[2] != \"Day\" and row[3] != 'Hr1':\n\t\t\t\tmonth = int(row[1])\n\t\t\t\tday = int(row[2])\n\n\t\t\t\t# Getting hourly loads\n\t\t\t\tfor i in range(3,27):\n\t\t\t\t\ttry:\n\t\t\t\t\t\thourly_loads.append(int(row[i]))\n\t\t\t\t\t# If there is an error reading the load then generate a \n\t\t\t\t\t# random load value between 15000 and 25000\n\t\t\t\t\texcept ValueError:\n\t\t\t\t\t\tpass\n\t\t\t\t\t\thourly_loads.append((randint(15000,25000)))\n\t\t\t\tdaily_loads[day] = hourly_loads\n\t\t\t\thourly_loads = []\n\t\t\t\tmonthly_loads[month] = daily_loads\n\t\t\t\tif self.helper.isEndOfMonth(month, day):\n\t\t\t\t\tdaily_loads = {}\n\n\t\tyearly_loads[year] = monthly_loads\n\n\t\treturn yearly_loads", "def load_data():\n if _LOCATIONS_BY_ID:\n return _LOCATIONS_BY_NAME, _LOCATIONS_BY_ID\n\n # We need to read the locations in order of country -> admin level 1 -> admin level 2 -> city.\n # This is so that the higher resolution locations can look up the lower resolution locations\n # that they belong to, and compute the necessary fields.\n countries_by_code = _load_country_data(_DATA_FILES['country'])\n admin1_by_code = _load_admin1_data(_DATA_FILES['admin_1'], countries_by_code)\n admin2_by_code = _load_admin2_data(_DATA_FILES['admin_2'], countries_by_code, admin1_by_code)\n _load_city_data(_DATA_FILES['city'], countries_by_code, admin1_by_code, admin2_by_code)\n _add_alternate_names(_DATA_FILES['alt_wiki_names'])\n _add_estimated_importances(_DATA_FILES['estimated_importance'])\n\n return _LOCATIONS_BY_NAME, _LOCATIONS_BY_ID", "def 
load_data(path, kmer=False, rg=True, clip=True, rna=True, go=False, motif= True, seq = True, oli = False, test = False):\n\n data = dict()\n if go: data[\"X_GO\"] = np.loadtxt(gzip.open(os.path.join(path,\n \"matrix_GeneOntology.tab.gz\")),\n skiprows=1)\n if kmer: data[\"X_KMER\"] = np.loadtxt(gzip.open(os.path.join(path,\n \"matrix_RNAkmers.tab.gz\")),\n skiprows=1)\n if rg: data[\"X_RG\"] = np.loadtxt(gzip.open(os.path.join(path,\n \"matrix_RegionType.tab.gz\")),\n skiprows=1)\n if clip: data[\"X_CLIP\"] = np.loadtxt(gzip.open(os.path.join(path,\n \"matrix_Cobinding.tab.gz\")),\n skiprows=1)\n if rna: data[\"X_RNA\"] = np.loadtxt(gzip.open(os.path.join(path,\n \"matrix_RNAfold.tab.gz\")),\n skiprows=1)\n if motif: data[\"motif\"] = np.loadtxt(gzip.open(os.path.join(path, 'motif_fea.gz'))\n , skiprows=1, usecols=range(1,103))\n if seq: data[\"seq\"] = read_seq(os.path.join(path, 'sequences.fa.gz'))\n if oli: data[\"oli\"] = read_oli_feature(os.path.join(path, 'sequences.fa.gz'))\n if test:\n data[\"Y\"] = []\n else: \n data[\"Y\"] = np.loadtxt(gzip.open(os.path.join(path,\n \"matrix_Response.tab.gz\")),\n skiprows=1)\n #data[\"Y\"] = data[\"Y\"].reshape((len(data[\"Y\"]), 1))\n\n return data", "def parse_data(fp):\n pass", "def read(self, simtype):\n\n if simtype == 'original':\n if self.filename.endswith(\".pkl\"):\n logging.debug(\"Loading pickle file %s\", self.filename)\n data = pd.read_pickle(self.filename)\n\n elif self.filename.endswith(\".hdf5\"):\n logging.debug(\"Loading HDF5 file %s\", self.filename)\n with h5py.File(self.filename, \"r\") as data_file:\n #print('treeIndex', data_file[\"treeIndex\"].keys())\n #print('haloTrees', data_file[\"haloTrees\"].keys())\n \n # Find dimensionality of keys\n columns_1dim = [] \n columns_2dim = [] \n for column in self.columns:\n if len(data_file[\"/haloTrees/%s\" % column].shape) == 1:\n columns_1dim.append(column)\n else:\n columns_2dim.append(column)\n \n # 1D keys\n data = pd.DataFrame(\n {\n column: data_file[\"/haloTrees/%s\" % column].value\n for column in columns_1dim\n },\n columns=columns_1dim\n ).set_index(\"nodeIndex\")\n del columns_1dim\n\n # 2D keys\n for column in columns_2dim:\n if column == 'position':\n pos = data_file[\"/haloTrees/%s\" % column].value\n data['X'] = pd.Series(pos[:, 0], index=data.index)\n data['Y'] = pd.Series(pos[:, 1], index=data.index)\n data['Z'] = pd.Series(pos[:, 2], index=data.index)\n del columns_2dim\n\n data.rename(index=str,\n columns={\"snapshotNumber\": \"snapnum\"})\n ## eliminate fake elements with isIntegrated=1\n #data = data[data.isInterpolated != 1]\n\n else:\n raise TypeError(\"Unknown filetype %s\" % self.filename)\n if simtype == 'EAGLE':\n if self.filename.endswith(\".pkl\"):\n logging.debug(\"Loading pickle file %s\", self.filename)\n data = pd.read_pickle(self.filename)\n\n elif self.filename.endswith(\".hdf5\"):\n logging.debug(\"Loading HDF5 file %s\", self.filename)\n data_file = h5py.File(self.filename, 'r')\n column_mt = []\n column_sh = []\n for column in self.columns:\n if column in data_file['MergerTree']:\n column_mt.append(column)\n else:\n column_sh.append(column)\n\n data = pd.DataFrame(\n {\n column: data_file[\"/MergerTree/%s\" % column].value\n for column in column_mt\n },\n columns=column_mt\n ).set_index(\"HaloID\")\n #.set_index(data_file[\"/Merger/HaloID\"].value)\n\n for column in column_sh:\n data[column] = pd.Series(data_file[\"/Subhalo/%s\" % column].value,\n index=data.index)\n data = data.rename(index=str,\n columns={\"SnapNum\": \"snapnum\", 
#\"HaloID\": \"nodeIndex\",\n \"DescendantID\" : \"descendantIndex\"})\n else:\n raise TypeError(\"Unknown filetype %s\" % self.filename)\n\n return data", "def loader():\n bucket = data_load_variables[\"bucket\"]\n\n if data_load_variables[\"use_lite_dataset\"]:\n dataset_name = data_load_variables[\"lite_dataset_name\"]\n else:\n dataset_name = data_load_variables[\"dataset_name\"]\n\n s3 = boto3.client('s3')\n\n obj = s3.get_object(Bucket=bucket, Key=dataset_name)\n # get object and file (key) from bucket\n\n df = pd.read_csv(obj['Body'])\n return df", "def data(self) -> Tuple[List[str], List[List[str]]]:\n format = self.format\n # Check if the file contains header information. Initialize the header\n # with the optional names of columns in the format descriptor.\n has_header = format.get('header', True)\n columns = format.get('columns')\n rows = list()\n # Delimiter depends on the file format.\n delim = '\\t' if format['type'] == 'tsv' else ','\n f = codecs.iterdecode(self.load().open(), 'utf-8')\n for row in csv.reader(f, delimiter=delim):\n if has_header:\n # Set the has_header flag to False so that all following records\n # are added to the list of rows.\n has_header = False\n columns = row if columns is None else columns\n else:\n rows.append(row)\n columns = [None] * len(rows[0]) if not columns and rows else columns\n return (columns, rows)", "def testLoadData(self):\n data = load_covid_data(file)\n assert type(data).__name__ == 'dict'", "def load_data():\n # Dictionary mapping image names to labels\n image_name_to_label = dict()\n\n # Store labels associated with image names\n notifier.send(\" Reading metadata...\")\n with open(\"data/metadata.csv\") as file: # Original dataset\n # Use images for normal, virus (unknown type), COVID-19, SARS\n metadata_contents = csv.DictReader(file)\n for row in metadata_contents:\n if row[\"Label\"].lower() == \"normal\":\n label = 2\n elif row[\"Label_2_Virus_category\"].lower() == \"covid-19\":\n label = 0\n elif row[\"Label_1_Virus_category\"].lower() == \"virus\":\n label = 1\n else:\n continue\n image_name_to_label[row[\"X_ray_image_name\"]] = label\n with open(\"data/metadata2.csv\") as file: # GitHub dataset\n # Use COVID-19, SARS\n metadata_contents = csv.DictReader(file)\n for row in metadata_contents:\n if row[\"filename\"] in image_name_to_label: # Image already added\n continue\n if \"covid-19\" in row[\"finding\"].lower():\n label = 0\n elif row[\"finding\"].lower() == \"sars\":\n label = 1\n else:\n continue\n image_name_to_label[row[\"filename\"]] = label\n with open(\"data/metadata_COVID-19.csv\") as file: # Additional COVID-19 images\n metadata_contents = csv.DictReader(file)\n for row in metadata_contents:\n name = \"COVID-19/\" + row[\"FILE NAME\"] + \".\" + row[\"FORMAT\"]\n image_name_to_label[name.lower().replace(\" \", \"\")] = 0\n with open(\"data/metadata_ViralPneumonia.csv\") as file: # Additional virus images\n metadata_contents = csv.DictReader(file)\n for row in metadata_contents:\n name = \"ViralPneumonia/\" + row[\"FILE NAME\"].replace(\"-\", \"(\") + \").\" + row[\"FORMAT\"]\n image_name_to_label[name.lower().replace(\" \", \"\")] = 1\n with open(\"data/metadata_Normal.csv\") as file: # Additional normal images\n metadata_contents = csv.DictReader(file)\n for row in metadata_contents:\n name = \"Normal/\" + row[\"FILE NAME\"].replace(\"-\", \"(\") + \").\" + row[\"FORMAT\"]\n image_name_to_label[name.lower().replace(\" \", \"\")] = 2\n\n notifier.send(\" Loading images...\")\n images, labels = 
load_images(image_name_to_label)\n\n notifier.send(\" Splitting data...\")\n return split_data(images, labels)", "def load_data(filename):\r\n with open(filename,'rb') as f:\r\n data = pk.load(f,encoding='bytes')\r\n return data[b'data'],data[b'labels']", "def readData(games_data_filename, players_data_filename):\n\tgame_stats = readGameFile(games_data_filename)\n\tgame_stats, team_stats, player_stats = readPlayerFileAndFillStats(players_data_filename, game_stats)\n\treturn game_stats, team_stats, player_stats", "def read_data(filename, prefix=None):\n p_data = {}\n with open(filename) as f:\n # This first line is the header for the entire file.\n line = f.next()\n line = line.strip()\n # prev_line = line\n top_header = line.split(',')\n if not top_header:\n # Don't parse this for now.\n pass\n # Now read in per-participant data.\n while True:\n word_list = []\n all_words_data = {}\n # The first line for the participant is a header.\n try:\n line = f.next()\n except StopIteration:\n # We had previously read everything, so we're done.\n break\n line = line.strip()\n p_header = line.split(',')\n\n # The participant's ID # comes first.\n p_id = p_header[0]\n if not p_id:\n # This happens when the previous participant didn't answer.\n \"\"\"\n print 'previous line:', prev_line\n print 'current line:', line\n print 'p header:', p_header\n print\n \"\"\"\n continue\n if prefix:\n p_id = prefix + p_id\n # print 'SN #', p_id\n # The number of N/A's this p is at 28.\n try:\n p_nas = int(p_header[28])\n except ValueError:\n # This happens when an RA messes up the file.\n \"\"\"\n print 'nas: previous line:', prev_line\n print 'nas: current line:', line\n print 'nas: p header:', p_header\n print\n \"\"\"\n raise\n # print \"NA's: #\", p_nas\n # Check if this participant left everything blank.\n # XXX: Have to hard-code this.\n if p_nas == 20:\n \"\"\"Don't record anything.\n p_data[p_id] = {'words': None,\n 'word_data': None,\n 'nas': None,\n 'overall': None}\n \"\"\"\n continue\n # The next line after the header has both the data\n # for the first word and overall statistics.\n # prev_line = line\n try:\n line = f.next()\n except StopIteration:\n # We had previously read everything, so we're done.\n break\n line = line.strip()\n word, word_data, overall_data = parse_first_line(line.split(','))\n word_list.append(word)\n all_words_data[word] = word_data\n # Now read data for the rest of the words.\n for line in f:\n line = line.strip()\n word, word_data = parse_data_lines(line.split(','))\n if word == '':\n \"\"\"\n print \"loop's previous line:\", prev_line\n print \"loop's current line:\", line\n print\n \"\"\"\n # prev_line = line\n break\n word_list.append(word)\n all_words_data[word] = word_data\n # prev_line = line\n # Compute per-word averages\n all_total_avg, future_total_avg, past_total_avg = \\\n datacomputer.compute_all_future_past(all_words_data)\n overall_data['all'] = all_total_avg\n overall_data['future'] = future_total_avg\n overall_data['past'] = past_total_avg\n p_data[p_id] = {'words': word_list,\n 'word_data': all_words_data,\n 'nas': p_nas,\n 'overall': overall_data}\n # print 'p_data'\n # print p_data[p_id]\n # print\n print \"Processed {} participants' data\".format(len(p_data))\n return p_data", "def load(self):\n if self.loaded or not self.has_data:\n return self.data\n if self.filename:\n try:\n with lzma.open(os.path.join('resources', self.game, 'dumps', self.filename), 'rb') as df:\n return self.load_from_open_file(df)\n except Exception as e:\n return ['ERROR! 
Could not load data: {}'.format(str(e))]", "def open_file(path):\r\n f = open(path, encoding='utf-8', errors='ignore')\r\n data = f.readlines()\r\n lst_with_data = []\r\n for i in data:\r\n i = i.replace('\"', ' ').replace(\"\\t\", ' ').replace(\"\\n\", \" \").replace(\"'\", ' ').split(' ')\r\n lst_with_data.append(i)\r\n res_lst = [] \r\n for i in lst_with_data:\r\n append_lst = []\r\n for j in i:\r\n if j.isdigit() or j == \"-\":\r\n append_lst.append(j) \r\n if len(append_lst) != 0: \r\n res_lst.append(append_lst) \r\n res_lst = res_lst[1:]\r\n res = [] \r\n for i in res_lst:\r\n if len(i) != len(res_lst[0]):\r\n i = i[1:]\r\n res.append(i) \r\n else:\r\n res.append(i) \r\n ln = len(res[0])\r\n data_by_years = []\r\n for i in range(ln):\r\n data_y = []\r\n for j in res:\r\n data_y.append(j[i])\r\n data_by_years.append(data_y) \r\n dict_by_years = {}\r\n dict_with_total = file_with_total_inform(\"Total_Lviv.csv\")\r\n for i in data_by_years:\r\n dict_by_years[int(i[0])] = causes(i)\r\n dict_by_years[int(i[0])].update({\"Total\": dict_with_total[i[0]]})\r\n res_dict = {}\r\n res_dict[\"Lviv\"] = dict_by_years \r\n return res_dict", "def load_data(self):\n raise NotImplementedError()", "def load_data():\n with open('data.pickle', 'rb') as f:\n data = pickle.load(f)\n return data", "def parse(self):\n\n\t\t# Open and parse the file\n\t\twith open(self.name, 'r') as fdi:\n\t\t\tfor line in fdi:\n\t\t\t\twords = [word for word in line.split(' ') if word != ' ' and word != ':']\n\t\t\t\t\n\t\t\t\t# Store the data in the hash\n\t\t\t\tself.data_hash[int(words[0], 16)] = int(words[1])\n\n\t\t# Sort the dictionary by addresses\n\t\tself.data_hash = od(sorted(self.data_hash.items(), key = lambda t : t[0]))\n\n\t\tprint 'Total Bytes :', float(sum(self.data_hash.values())) / 1024 / 1024\n\n\t\treturn", "def read(self):\n dictionary = {}\n with open(self.path) as file:\n key_header = \"\"\n for line in file:\n entry = line.strip().split()\n if len(entry) == 0:\n continue\n if len(entry) == 1:\n key_header = entry[0]+\"_\"\n else:\n key = entry[0].strip()\n value = reduce(lambda x1, y1: x1+\" \" + y1, entry[1:])\n dictionary[key_header+key] = value\n return dictionary", "def pareHeader(headerFile,Ldontcares=['GData','BiasCoeff','headerFile','y_m_d','TimeZero']):\n reload(chd) # KEN SCOPE ISSUE?\n dHeader = chd.main(['headerFile=' + headerFile])\n #Ldontcares = ['GData','BiasCoeff','headerFile','y_m_d','TimeZero']\n #Ldontcares = ['GData','BiasCoeff','headerFile','y_m_d']\n for k in Ldontcares:\n del dHeader[k]\n dataFile = split(headerFile,'.header')[0] # toss extension\n return dHeader,dataFile" ]
[ "0.6701253", "0.64370406", "0.63576305", "0.6343084", "0.62742215", "0.62503767", "0.62353116", "0.62206954", "0.6175295", "0.61426353", "0.61399317", "0.61399025", "0.6133964", "0.61294085", "0.60999405", "0.60984147", "0.60699123", "0.60694903", "0.6038092", "0.60380447", "0.6034107", "0.602608", "0.60260534", "0.602335", "0.60104823", "0.60061836", "0.6003519", "0.59970254", "0.5994989", "0.5985927", "0.5979812", "0.5935019", "0.592662", "0.5921183", "0.5918315", "0.5902792", "0.590029", "0.58975923", "0.5896617", "0.5871036", "0.586309", "0.58600944", "0.5849301", "0.5848995", "0.584765", "0.58437186", "0.5837779", "0.58365935", "0.5835668", "0.5820947", "0.5812335", "0.57944334", "0.57912", "0.5789585", "0.5781939", "0.5746539", "0.57459694", "0.5745142", "0.57414705", "0.57357305", "0.57240546", "0.57203954", "0.5716972", "0.571567", "0.57086205", "0.5708082", "0.57074183", "0.5707203", "0.5705895", "0.57053596", "0.5696804", "0.56863", "0.56837064", "0.56734926", "0.56712127", "0.5665822", "0.5665542", "0.56614363", "0.5657401", "0.56511956", "0.56478524", "0.5642901", "0.564116", "0.56392276", "0.5638299", "0.5634789", "0.563353", "0.5630469", "0.5628759", "0.5619358", "0.56151634", "0.56125504", "0.56112003", "0.5607969", "0.56026936", "0.5598534", "0.5595941", "0.55869704", "0.5586534", "0.5582701" ]
0.58532524
42
Factory method to create a cache object from github/spilchen/baseball_id_db. This is called as part of package initialization and so can be referred to via the Lookup variable.
>>> from baseball_id import Lookup
>>> Lookup.from_yahoo_ids([10794, 9542, 7578])
def create(cls):
        ssl._create_default_https_context = ssl._create_unverified_context
        c = lookup.Cache('https://raw.githubusercontent.com/spilchen/baseball_id_db/main/master.csv')
        return c
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_fake(cls):\n source = pkg_resources.open_text('baseball_id', 'sample.master.csv',\n encoding='iso-8859-1')\n c = lookup.Cache(source)\n return c", "def construct(cls, obs_lists, platform_id):\n step = 0\n LookupTable = []\n while step < obs_lists.shape[0]:\n K = str(int(obs_lists[step, 0]))\n LookupTable.append(BaseCreateFactory(K, platform_id).create_object())\n step += 1\n return LookupTable", "def load_by_ids(cls,ids):\n if not ids or ids[0] == '':\n return None\n es = from_caches(ids) #(ids,'SuiBook') as prefixed\n notfounds = filter(lambda e:e not in es, ids)\n if len(notfounds)>0:\n es2 = dict((str(e.key().id()),e) for e in SuiBook.get_by_id(map(lambda e:int(e),notfounds)) if e)\n to_caches(es2) #to_caches(dict(),time,key_prefix='SuiBook')\n es.update(es2)\n return es", "def __init__(self, simplecache=None, kodidb=None):\n\n if not kodidb:\n from kodidb import KodiDb\n self.kodidb = KodiDb()\n else:\n self.kodidb = kodidb\n\n if not simplecache:\n from simplecache import SimpleCache\n self.cache = SimpleCache()\n else:\n self.cache = simplecache", "def init():\n database = \"database.pkl\"\n\n onsite_bills = BillID(database)\n online_bills = BillID(database)\n\n return onsite_bills, online_bills", "def load_by_ids(cls,ids):\n es = from_caches(ids) #some are loaded from memcache, others are ignored.\n notfounds = filter(lambda e:e not in es, ids)\n if len(notfounds)>0:\n es2 = dict((str(e.key().id()),e) for e in SuiGoods.get_by_id(map(lambda e:int(e),notfounds)))\n to_caches(es2)\n es.update(es2)\n return es", "def seek_by_id(cls,id):\n bk = from_caches('%s'%id)\n if not bk:\n bk = SuiBook.get_by_id(int(id))\n if bk:\n to_cache('%s'%id, bk)\n return bk", "def __init__(self, *args, **kw):\n # kw['strIdent'] = DBCAT\n BaseDB.__init__(self, *args, **kw)\n # cache by project name as key and project Id as value\n self._gbl_projectid_cache = {}", "def symbol_factory(self, id, bp=0):\n try:\n s = self.sym[id] # if already in table don't do anything. Memoized\n except KeyError as e:\n\n class s(BaseSymbol): \n # create appropriate symbol class at run time\n pass\n\n s.__name__ = \"sym(\" + id + \")\"\n s.id = id\n s.lbp = bp\n s.parent = self\n self.sym[id] = s\n else:\n s.lbp = max(bp, s.lbp)\n return s # NOTE: This function does not returns an object. 
It returns the class", "def _cache(item_label, item_list):\n id_label = item_label + '_id'\n mbid_label = item_label + '_mbid'\n echonest_id_label = item_label + '_echonest_id'\n items = {}\n for item in item_list:\n key = '/%s/%s' % (item_label, item[id_label])\n items[key] = item\n musicbrainz_id = item.get(mbid_label, None)\n if musicbrainz_id:\n items['/musicbrainz/%s/%s' % (item_label, musicbrainz_id)] = key\n # echonest_id = item.get(echonest_id_label, None)\n # if echonest_id:\n # items['/echonest/%s/%s' % (item_label, echonest_id)] = key\n application.config.get('CACHE').set_many(items)", "def create_db_from_cache():\n with open('matches.cache', 'rb') as f:\n matches = pickle.load(f)\n\n Base.metadata.create_all(engine)\n match_loader(matches)", "def __new__(cls, *args, **kwargs):\n if cls.__instance is None:\n cls.__instance = super(CacheManagerSingleton, cls).__new__(cls)\n # Generate all ground truth data files from hard-coded data\n CacheManagerSingleton.export_all_ground_truth_data()\n return cls.__instance", "def _from_db_object_list(db_objects, cls, context):\n return [Boar._from_db_object(cls(context), obj)\n for obj in db_objects]", "def lookup(cls, _db, short_name):\n def _lookup():\n library = get_one(_db, Library, short_name=short_name)\n return library, False\n library, is_new = cls.by_cache_key(_db, short_name, _lookup)\n return library", "def make_library_cache(prefix):\n # avoid cache prefix reuse\n assert prefix not in _lib_cache_prefixes\n _lib_cache_prefixes.add(prefix)\n\n class CustomCodeLibraryCacheImpl(CodeLibraryCacheImpl):\n _filename_prefix = prefix\n\n class LibraryCache(Cache):\n \"\"\"\n Implements Cache that saves and loads CodeLibrary objects for additional\n feature for the specified python function.\n \"\"\"\n _impl_class = CustomCodeLibraryCacheImpl\n\n return LibraryCache", "def __init__(self):\n if Config.USEMEMCACHED is True:\n self.mc = MCache(server = Config.MEMCACHED_SERVER,\n username = Config.MEMCACHED_USERNAME,\n password = Config.MEMCACHED_PASSWORD)\n else:\n self.mc = None\n self.api = DozensApi()", "def make_crypto_db():\n threading.Timer(3600, make_crypto_db).start()\n req = requests.get(url = 'https://api.coinmarketcap.com/v1/ticker/?limit=0')\n all_coins = req.json()\n for coins in all_coins:\n name_id_map[coins['name'].lower()] = coins['id']\n symbol_id_map[coins['symbol'].lower()] = coins['id']", "def load_srumid_lookups(database):\n id_lookup = {}\n #Note columns 0 = Type, 1 = Index, 2 = Value\n lookup_table = database.get_table_by_name('SruDbIdMapTable')\n column_lookup = dict([(x.name,index) for index,x in enumerate(lookup_table.columns)]) \n for rec_entry_num in range(lookup_table.number_of_records):\n bin_blob = smart_retrieve(lookup_table,rec_entry_num, column_lookup['IdBlob'])\n if smart_retrieve(lookup_table,rec_entry_num, column_lookup['IdType'])==3:\n bin_blob = BinarySIDtoStringSID(bin_blob)\n elif not bin_blob == \"Empty\":\n bin_blob = blob_to_string(bin_blob)\n id_lookup[smart_retrieve(lookup_table,rec_entry_num, column_lookup['IdIndex'])] = bin_blob\n return id_lookup", "def make_cache_keys(self, identifiers):\n\n raise NotImplementedError", "def test_multidb_fetch_by_id(self):\r\n with self.settings(FETCH_BY_ID=True):\r\n assert Addon.objects.get(id=1).from_cache is False\r\n assert Addon.objects.get(id=1).from_cache is True\r\n\r\n from_slave = Addon.objects.using('slave').get(id=1)\r\n assert from_slave.from_cache is False\r\n assert from_slave._state.db == 'slave'", "def __init__(self, obj):\n self.obj = obj\n 
self._pkcache = {}\n self._idcache = obj.__class__.__instance_cache__\n self._typecache = defaultdict(dict)\n self.init()", "def from_cache(self, cache_key=None, pk=None):\n if pk:\n cache_key = self._cache_key(pk)\n # if cache_key is none, the mangler will generate a MD5 from the query\n return FromCache(self.label, cache_key)", "def __init__(self, cachefile=None):\n self.cache = dict()\n self.sites = []\n if cachefile:\n try:\n with open(cachefile, 'rb') as cf:\n saved_sites = pickle.load(cf)\n for sitename, popularity, latency, content in saved_sites:\n if content is None: continue\n self.cache_site(sitename, popularity, content, latency)\n except Exception as e:\n print('Failed to open cachefile \"{}\": {}'.format(cachefile, e), file=sys.stderr)", "def from_list(cls, ticker_list, start, end, get_ohlcv=False,\n get_fundamentals=False):\n\n if get_fundamentals:\n cls._init_spiders(ticker_list=ticker_list, start_date=start,\n end_date=end)\n\n with db.transactional_session() as session:\n for ticker in ticker_list:\n session.add(cls(ticker=ticker, start_date=start, end_date=end,\n get_ohlcv=get_ohlcv,\n get_fundamentals=get_fundamentals))", "def getJsonDbFactory(emailOptions, perfToTrackOptions, JsonableRecordClass, savedFilesTracker):\n keyFunc = lambda x: ((-1 if perfToTrackOptions.isLargerBetter else 1)*getattr(x,perfToTrackOptions.perfAttrName))\n JsonableRecordsHolderClass = jsondb.getSortedJsonableRecordsHolderClass(keyFunc=keyFunc); \n #the metadata callbacks are: print if there's a new best, and also save\n #the best performing model.\n metadataCallbacks = [getPrintIfNewBestCallback()];\n if emailOptions is not None and emailOptions.emailMode in [EmailModes.allEmails, EmailModes.errorsAndNewBest]:\n metadataCallbacks.append(getEmailIfNewBestCallback(emailOptions, perfToTrackOptions));\n callbacks_beforeAdd = [ renameFilesWithRecordNumberCallback(savedFilesTracker)\n , getSaveBestFilesCallback(perfToTrackOptions, savedFilesTracker)\n , getSaveSomeFilesCallback(perfToTrackOptions, savedFilesTracker)];\n callbacks_afterAdd = [getPrintAddedRecordCallback()]\n if (emailOptions is not None and emailOptions.emailMode in [EmailModes.allEmails]):\n callbacks_afterAdd.append(getEmailRecordAddedCallback(emailOptions)); \n\n MetadataClass = jsondb.getUpdateValsMetadataClass(\n [jsondb.MetadataUpdateInfo(\n metadataAttrName=perfToTrackOptions.perfAttrName\n ,recordAttrName=perfToTrackOptions.perfAttrName\n ,updateFunc=getBestUpdateFunc(\n isLargerBetter=perfToTrackOptions.isLargerBetter\n ,metadataCallbacks=metadataCallbacks)\n ,initVal=None)\n ,jsondb.NumRecordsMetadataUpdateInfo]\n ,[RunTrackerMetadataFields.bestPerfSavedFiles]); \n jsonDbFactory = jsondb.JsonDb.getFactory(JsonableRecordClass=JsonableRecordClass\n ,JsonableRecordsHolderClass=JsonableRecordsHolderClass\n ,MetadataClass=MetadataClass\n ,callbacks_beforeAdd=callbacks_beforeAdd\n ,callbacks_afterAdd=callbacks_afterAdd); \n return jsonDbFactory;", "def fake_db() -> Callable[[None], FakeRedis]:\n @lru_cache\n def wrapper() -> FakeRedis:\n db = FakeRedis(decode_responses=True)\n return db\n\n return wrapper", "def fetch_objects(cache_key_f, get_database_f, item_keys):\r\n item_key_to_item = get_many_by_key(cache_key_f, item_keys)\r\n \r\n for item_key in item_keys:\r\n if item_key not in item_key_to_item:\r\n # failed to get the item from the cache\r\n try:\r\n # have to get each item individually to cache the query\r\n item = get_database_f(item_key)\r\n item_key_to_item[item_key] = item\r\n except ObjectDoesNotExist:\r\n pass\r\n 
\r\n return item_key_to_item", "def __init__(self,db,tables=[]):\n #{{{ Load class and test databases\n self.dbcentral = db\n self.tables = tables\n self.debug = config.debug\n self.null_vals = defaultdict(lambda: defaultdict(dict))\n\n \"\"\"\n Load values from databases\n \"\"\"\n self._get_nulls()", "def create_proxy_dict() -> DictProxy:\n manager = new_manager()\n cache_ids = manager.dict() # type: DictProxy\n return cache_ids", "def cache(cls):\n return Cache(cls, cls.cache_regions, cls.cache_label)", "def __init__(self, db: Database, r_db: Redis) -> None:\n super().__init__(db, r_db)\n self.todos_repo = TodosRepository(db, r_db)\n self.tasks_repo = TasksRepository(db, r_db)", "def get_from_cache(item_id, memory_cache, db_cache_table, db_cache_table_id_field=\"id\"):\n # try to retrieve from local in-memory cache\n rv, expires_at = memory_cache.get(item_id, (None, 0))\n if expires_at > expiry:\n return rv\n\n # try to retrieve from database cache\n if hasattr(flask.current_app, \"db\"): # we don't have db in startup\n with flask.current_app.db.session as session:\n cache = (\n session.query(db_cache_table)\n .filter(\n getattr(db_cache_table, db_cache_table_id_field, None) == item_id\n )\n .first()\n )\n if cache and cache.expires_at and cache.expires_at > expiry:\n rv = dict(cache)\n\n # store in memory cache\n memory_cache[item_id] = rv, cache.expires_at\n return rv", "def create_new_tickers(tick_scrape):\n #Check if ticker exists, if not add it to the ticker table\n tick_db = sqlaq_to_df(ticker.fetch())\n #add the id to the tick_ftse table\n new_tick = pd.merge(\n tick_scrape,\n tick_db[[\"id\",\"ticker\"]],\n on=[\"ticker\"],\n how=\"left\"\n )\n #find tickers which don't exist\n new_tick = new_tick[new_tick.id.isnull()]\n logger.info(f\"{new_tick.shape[0]} items to add to ticker\")\n #add to db\n ticker.add_df(new_tick)\n #fetch updated table\n tick_db = sqlaq_to_df(ticker.fetch())\n return tick_db", "def __init__(self,lookup_data_file=None,lookup_data_fp=None,\n cols=(0,3), major_delimiter='\\t',minor_delimiter=',',\n null_ids=('NA')):\n # Initialise\n self.__lookup = {}\n self.__reverse_lookup = {}\n # Open file and read in data\n if lookup_data_fp is None:\n fp = io.open(lookup_data_file,'rt')\n else:\n fp = lookup_data_fp\n for line in fp:\n # Split into columns on major delimiter\n data = line.strip().split(major_delimiter)\n # Get the data\n key = data[cols[0]]\n # Split again on minor delimiter\n values = []\n if minor_delimiter:\n for item in data[cols[1]].strip().split(minor_delimiter):\n values.append(item)\n else:\n values.append(data[cols[1]])\n for value in values:\n # Check for 'null' values\n if value in null_ids:\n continue\n # Store the data\n try:\n self.__lookup[key].append(value)\n except KeyError:\n self.__lookup[key] = [value]\n try:\n self.__reverse_lookup[value].append(key)\n except KeyError:\n self.__reverse_lookup[value] = [key]\n # Finished - close the file\n if lookup_data_fp is None:\n fp.close()", "def __init__(self, dbname='', client=None, client_args={}):\n assert safechar_re.match(dbname)\n if client is None:\n client = redis.Redis(**client_args)\n self.client = client\n self.schema = schema.Schema()\n self.dbprefix = dbname + ':'\n self.cache_timeout = 1000000 # Number of seconds cached items are kept", "def test_multidb_cache(self):\r\n assert Addon.objects.get(id=1).from_cache is False\r\n assert Addon.objects.get(id=1).from_cache is True\r\n\r\n from_slave = Addon.objects.using('slave').get(id=1)\r\n assert from_slave.from_cache is False\r\n 
assert from_slave._state.db == 'slave'", "def __init__(self):\n self.lookup = {}", "def __init__(self, id=None, domain_name=None, fnc=increment_by_one, init_val=None):\r\n self._db = None\r\n self._value = None\r\n self.last_value = None\r\n self.domain_name = domain_name\r\n self.id = id\r\n if self.id == None:\r\n import uuid\r\n self.id = str(uuid.uuid4())\r\n if init_val == None:\r\n init_val = fnc(init_val)\r\n self.val = init_val\r\n\r\n self.item_type = type(fnc(None))\r\n self.timestamp = None\r\n # Allow us to pass in a full name to a function\r\n if type(fnc) == str:\r\n from boto.utils import find_class\r\n fnc = find_class(fnc)\r\n self.fnc = fnc", "def _cache(self):\n return self._class(self.client_servers, **self._options)", "def bdb_init_hash(db_file, cache_size=None):\n\tdb_dir = dirname(db_file)\n\tif not isdir(db_dir):\n\t\tmakedirs(db_dir)\n\tdb = DB()\n\tif cache_size is None:\n\t\tcache_size = _cache_size\n\tdb.set_cachesize (\n\t\tcache_size / (1024*1024*1024),\n\t\tcache_size % (1024*1024*1024)\n\t)\n\tdb.open(db_file, dbtype=DB_HASH, flags=DB_CREATE)\n\treturn db", "def __init__(self, *args, **kwargs):\n self._cachedict = {}", "def __init__(self, db):\n\n # Get a logger handle (singleton)\n self._logger = logger.logger()\n\n # Set the database\n self._db = db\n\n # Pull the MapReduce manager collection\n self._storage = db[\"mr_manager\"]\n\n '''\n # Pull the MapReduce manager\n self._storage = db[\"mr_manager\"].find_one({\"_dataBlobID\":\"mr_manager\"})\n if (not self._storage):\n self._logger.warning(\"Didn't find the MapReduce manager: creating it...\")\n db[\"mr_manager\"].save({\"_dataBlobID\":\"mr_manager\", 'desc':\"MapReduce Manager\",'mr_job_array':[]})\n self._storage = db[\"mr_manager\"].find_one({\"_dataBlobID\":\"mr_manager\"})\n\n\n # Make sure we have the time zone info all set\n mr_job_array = self._storage['mr_job_array']\n for mr_job in mr_job_array:\n mr_job['start'] = pytz.UTC.localize(mr_job['start'])\n mr_job['end'] = pytz.UTC.localize(mr_job['end'])\n '''", "def get(cls) -> BombFactory:\n activity = ba.getactivity()\n factory = activity.customdata.get(cls._STORENAME)\n if factory is None:\n factory = BombFactory()\n activity.customdata[cls._STORENAME] = factory\n assert isinstance(factory, BombFactory)\n return factory", "def __init__(self, access_token_cache, account_id, credentials):\n super(AccessTokenStore, self).__init__(lock=None)\n self._access_token_cache = access_token_cache\n self._account_id = account_id\n self._credentials = credentials", "def getJobCache(*jobSpecIds):\n \n jobData = WEJob.get(list(jobSpecIds))\n if type(jobData) != type([]):\n jobData = [jobData]\n result = {}\n # //\n # // make sure all job ids have an entry\n #//\n [ result.__setitem__(k, None) for k in jobSpecIds]\n # //\n # // update result with actual data\n #//\n [ result.__setitem__(k['id'], k.get('cache_dir', None)) for k in jobData ]\n \n return result", "def __init__(self, cache_dir: str, cache_size: int):\n self.cache_dir = cache_dir\n self.cache_size = int(cache_size * 1e6)\n self.index = {}\n self.touch_list = []\n self._populate_index()", "def get_db(self, id: Any) -> DatabaseEngine:\n\n id = str(id)\n try: #perform cache lookup for this server\n ref = self._cache[id] \n return ref\n except KeyError: #the object may have been garbage collected while we were referencing it, or just doesn't exist\n pass\n\n # register models\n handle = self._engine()\n handle.connect(self._path / id)\n for model in self._registered_models:\n handle.register(model)\n\n 
self._cache[id] = handle #cache our engine instance\n return handle", "def instantiate(cls, data_store, identifier):\n pass", "def fill_cache_table():\n products = []\n for query in ['bread', 'milk', 'rice']:\n grocery = grocery_search(query)\n products += get_all_products_from_grocery_search(grocery)\n\n orm = ORM()\n for product in products:\n orm.add_cache(**product)", "def __init__(self, artifact_caches, backfill=True):\r\n if not artifact_caches:\r\n raise ValueError('Must provide at least one underlying artifact cache')\r\n log = artifact_caches[0].log\r\n artifact_root = artifact_caches[0].artifact_root\r\n if any(x.artifact_root != artifact_root for x in artifact_caches):\r\n raise ValueError('Combined artifact caches must all have the same artifact root.')\r\n ArtifactCache.__init__(self, log, artifact_root)\r\n self._artifact_caches = artifact_caches\r\n self._backfill = backfill", "def __init__(self):\n initialize_db()\n self.ids_seen = set()", "def __init__(self,repo_info, x):\n\n super(_CachedResults, self).__init__(repo_info)\n if isinstance(x,tuple):\n self._tuple = []\n for i in range(len(x)):\n self._add_object(x[i], i)\n else:\n if hasattr(x, 'repo_info'):\n self.repo_x = x\n else:\n self.x = x", "def __init__(self, db_path: str):\r\n self.blocks = []\r\n self.db = os.path.join(sys.path[0], db_path)\r\n with sqlite3.connect(self.db) as con:\r\n cur = con.cursor()\r\n cur.execute(\"\"\"CREATE TABLE IF NOT EXISTS uids(\r\n id string PRIMARY KEY\r\n )\"\"\")\r\n con.commit()", "def from_id(cls, id):\n SELECTSQL = \"SELECT * FROM accounts WHERE id=:id;\"\n with sqlite3.connect(cls.dbpath) as connection:\n connection.row_factory = sqlite3.Row\n cursor = connection.cursor()\n cursor.execute(SELECTSQL, {\"id\": id})\n dictrow = cursor.fetchone()\n if dictrow:\n return cls(**dictrow)\n return None", "def load(self, *uids):\n for uid in uids:\n try:\n yield self._registry.get(uid)\n except KeyError:\n yield None", "def fetch(self, object_ids):\n return libplasma.fetch(self.conn, object_ids)", "def __init__(self, cache_key_gen_version=None):\r\n self._cache_key_gen_version = (cache_key_gen_version or '') + '_' + GLOBAL_CACHE_KEY_GEN_VERSION", "def hfreq_bdb_init(db_file, cache_size=None):\n\treturn bdb_init_hash(db_file, cache_size)", "def __init__(self, db):\n\n # Add database object\n self.db = db\n\n # Initialize a dictionary to store maps of meg data (oscillation bands)\n self.meg_maps = dict()\n self.bands = dict()\n\n # Initialize a dictionary to store exponent map\n self.exponent_map = dict({'Exponents': np.array([])})\n\n # Initialize booleans that keep track of what is loaded\n self.oscs_loaded = False\n self.exponents_loaded = False", "def build_cache(klass: \"PrefixCache\", operators: Tuple[Operator, ...]) -> \"PrefixCache\":\n prefix_cache = klass()\n for operator in operators:\n prefix_cache.update_with_operator(operator)\n\n return prefix_cache", "def FromId(self, id):\n\n self.persistant = self.db.GetOneRow('select * from tracks where '\n 'id=%d;'\n % id)", "def _unpack_items(self):\n\n # Prevent the unpack operation from occurring more than once.\n if hasattr(self, '_unpack'):\n delattr(self, '_unpack')\n else:\n return\n\n # The list contains identifiers that will be unpacked into real items.\n # Copy them so they won't be lost when the list values are altered.\n identifiers = self[:]\n\n cache_keys = dict(izip(identifiers, self.make_cache_keys(identifiers)))\n cached_items = self.cache_backend.get_many(cache_keys.values())\n\n items = {}\n missed = []\n for 
identifier, cache_key in cache_keys.items():\n try:\n item = cached_items[cache_key]\n assert item is not None\n except (AssertionError, KeyError):\n missed.append(identifier)\n else:\n items[identifier] = item\n\n if missed:\n\n # Rebuild the missing items using their identifiers and\n # replace the contents of this list with those new items.\n self[:] = self.rebuild_items(missed)\n\n # Use the pack_items method to add them to the cache and also\n # get back their identifiers. Finally, put the new items into\n # the items dict to be returned at the end.\n found_identifiers = self._pack_items()\n items.update(izip(found_identifiers, self))\n\n # Replace the value of this list with the final result.\n del self[:]\n for identifier in identifiers:\n item = items.get(identifier)\n if item is not None:\n self.append(item)", "def load(cls, id):\n key = cls.get_key_prefix()+\"#\"+str(id)\n src = dal_get(key)\n logger.debug( \"LOAD %s %s %s\", str(key), str(id), str(src))\n if src == None:\n raise cls.NotExist(\"No instance could be found with ID: \"+str(id))\n result = dal_retrieve(src)\n result = cls._from_dict(result)\n return result", "def __init__(self, context=None):\r\n self._org_url = PARAMS['BB_ORG_FETCHER_URL']\r\n self._cached_metadata = defaultdict(dict)\r\n self._remote_cache_timestamp = None\r\n self._bb_request_time_limit = int(PARAMS[\"BB_REQUEST_TIME_LIMIT\"])\r\n\r\n try:\r\n context_key = PARAMS['FOUNDATION'].split('-')[1][:3]\r\n self._context = context or self._context_map[context_key]\r\n except KeyError:\r\n if context_key not in self._unmapped_contexts:\r\n LOGGER.error(\"Can't map foundation %s\", context_key)\r\n raise InvalidFoundation\r\n else:\r\n self._context = None\r\n\r\n if self._context:\r\n available_contexts = self._context_list()\r\n if not available_contexts \\\r\n or self._context not in available_contexts:\r\n LOGGER.error(\"Context %s (foundation %s) not in context list %s\",\r\n self._context,\r\n PARAMS['FOUNDATION'],\r\n ','.join(available_contexts) if available_contexts\r\n else \"(no contexts)\")\r\n raise ContextNotAvailable\r\n\r\n LOGGER.info(\"Initialize fetcher (context(s): %s)\", self._context)\r\n super().__init__()", "def __init__(self, cache_man=None):\n # manager of redis-pandas caching\n self.cache_man = cache_man\n super().__init__()", "def __init__(self, *args):\n this = _ida_hexrays.new_citem_locator_t(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, db_input=None):\n\n if db_input is None:\n db_input = []\n elif type(db_input) != list:\n db_input = [db_input]\n self.db = copy.deepcopy(db_input)", "def __init__(self, config):\n # Initialize key variables\n connection_string = (\n '{}:{}'\n ''.format(\n config.memcached_hostname(), config.memcached_port()))\n self.cache = memcache.Client([connection_string], debug=0)", "def __getitem__(self, dbname):\n return Database(dbname=dbname, connection=self)", "def __init__(self, database='/tmp/blingalytics_cache'):\n self.database = database\n self._create_metadata_table()", "def load_cached(cache_path, in_dir):\n\n print(\"Creating dataset from the files in: \" + in_dir)\n\n # If the object-instance for DataSet(in_dir=data_dir) already\n # exists in the cache-file then reload it, otherwise create\n # an object instance and save it to the cache-file for next time.\n\n cache=Cache()\n dataset = cache.cache_data(cache_path=cache_path,\n fn=Dataset, in_dir=in_dir)\n\n return dataset", "def __init__(self, cache, userProjects, tagRefs, commitTimes):\n\n self.cache 
= cache\n self.userProjects = userProjects\n self.tagRefs = tagRefs\n self.commitTimes = commitTimes\n\n self.initTime = time.time()", "def __init__(self, location, option):\n super(MyCache, self).__init__(location, option)\n self.dcreate('ttl')", "def mk_id_lookups(self):\n id_lookups = {}\n for ns in self.ddef.keys():\n id_lookups[ns] = self.mk_id_lookup(ns)\n return id_lookups", "def __init__(self, cache=None, num_args=None):\n self.cache = cache if cache is not None else {}\n self.num_args = num_args", "def getDBSApi():\n if 'testbed' in dbs3_url:\n dbs3_url_reader = dbs3_url + '/dbs/int/global/DBSReader'\n else:\n dbs3_url_reader = dbs3_url + '/dbs/prod/global/DBSReader'\n\n from dbs.apis.dbsClient import DbsApi\n\n\n #this needs to come after /data/srv/wmagent/current/apps/wmagent/etc/profile.d/init.sh is sourced \n dbsApi = DbsApi(url = dbs3_url_reader)\n return dbsApi", "def load_internal_cache(cls, pex, pex_info):\r\n internal_cache = os.path.join(pex, pex_info.internal_cache)\r\n with TRACER.timed('Searching dependency cache: %s' % internal_cache):\r\n if os.path.isdir(pex):\r\n for dist in find_distributions(internal_cache):\r\n yield dist\r\n else:\r\n for dist in cls.write_zipped_internal_cache(pex, pex_info):\r\n yield dist", "def get_deputies_list(self, bid_list):\n for bid in bid_list:\n entry = self.get_deputy(bid, flush_cache=True)\n yield entry", "def fromDict(cls, userDBDict : dict, **kwargs) -> bbUserDB:\n # Instance the new bbUserDB\n newDB = bbUserDB()\n # iterate over all user IDs to spawn\n for id in userDBDict.keys():\n # Construct new bbUsers for each ID in the database\n # JSON stores properties as strings, so ids must be converted to int first.\n newDB.addUserObj(bbUser.bbUser.fromDict(userDBDict[id], id=int(id)))\n return newDB", "def lookup(self, skip_cache=False, **lookup_vars):\n\n uri = self.get_lookup_url(**lookup_vars)\n return self.get_from_uri(uri, skip_cache=skip_cache)", "def get_history_with_cache(conn, symbol: str, year: str):\n if not check_table_exist(conn, f\"History{year}\"):\n gen_table_for_history(conn, year)\n # load db as pandas Dataframe\n df = load_table_history(conn, year)\n # if not cached\n if not df.Symbol.isin([symbol]).any():\n try:\n response = get_history_for(symbol=symbol)\n for k, v in response[\"Time Series (Daily)\"].items():\n if k.startswith(year):\n insert_history(conn,\n symbol=symbol,\n date=k,\n open=v[\"1. open\"],\n high=v[\"2. high\"],\n low=v[\"3. low\"],\n close=v[\"4. close\"],\n volume=v[\"6. volume\"],\n adjusted=v[\"5. 
adjusted close\"])\n # when API call limit (5 per minute) reached\n except KeyError:\n print(\n f\"History({symbol}, {year}): API call limit reached.\",\n \"Try again later.\"\n )\n # reload database\n df = load_table_history(conn, year)\n # return price history for the given symbol & year\n return df.query(f\"Symbol == '{symbol}'\")", "def _get_db(self, db_name: str) -> shelve.DbfilenameShelf:\n db_path = os.path.join(self.cache_folder, db_name)\n db = shelve.open(db_path)\n logging.info(f'Opened cache file {db_path!r}')\n return db", "def __init__(self, database):\n # setup database\n if isinstance(database, str):\n if path.exists(database):\n database = SqliteDB(database)\n else:\n raise ValueError(\"EbeDBReader.__init__: the input argument must be an existing database file.\")\n if isinstance(database, SqliteDB):\n self.db = database\n else:\n raise TypeError(\"EbeDBReader.__init__: the input argument must be a string or a SqliteDB database.\")\n # setup lookup tables\n self.ecc_lookup = dict((item[1], item[0]) for item in self.db.selectFromTable(\"ecc_id_lookup\"))\n self.pid_lookup = dict(self.db.selectFromTable(\"pid_lookup\"))\n\n # set self.hasInitializedStringSubstitution to none for lazy initialization in evaluateExpression function\n self.hasInitializedStringSubstitution = False", "def __init__(self, id, data={}):\n\t\tself.__id = id\n\t\tself.__dStore = data", "def from_db(data):\n \n return Collection(\n dbid = data['id'],\n title = data['title'],\n query = data['query'],\n priority = data['priority'],\n export = data['export'])", "def load_cache():\n return {}", "def __init__(self, a, b):\n super(RefreshCacheForGameThread, self).__init__()\n self.theRepository = a\n self.theKey = b", "def __init__(self):\n # better to be a prime number, less collision\n self.key_space = 2069\n self.hash_table = [Bucket() for i in range(self.key_space)]", "def get_dbid_objects(queue, media_base):\n dbid_map = {}\n logging.info(\"Collecting DBID object information\")\n for file_object in os.scandir(media_base + \"/metadata/dbid\"):\n if not file_object.is_file():\n continue\n with open(file_object.path, \"r\") as toml_file:\n metadata = toml.load(toml_file)\n dbid_map.update({metadata[\"dbid\"]: metadata})\n logging.info(\"DBID object collection completed\")\n queue.put(dbid_map)", "def open(self):\n super(MemoryCache, self).open()\n\n def _timer():\n # Use a custom timer to try to spread expirations. 
Within one instance it\n # won't change anything but it will be better if you run multiple instances.\n return time.time() + self.__ttl * random.uniform(-0.25, 0.25)\n\n self.__cache = cachetools.TTLCache(\n maxsize=self.__size, ttl=self.__ttl, timer=_timer\n )", "def resolve_objects_drs_hostname_from_id(\n object_ids: List[Downloadable], resolved_drs_prefix_cache: dict, mds_url: str\n) -> None:\n for entry in object_ids:\n if entry.hostname is None:\n # if resolution fails the entry hostname will still be None\n entry.hostname, nid, drs_type = resolve_drs_hostname_from_id(\n entry.object_id, resolved_drs_prefix_cache, mds_url\n )\n if (\n drs_type == \"hostname\"\n ): # drs_type is a hostname so object id will be the GUID\n entry.object_id = nid", "def __init__(self, ak=None, domain='api.map.baidu.com', scheme='http',\n sn=None, callback=None, timeout=10,\n proxies=None, output='json'):\n super(BaiduV2, self).__init__(\n scheme=scheme, timeout=timeout, proxies=proxies\n )\n\n self.ak = ak\n self.output = output\n self.domain = domain.strip('/')\n self.scheme = scheme\n self.doc = {}\n\n self.geocoding_api = '%s://%s/geocoder/v2/' % (self.scheme, self.domain)\n self.place_api = '%s://%s/place/v2/' % (self.scheme, self.domain)", "def from_id(cid):\n return DrugBank(cid)", "def __init__(self, **kwargs):\n creator = kwargs.pop(\"creator\", None)\n if not creator:\n import MySQLdb\n creator = MySQLdb\n mincached = kwargs.pop(\"mincached\", 2)\n maxcached = kwargs.pop(\"maxcached\", 10)\n maxshared = kwargs.pop(\"maxshared\", 10)\n maxconnections = kwargs.pop(\"maxconnections\", 20)\n blocking = kwargs.pop(\"blocking\", 0)\n reset = kwargs.pop(\"reset\", True)\n maxusage = kwargs.pop(\"maxusage\", 0)\n setsession = kwargs.pop(\"setsession\", [\"set autocommit = 0\"])\n ping = kwargs.pop(\"ping\", 1)\n\n self._pool = PooledDB(creator=creator, mincached=mincached, maxcached=maxcached,\n maxshared=maxshared, maxconnections=maxconnections,\n blocking=blocking, maxusage=maxusage,reset=reset,\n setsession=setsession, ping=ping, **kwargs)", "def init_issue_tracker_mock():\n fetch_mock = FetchServiceMock()\n apiproxy_stub_map.apiproxy.RegisterStub('urlfetch', fetch_mock)", "def get_cached(factory, cache_file_name, **kwargs):\n if os.path.exists(cache_file_name):\n _logger.info('Loading {}'.format(cache_file_name))\n cached = deserialize(cache_file_name)\n return cached\n\n _logger.info('Creating {}'.format(cache_file_name))\n data = factory()\n serialize(cache_file_name, data, **kwargs)\n return data", "def lazy_loader(self, entry):\n lookup = plugin.get('api_bluray', self).lookup\n\n try:\n with Session() as session:\n title, year = split_title_year(entry['title'])\n movie = lookup(title=title, year=year, session=session)\n entry.update_using_map(self.field_map, movie)\n except LookupError:\n log_once('Bluray lookup failed for %s' % entry['title'], logger, 'WARNING')", "def __init__(self):\n self.currencies = {\n currency.code: currency for currency in Currency.objects.all()\n }\n self.channels = {\n channel.sub_source: channel.channel\n for channel in LinnworksChannel.objects.all()\n }\n self.countries = {\n country.ISO_code: country for country in Country.objects.all()\n }\n self.shipping_services = {\n service.name: service.shipping_service\n for service in LinnworksShippingService.objects.all()\n }", "def _from_redis(self):\n\n # OSM ways and nodes tables\n logging.info(\"Loading OSM ways from redis\")\n self._ways = self._gdf_from_redis(self._bbid + \"_ways\", geometry='geometry')\n if self._ways 
is None:\n raise Exception(\"No ways data found for this bbid, please provide a pbf file or a different database\")\n logging.info(\"Loading OSM nodes from redis\")\n self._nodes = self._df_from_redis(self._bbid + \"_nodes\")\n if self._nodes is None:\n raise Exception(\"No nodes data found for this bbid, please provide a pbf file or a different database\")\n\n # graph nodes and edges tables (storing only ids and edge lengths)\n logging.info(\"Loading graph nodes and edges from redis\")\n df_nodes = self._df_from_redis(self._bbid + \"_graph_nodes\")\n df_edges = self._df_from_redis(self._bbid + \"_graph_edges\")\n self._edges = df_edges # todo clean up what is stored and what not\n if df_nodes is None or df_edges is None:\n warnings.warn(\"Found ways and nodes data, but not a routing graph for this bbid. Making graph now.\")\n self._make_graph(self._nodes, self._ways)\n gdf_nodes, gdf_edges = osmnx.utils_graph.graph_to_gdfs(self._graph, node_geometry=False,\n fill_edge_geometry=False)\n self._gdf_to_redis(self._bbid + \"_graph_nodes\", gdf_nodes[['id']]) # ['id', 'x', 'y'] to store coordinates\n self._gdf_to_redis(self._bbid + \"_graph_edges\", gdf_edges[['id', 'length', 'u', 'v', 'key']])\n else:\n logging.info(\"Reassembling graph\")\n self._graph = networkx.convert_matrix.from_pandas_edgelist(df_edges, source='u', target='v',\n edge_attr=['id', 'key', 'length'],\n create_using=networkx.MultiDiGraph)\n logging.info(\"Done loading from redis\")", "def __init__(self, quasar, name, start_date, end_date):\n self.quasar = quasar\n self.name = name\n self.start = start_date\n self.end = end_date\n\n self.sampling_freq = 120 # Hz\n\n self.cache = [[None, None] for x in range(CACHE_ENTRIES)]" ]
[ "0.6530035", "0.5592927", "0.5449668", "0.54129845", "0.5390788", "0.5236379", "0.52326137", "0.52017933", "0.5083397", "0.50474405", "0.49929607", "0.4948093", "0.49268007", "0.48906374", "0.48458242", "0.48337904", "0.4833684", "0.48185053", "0.481795", "0.4808004", "0.47927582", "0.47793254", "0.47570118", "0.4744562", "0.4722962", "0.4712565", "0.47098136", "0.47068498", "0.46908027", "0.46823695", "0.46792215", "0.46789208", "0.46788606", "0.46785367", "0.46775603", "0.46759346", "0.4638718", "0.4635922", "0.46277475", "0.46226355", "0.46204624", "0.46139285", "0.4612136", "0.4604873", "0.46026087", "0.4594754", "0.45943043", "0.45932704", "0.45812696", "0.45805904", "0.45776865", "0.45759615", "0.45712548", "0.45592308", "0.45580202", "0.45564356", "0.45514524", "0.451714", "0.4514412", "0.45113525", "0.44916427", "0.44876662", "0.44868153", "0.44815424", "0.44722477", "0.44700274", "0.44617835", "0.44573346", "0.4456629", "0.44559783", "0.44535515", "0.44419086", "0.4428464", "0.44283268", "0.44261912", "0.4424356", "0.44037288", "0.439616", "0.4393248", "0.4389923", "0.43893898", "0.43877015", "0.43837678", "0.43809855", "0.4379466", "0.43786108", "0.43772724", "0.43771172", "0.4375013", "0.43741012", "0.43731046", "0.4371556", "0.4366039", "0.43652895", "0.43652868", "0.43587083", "0.435259", "0.43492287", "0.4347875", "0.43378738" ]
0.6934165
0
Factory method to create a fake data source. This refers to a static data file that is in the current package. This function exists for testing purposes, as it avoids network traffic to get the actual up-to-date ID mapping.
def create_fake(cls):
    source = pkg_resources.open_text('baseball_id', 'sample.master.csv', encoding='iso-8859-1')
    c = lookup.Cache(source)
    return c
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_data_source_soaps_id_dynamic_datas_get(self):\n pass", "def init_locally_processed_dataset(directory, source_datasets, uuid_=None):\n md = ptype.DatasetMetadata(\n id_=uuid_,\n # Default creation time is creation of an image.\n creation_dt=datetime.datetime.utcfromtimestamp(directory.stat().st_ctime),\n lineage=ptype.LineageMetadata(\n machine=ptype.MachineMetadata(\n hostname=socket.getfqdn(),\n runtime_id=_RUNTIME_ID,\n uname=' '.join(os.uname())\n ),\n source_datasets=source_datasets\n )\n )\n _note_package_vers(md)\n return md", "def _generate_data(self, codec='deflate'):\n _logger.info('generating fake data')\n (desc, path) = mkstemp()\n os.close(desc)\n os.remove(path)\n try:\n call([\n 'node', osp.join(DPATH, os.pardir, os.pardir, 'scripts', 'random'),\n self.path, str(self.n_records), path\n ])\n yield path\n finally:\n if osp.exists(path):\n os.remove(path)", "def init_existing_dataset(directory, source_datasets, uuid_=None, source_hostname=None):\n md = ptype.DatasetMetadata(\n id_=uuid_,\n # Default creation time is creation of an image.\n creation_dt=datetime.datetime.utcfromtimestamp(directory.stat().st_ctime),\n lineage=ptype.LineageMetadata(\n machine=ptype.MachineMetadata(\n hostname=source_hostname\n ),\n source_datasets=source_datasets\n )\n )\n _note_package_vers(md)\n return md", "def test_dataloader(self) -> DataLoader:\n return self._custom_data_loader()", "def fixture_example_data():\n import_example_data()", "def code_builder(request, tmp_path_factory) -> dataset_builder.DatasetBuilder:\n tmp_path = tmp_path_factory.mktemp('tfds_datasets') # Temporary data_dir\n builder_cls = request.param\n # Generate the dataset (only once for all tests as scope == 'module').\n builder = builder_cls(data_dir=tmp_path)\n builder.download_and_prepare()\n\n # Update the default DATA_DIR during the test.\n with mock.patch.object(constants, 'DATA_DIR', str(tmp_path)):\n yield builder", "def setup_dummy_data_manager():\n import repoze.filesafe\n repoze.filesafe._local.manager = mgr = DummyDataManager()\n return mgr", "def load_data_source(data_source):\n source_module = __import__('source_'+data_source)\n get_source = getattr(source_module, 'get_source')\n return get_source()", "def fixture_retrieved():\n from aiida.plugins import DataFactory\n from aiida_logger.tests import TEST_DIR\n\n retrieved = DataFactory('folder')()\n retrieved.put_object_from_tree(path=os.path.join(TEST_DIR, 'input_files'))\n\n return retrieved", "def dataset_initialize(self, folder):\r\n if not os.path.isdir(folder):\r\n raise ValueError('Invalid folder: ' + folder)\r\n\r\n ref = self.config_values[self.CONFIG_NAME_USER] + '/INSERT_SLUG_HERE'\r\n licenses = []\r\n default_license = {'name': 'CC0-1.0'}\r\n licenses.append(default_license)\r\n\r\n meta_data = {\r\n 'title': 'INSERT_TITLE_HERE',\r\n 'id': ref,\r\n 'licenses': licenses\r\n }\r\n meta_file = os.path.join(folder, self.DATASET_METADATA_FILE)\r\n with open(meta_file, 'w') as f:\r\n json.dump(meta_data, f, indent=2)\r\n\r\n print('Data package template written to: ' + meta_file)\r\n return meta_file", "def makeIdFactory(self, dataRef):\n # With the default configuration, this IdFactory doesn't do anything, because\n # the IDs it generates are immediately overwritten by the ID from the reference\n # catalog (since that's in config.measurement.copyColumns). 
But we create one here anyway, to\n # allow us to revert back to the old behavior of generating new forced source IDs,\n # just by renaming the ID in config.copyColumns to \"object_id\".\n expBits = dataRef.get(self.config.coaddName + \"CoaddId_bits\")\n expId = int(dataRef.get(self.config.coaddName + \"CoaddId\"))\n return lsst.afw.table.IdFactory.makeSource(expId, 64 - expBits)", "def registerSampleData():\n # It is always recommended to provide sample data for users to make it easy to try the module,\n # but if no sample data is available then this method (and associated startupCompeted signal connection) can be removed.\n\n import SampleData\n iconsPath = os.path.join(os.path.dirname(__file__), 'Resources/Icons')\n\n # To ensure that the source code repository remains small (can be downloaded and installed quickly)\n # it is recommended to store data sets that are larger than a few MB in a Github release.\n\n # RegularizedFastMarching1\n SampleData.SampleDataLogic.registerCustomSampleDataSource(\n # Category and sample name displayed in Sample Data module\n category='RegularizedFastMarching',\n sampleName='RegularizedFastMarching1',\n # Thumbnail should have size of approximately 260x280 pixels and stored in Resources/Icons folder.\n # It can be created by Screen Capture module, \"Capture all views\" option enabled, \"Number of images\" set to \"Single\".\n thumbnailFileName=os.path.join(iconsPath, 'RegularizedFastMarching1.png'),\n # Download URL and target file name\n uris=\"https://github.com/Slicer/SlicerTestingData/releases/download/SHA256/998cb522173839c78657f4bc0ea907cea09fd04e44601f17c82ea27927937b95\",\n fileNames='RegularizedFastMarching1.nrrd',\n # Checksum to ensure file integrity. Can be computed by this command:\n # import hashlib; print(hashlib.sha256(open(filename, \"rb\").read()).hexdigest())\n checksums = 'SHA256:998cb522173839c78657f4bc0ea907cea09fd04e44601f17c82ea27927937b95',\n # This node name will be used when the data set is loaded\n nodeNames='RegularizedFastMarching1'\n )\n\n # RegularizedFastMarching2\n SampleData.SampleDataLogic.registerCustomSampleDataSource(\n # Category and sample name displayed in Sample Data module\n category='RegularizedFastMarching',\n sampleName='RegularizedFastMarching2',\n thumbnailFileName=os.path.join(iconsPath, 'RegularizedFastMarching2.png'),\n # Download URL and target file name\n uris=\"https://github.com/Slicer/SlicerTestingData/releases/download/SHA256/1a64f3f422eb3d1c9b093d1a18da354b13bcf307907c66317e2463ee530b7a97\",\n fileNames='RegularizedFastMarching2.nrrd',\n checksums = 'SHA256:1a64f3f422eb3d1c9b093d1a18da354b13bcf307907c66317e2463ee530b7a97',\n # This node name will be used when the data set is loaded\n nodeNames='RegularizedFastMarching2'\n )", "def mock_legacy_dataset(mock_dataset_with_cache_dir):\n archive_path = os.path.join(resource_filename('gtmcore.dataset.tests', 'data'), 'test-legacy-dataset.zip')\n temp_path = os.path.join(tempfile.gettempdir(), 'test-legacy-dataset.zip')\n shutil.copyfile(archive_path, temp_path)\n conf_file = mock_dataset_with_cache_dir[0].client_config.config_file\n import_dataset_from_zip(archive_path=temp_path, username=USERNAME,\n owner=USERNAME, config_file=conf_file)\n\n im = InventoryManager()\n ds = im.load_dataset(USERNAME, USERNAME, 'test-legacy-dataset')\n m = Manifest(ds, USERNAME)\n\n # yield dataset, manifest, working_dir\n yield ds, m, mock_dataset_with_cache_dir[1]", "def F(f):\n return datafile(f, __name__)", "def create_dataset(opt):\n\tdata_loader = 
CustomDatasetDataLoader(opt)\n\tdataset = data_loader.load_data()\n\treturn dataset", "def __fake_data__(self):\n\n # Set directory for configuration files\n self.configFilePath = q.system.fs.joinPaths(q.dirs.varDir, 'tftproot')\n \n # Add some share's\n for i in xrange(3):\n share = NFSShare()\n share.name = 'share-%s' % q.base.idgenerator.generateRandomInt(0, 255)\n self.shares[share.name] = share", "def create_dataset(opt):\n data_loader = CustomDatasetDataLoader(opt)\n dataset = data_loader.load_data()\n return dataset", "def _test_template_data(self):\n chars=string.ascii_uppercase + string.digits\n id = ''.join(random.choice(chars) for x in range(6))\n\n return {\n 'test_module': self.test_modulename(),\n 'driver_module': self.driver_modulename(),\n 'driver_dir': self.driver_dir(),\n 'file': self.driver_relative_path(),\n 'author': self.metadata.author,\n 'driver_name': self.metadata.driver_name,\n 'constructor': self.metadata.constructor,\n 'full_instrument_lower': self.metadata.driver_name.lower(),\n 'full_instrument_camelcase': self.driver_name_camelcase(),\n }", "def init_static_data(log_to_console=False):\n # These are annoyingly necessary to live in the DB, currently. \n # Really this should be app logic, I think.\n load_report_types()\n load_roles()\n loc_file = getattr(settings, \"STATIC_LOCATIONS\")\n if loc_file:\n load_locations(loc_file, log_to_console=log_to_console)\n product_file = getattr(settings, \"STATIC_PRODUCTS\")\n if product_file:\n load_products(product_file, log_to_console=log_to_console)", "def autogen_dataset_dir():\n return TabularDataset.autogen('tests/data/dummy_tabular',\n seed=42,\n sep=',')", "def prepare_dataset(fpath):\n raise NotImplementedError", "def __init__(self, data_source_identifier, verbose=True):\n pass", "def dir_dataset():\n return TabularDataset.from_path('tests/data/dummy_tabular', sep=',')", "def getFake(directory=\"../FakeRealNews/Data\"):\r\n return pd.read_csv(directory + \"/Fake.csv\")", "def test_data_source_soaps_id_dynamic_datas_post(self):\n pass", "def __init__(self, table, ioloop, iex_source, **kwargs):\n data_cleaner = kwargs.pop(\"data_cleaner\")\n super(IEXStaticDataSource, self).__init__(\n table, ioloop, data_cleaner=data_cleaner\n )\n self._iex_source = iex_source\n self._iex_source_kwargs = kwargs", "def autogen_dataset_dir_with_test():\n return TabularDataset.autogen('tests/data/dummy_tabular',\n test_path='tests/data/dummy_tabular_test',\n seed=42,\n sep=',')", "def mock_dataset_with_cache_dir():\n conf_file, working_dir = _create_temp_work_dir()\n with patch.object(Configuration, 'find_default_config', lambda self: conf_file):\n im = InventoryManager(conf_file)\n ds = im.create_dataset(USERNAME, USERNAME, 'dataset-1', description=\"my dataset 1\",\n storage_type=\"gigantum_object_v1\")\n\n yield ds, working_dir, ds.git.repo.head.commit.hexsha\n shutil.rmtree(working_dir)", "def test_data_source_soaps_id_get(self):\n pass", "def test_data_source_soaps_id_patch(self):\n pass", "def get_fixture(self, filename, globs=None):\n if globs is None:\n module = sys.modules[self.__class__.__module__]\n base_path = os.path.join(os.path.dirname(module.__file__), 'tests')\n else:\n base_path = os.path.dirname(globs['__file__'])\n return os.path.join(base_path, 'data', filename)", "def _create_dataset(source=''):\n return ExperimentalDataset()", "def reproduce(self) -> LocalDataset:\n return LocalDataset(self.path)", "def reproduce(self) -> LocalDataset:\n return LocalDataset(self.path)", "def TestDataFile(*args):\n return 
sdk_test_base.SdkBase.Resource('tests', 'unit', 'surface', 'container',\n 'hub', 'testdata', *args)", "def test_missing_data_sources(self):", "def source_data_files(self, data_dir, tmp_dir, dataset_split):\n raise NotImplementedError()", "def generate_data(path=resource_filename('locals', 'data/fake/'), mag_range=(11.13,18)):\n # Get some random spectra\n try:\n files = glob.glob('/user/jfilippazzo/Models/ACES/default/*.fits')[::50]\n except:\n files = glob.glob('/Users/jfilippazzo/Documents/Modules/_DEPRECATED/limb_dark_jeff/limb/specint/*.fits')[::20]\n \n # Make a fake source catalog (with only essential columns for now)\n catpath = os.path.join(path,'fake_source_catalog.ecsv')\n ids = list(range(len(files)))\n coords = SkyCoord([89.7455]*len(ids), [-29.05744]*len(ids), unit='deg', frame='icrs')\n cat = at.QTable([ids,coords], names=('id','icrs_centroid'))\n cat.write(catpath)\n \n # Open the x1d file\n header = fits.getheader(resource_filename('locals', 'data/template_x1d.fits'))\n \n # Make Spectrum objects from models at R=150\n wavelength = np.arange(0.05,2.6,0.0001)[::66]*q.um\n \n # Normalize the spectra to a random F200W magnitude\n spectra = []\n f200w = Bandpass('NIRISS.F200W')\n f200w.wave_units = q.um\n for file in files:\n \n # Create Spectrum\n flux = fits.getdata(file)[-1][::66]*q.erg/q.s/q.cm**2/q.AA\n unc = flux/50.\n spec = Spectrum(wavelength, flux, unc)\n \n # Normalize to F200W\n mag = np.random.uniform(*mag_range)\n norm_spec = spec.renormalize(mag, f200w)\n spectra.append(norm_spec)\n \n # Make a separate x1d file and photometry file for each bandpass\n # containing data for each source\n for band in NIRISS_bands:\n \n try:\n \n # Get the Bandpass object\n bp = Bandpass(band)\n bp.wave_units = q.um\n \n # Make x1d file for spectra\n x1d_file = os.path.join(path,'{}_x1d.fits'.format(band))\n x1d_hdu = fits.HDUList(fits.PrimaryHDU(header=header))\n \n # Make csv file for photometry\n phot_file = os.path.join(path,'{}_phot.csv'.format(band))\n phot_data = at.Table(names=('id','band','magnitude','magnitude_unc'), dtype=(int,'S20',float,float))\n \n # Iterate over spectra\n for id,(f,spec) in enumerate(zip(files,spectra)):\n \n # Trim spectrum to bandpass for x1d file\n spec = Spectrum(*spec.spectrum, trim=[(0*q.um,bp.WavelengthMin*1E-4*q.um),(bp.WavelengthMax*1E-4*q.um,10*q.um)])\n \n # Calculate magnitude and add to photometry table\n mag, mag_unc = spec.synthetic_magnitude(bp, force=True)\n phot_data.add_row([id, band, mag, mag_unc])\n \n # Add source spectrum params for verification\n params = f.split('/')[-1].split('-')\n header['TEFF'] = int(params[0].replace('lte',''))\n header['LOGG'] = float(params[1][:4])\n header['FEH'] = float(params[-6][:-8].split('+')[-1])\n header['FILEPATH'] = f\n header['PUPIL'] = band\n\n # Put spectrum in x1d fits file\n data = fits.BinTableHDU(data=np.rec.array(list(zip(*spec.data)),\n formats='float32,float32,float32',\n names='WAVELENGTH,FLUX,ERROR'),\n header=header)\n data.name = 'EXTRACT1D'\n \n x1d_hdu.append(data)\n \n # Write the photometry file\n phot_data.write(phot_file, format='ascii.csv')\n del phot_data\n \n # Write the x1d file\n x1d_hdu.writeto(x1d_file, overwrite=True)\n del x1d_hdu\n \n except IOError:\n pass", "def test_constructor_filename(get_pseudo_potential_data, implicit, source_type):\n pseudo = get_pseudo_potential_data()\n explicit_filename = 'custom.dat'\n\n # Copy the content of the test pseudo to file in the current working directory\n filepath = pathlib.Path('tempfile.pseudo')\n\n with open(filepath, 
mode='wb') as handle:\n handle.write(pseudo.get_object_content(pseudo.filename, mode='rb'))\n handle.flush()\n\n if source_type == 'stream':\n with open(filepath, 'rb') as handle:\n source = io.BytesIO(handle.read())\n elif source_type == 'str_absolute':\n source = str(filepath.absolute())\n elif source_type == 'str_relative':\n source = str(filepath.name)\n elif source_type == 'pathlib.Path':\n source = filepath\n\n if implicit:\n node = PseudoPotentialData(source, filename=None)\n # If the source type was a stream, we pass a bytestream which doesn't have a name and so the name will be\n # determined by the baseclass which has some default, but in this case we don't have to check anything.\n if source_type != 'stream':\n assert node.filename == filepath.name\n else:\n node = PseudoPotentialData(source, filename=explicit_filename)\n assert node.filename == explicit_filename", "def fakedata():\n if User.query.filter_by(email='[email protected]').first():\n print ('fake data already generated')\n else:\n generate_test_confs() # load testing confs and tracks\n generate_fake_tickets() # create fake tickets\n generate_test_users() # create named fake users\n # generate_fake_users(100) # create random users\n # add_self_follows() # create self-follows for all users\n generate_fake_papers(100) # create random papers\n generate_fake_reviews() # create random reviews\n generate_fake_transactions() # create fake tickets\n generate_fake_schedule()\n generate_default_addons()", "def test_data_infos__default_db_directories(self):\n test_dataset_root = osp.join(self.data_dir, 'VOCdevkit', 'VOC2007')\n custom_ds = self.dataset_class(\n data_root=test_dataset_root,\n ann_file=osp.join(test_dataset_root, 'ImageSets', 'Main',\n 'trainval.txt'),\n pipeline=[],\n classes=('person', 'dog'),\n test_mode=True)\n\n self.assertListEqual([{\n 'id': '000001',\n 'filename': osp.join('JPEGImages', '000001.jpg'),\n 'width': 353,\n 'height': 500\n }], custom_ds.data_infos)", "def create_datagrabber(data_path, template_path, template_args,\n field_template=None,\n infields=['subject_id', 'session_id'],\n outfields=['raw_file']):\n\n datasource = pe.Node(interface=nio.DataGrabber(infields=infields,\n outfields=outfields),\n name='datasource')\n\n datasource.inputs.base_directory = data_path\n datasource.inputs.template = template_path\n\n if field_template:\n datasource.inputs.field_template = field_template\n if type(template_args) == list:\n datasource.inputs.template_args = dict(raw_file=template_args)\n elif type(template_args) == dict:\n datasource.inputs.template_args = template_args\n\n datasource.inputs.sort_filelist = True\n\n return datasource", "def test_make_dataset_happy_path(self):\n # User story: user runs src.make_dataset() on the current directory\n # and gets a fully functional dataset\n pass", "def get_dataset(\n file_pattern: List[str],\n data_accessor: tfx.components.DataAccessor,\n schema: schema_pb2.Schema,\n batch_size: int = 200,\n) -> tf.data.Dataset:\n dataset = data_accessor.tf_dataset_factory(\n file_pattern,\n tfxio.TensorFlowDatasetOptions(\n batch_size=batch_size, label_key=features.TARGET_FEATURE_NAME\n ),\n schema=schema,\n ).repeat()\n\n return dataset", "def create_source(cls, data):\n class SourceOutput(OutputPort):\n\n \"\"\"A port attached to a source task.\"\"\"\n\n name = '0'\n description = str(data)\n\n def emits(self):\n \"\"\"Return the type of the provided datum.\"\"\"\n return type(data)\n\n class Source(Task):\n\n \"\"\"Generated source task.\"\"\"\n\n output_ports = {'0': 
SourceOutput}\n\n def get_input_data(self, name='0'):\n \"\"\"Return the datum associated with this source.\"\"\"\n return data\n\n def run(self, *arg, **kw):\n \"\"\"Do nothing.\"\"\"\n super(Source, self).run(*arg, **kw)\n self._output_data['0'] = data\n\n return Source()", "def get_generated_data_source(num_players, min_rating=DEFAULT_MIN_RATING, max_rating=DEFAULT_MAX_RATING):\n return lambda: generate_players(num_players, min_rating, max_rating)", "def load_synthetic_data():\n\n pickle_object = FM().data_file \n\n with pickle_object.open('rb') as data_file: \n return pickle.load(data_file)", "def data_factory_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"data_factory_id\")", "def get_testdata(file_name):\n return os.path.join(os.path.dirname(os.path.realpath(__file__)),\n \"testdata\",\n file_name)", "def default_downloader():\n default_spec = ('geoJSON', 'SPC', '{product}', '{year:4d}',\n 'day3fw_{year:4d}{month:02d}{day:02d}_1200_{hazard:s}.geojson')\n co_path_template = os.path.join('{config[data_dir]}', *default_spec)\n pre_path_template = os.path.join('{config[pre_existing_data_dir]}',\n *default_spec)\n\n return Day3FireDownloader(target_path_template=co_path_template,\n pre_downloaded_path_template=pre_path_template)", "def default_downloader():\n default_spec = ('geoJSON', 'SPC', '{product}', '{year:4d}',\n 'day1fw_{year:4d}{month:02d}{day:02d}_{ftime:04d}_{hazard:s}.geojson')\n co_path_template = os.path.join('{config[data_dir]}', *default_spec)\n pre_path_template = os.path.join('{config[pre_existing_data_dir]}',\n *default_spec)\n\n return Day1FireDownloader(target_path_template=co_path_template,\n pre_downloaded_path_template=pre_path_template)", "def generate_data(self):\n self.remove_hdf5_file()\n hdf5_handler = self.create_hdf5_file()\n self.populate_hdf5_file(hdf5_handler, self.dataset)", "def load_data(filename) :\r\n data = Data()\r\n data.load(filename)\r\n return data", "def test_data_source_soaps_get(self):\n pass", "async def create_data_lua_file(entity_retriever: _EntityRetriever, entity_name: str) -> str:\n timestamp = _utils.get_utc_now().strftime('%Y%m%d-%H%M%S')\n data = await get_data_lua(entity_retriever)\n file_path = f'wiki_{entity_name}_data_{timestamp}.lua'\n with open(file_path, 'w') as fp:\n fp.write(data)\n return file_path", "def __init__(self, data_path=root.joinpath(\"data\")):\n self.data_path = data_path", "def get_data(force=False):\n gen_data(force)\n\n return pickle.load(open(DATA_FILE))", "def test_dataloader(self) -> torch.utils.data.DataLoader: \n return torch.utils.data.DataLoader(self.dataset_test, **self.dl_kwargs)", "def factory(*args):\n\n def wrapper(dataset):\n return Factory(dataset, *args)\n\n return wrapper", "def mock_db(tmpdir_factory):\n filename = str(tmpdir_factory.mktemp(\"data\").join(\"test.db\"))\n create_test_db(filename)\n return filename", "def autogen_dataset():\n return TabularDataset.autogen('tests/data/dummy_tabular/train.csv',\n seed=42,\n sep=',')", "def simple_files_data(tmpdir):\n return simple(tmpdir)[\"data\"]", "def create_data(storage, df, df_contains='xy', y_col_name=None, y_pred_col_name=None):\n return DataFactory.factories[storage].create(df, df_contains, y_col_name, y_pred_col_name)", "def create_fake_data():\n User.create_fake_users()", "def get_factory():", "def setUpTestData(cls):\n data_gen.run()", "def setUpTestData(cls):\n data_gen.run()", "def setUpTestData(cls):\n data_gen.run()", "def setUpTestData(cls):\n data_gen.run()", "def get_staticdata(self):\n return 
self.get_metadata()", "def default_downloader():\n default_spec = ('geoJSON', 'SPC', '{product}', '{year:4d}',\n 'day7fw_{year:4d}{month:02d}{day:02d}_1200_{hazard:s}.geojson')\n co_path_template = os.path.join('{config[data_dir]}', *default_spec)\n pre_path_template = os.path.join('{config[pre_existing_data_dir]}',\n *default_spec)\n\n return Day7FireDownloader(target_path_template=co_path_template,\n pre_downloaded_path_template=pre_path_template)", "def test_includes_one_new_dataset(self):\n new_dataset = factories.SourceDatasetFactory.create(source_study_version=self.study_version_3)\n response = self.client.get(self.get_url(self.study.pk))\n context = response.context\n table = context['source_dataset_table']\n self.assertIn(new_dataset, table.data)", "def default_DataSource(self, data_source):\n\n self._default_DataSource = self._get_obj_from_str(data_source)\n print(\"Setting default DataSource to {} version {}\".format(\n data_source.name,\n getattr(data_source, 'version', 'not specified')))", "def load_data(self) -> None:", "def load_data(ctx, klass=None):\n if klass:\n if klass and not klass.startswith(\"public_data.models\"):\n klass = f\"public_data.models.{klass}\"\n options = {\"class\": klass}\n connecter = ScalingoInterface(ctx.obj)\n connecter.manage_py(\"load_data\", **options)", "def setUp(self):\n self.directory = tempfile.TemporaryDirectory()\n self.dataset = self.dataset_cls(cache_root=self.directory.name)", "def _create_data_provider(self, data_source: DataSource, identifiers: Identifiers) -> DataProvider:\n if data_source.driver is not None:\n return DatabaseDataProvider(data_source, identifiers, self._injector)\n elif data_source.mime_type == DataSourceConstants.MIME_TYPE_JSON:\n return JsonDataProvider(data_source, identifiers, self._injector)\n elif data_source.mime_type == DataSourceConstants.MIME_TYPE_CSV:\n return CsvDataProvider(data_source, identifiers, self._injector)\n raise DataSourceError('No appropriate data provider', data_source)", "def makeSource(self, name):\n source = mock.Mock(spec=\"title description\".split())\n source.title = '%s title' % name\n source.description = '%s description' % name\n source.configurationView = '@%s_configuration' % name\n return source", "def get_cached(factory, cache_file_name, **kwargs):\n if os.path.exists(cache_file_name):\n _logger.info('Loading {}'.format(cache_file_name))\n cached = deserialize(cache_file_name)\n return cached\n\n _logger.info('Creating {}'.format(cache_file_name))\n data = factory()\n serialize(cache_file_name, data, **kwargs)\n return data", "def data_source():\n dataset = [0.1, 0.2, 0.3, 0.4, 0.5]\n while True:\n time.sleep(2)\n yield random.choice(dataset)", "def default_downloader():\n default_spec = ('geoJSON', 'SPC', '{product}', '{year:4d}',\n 'day4fw_{year:4d}{month:02d}{day:02d}_1200_{hazard:s}.geojson')\n co_path_template = os.path.join('{config[data_dir]}', *default_spec)\n pre_path_template = os.path.join('{config[pre_existing_data_dir]}',\n *default_spec)\n\n return Day4FireDownloader(target_path_template=co_path_template,\n pre_downloaded_path_template=pre_path_template)", "def default_downloader():\n default_spec = ('geoJSON', 'SPC', '{product}', '{year:4d}',\n 'day8fw_{year:4d}{month:02d}{day:02d}_1200_{hazard:s}.geojson')\n co_path_template = os.path.join('{config[data_dir]}', *default_spec)\n pre_path_template = os.path.join('{config[pre_existing_data_dir]}',\n *default_spec)\n\n return Day8FireDownloader(target_path_template=co_path_template,\n 
pre_downloaded_path_template=pre_path_template)", "def get_data_file():\n this_directory = os.path.dirname(__file__)\n parent_directory = os.path.dirname(this_directory)\n return os.path.join(parent_directory, '_data/fortunes.txt')", "def datafile(filename):\n return os.path.join(testdata_dir, filename)", "def _DataSourceFromFilePattern(self,\n file_pattern,\n input_source_weights=None,\n **extra_input_kwargs):\n del input_source_weights # Unused.\n\n def Process(source_id, record):\n del source_id # Unused.\n [num] = tf.py_func(int, [record], [tf.int64])\n return py_utils.NestedMap(data=num), 1\n\n # Samples random records from the data files and processes them\n # to generate batches.\n inputs, _ = generic_input.GenericInput(\n processor=Process,\n file_pattern=file_pattern,\n file_random_seed=123,\n file_buffer_size=1,\n file_parallelism=1,\n bucket_batch_limit=[1],\n bucket_upper_bound=[1])\n return inputs", "def get_sample_data_dir():\n \n return resource_filename('cdat_lite.test.test_cdms', 'sample_data')", "def mock_rdata(): \n return {\n \"authors\": [{\"full_name\": \"N. Ame\"}],\n \"owners\": [{\"full_name\": \"N. Ame\"}],\n \"submitter\": {\"full_name\": \"N. Ame\"},\n \"paper_id\": \"1234.56789\",\n \"title\": \"some title\",\n \"abstract\": \"An abstract with math $/alpha * /alpha$ for you.\",\n }", "def get_dataset(self, source='', importer='', parameters=None):\n if not source:\n raise aspecd.exceptions.MissingSourceError(\n 'A source is required to return a dataset')\n if not self.importer_factory:\n raise aspecd.exceptions.MissingImporterFactoryError(\n 'An ImporterFactory is required to return a dataset')\n dataset_ = self._create_dataset(source=source)\n importer = self.importer_factory.get_importer(source=source,\n importer=importer,\n parameters=parameters)\n dataset_.import_from(importer)\n return dataset_", "def data_factory_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"data_factory_id\")", "def data_file(name, datatype, data):\n url = f'/static/datasets/{name}/{datatype}/{data}'\n page_name = data\n return render_template(\"data_file.html\", url=url, page_name=page_name)", "def get_persisted(factory, persisted_file_name, **kwargs):\n if os.path.exists(persisted_file_name):\n _logger.info('Loading {}'.format(persisted_file_name))\n cached = deserialize(persisted_file_name)\n return cached\n\n _logger.info('Creating {}'.format(persisted_file_name))\n data = factory()\n serialize(persisted_file_name, data, **kwargs)\n return data", "def test_file_generator(self):\n row_count = 100\n header = ['col1,col2,col3']\n data = list(mock_data(row_count, len(header)))\n rows = map(lambda x: ','.join(map(str, x)), data)\n test_data = '\\n'.join(header + rows)\n\n with patch('bcipy.acquisition.datastream.generator.open',\n mock_open(read_data=test_data), create=True):\n\n gen = file_data(filename='foo', header_row=1)\n generated_data = [next(gen) for _ in range(row_count)]\n\n for i, row in enumerate(generated_data):\n self.assertEqual(row, data[i])", "def load_data(self):\n raise NotImplementedError()", "def default(cls):\n return {'data_file': 'data.csv'}", "def discovery_data(request):\n file = request.param\n p = Path(file)\n if not p.is_absolute():\n p = Path(__file__).parent / \"fixtures\" / file\n\n with open(p) as f:\n return json.load(f)", "def _make_cpp_data(id, timestamp, instrument, exchange, data):\n return DataCpp(id, timestamp, instrument, exchange, data)", "def create(cls):\n ssl._create_default_https_context = ssl._create_unverified_context\n c = 
lookup.Cache('https://raw.githubusercontent.com/spilchen/baseball_id_db/main/master.csv')\n return c", "def load_default_atf_data():\n df = load_dataframe(\"oqmd_1.2_voronoi_magpie_fingerprints\")\n return df[df['N_species'] == 2].sample(frac=0.2)", "def __post_init__(self):\n # Only do this if source_data already exists (not during its own initialization)\n if \"SOURCE_DATA\" in globals():\n for data_field in fields(self):\n setattr(self, data_field.name, getattr(SOURCE_DATA, data_field.name))" ]
[ "0.6308188", "0.62671566", "0.6175177", "0.61343235", "0.5995582", "0.5915337", "0.59063405", "0.58865434", "0.5807432", "0.57975435", "0.5794116", "0.57484156", "0.5740665", "0.56791466", "0.56658155", "0.56552786", "0.5642972", "0.56229484", "0.5622516", "0.56089175", "0.56014436", "0.5600478", "0.5591214", "0.5575909", "0.55355436", "0.55352795", "0.5533836", "0.55194515", "0.5509626", "0.5500959", "0.5495487", "0.5492537", "0.54811597", "0.5462267", "0.5462267", "0.5458741", "0.5444603", "0.5444496", "0.54438496", "0.5419", "0.540898", "0.53913313", "0.53898937", "0.5385885", "0.5375238", "0.5369999", "0.53665155", "0.5351366", "0.535003", "0.53363043", "0.53348297", "0.5332589", "0.53321874", "0.5325081", "0.5321593", "0.53106594", "0.53061193", "0.53056043", "0.5304877", "0.5303106", "0.5282418", "0.5281088", "0.52792907", "0.5275407", "0.52733004", "0.5272106", "0.5269985", "0.5269985", "0.5269985", "0.5269985", "0.5269964", "0.5260773", "0.5255784", "0.52552885", "0.5252285", "0.5248837", "0.52482176", "0.5240584", "0.52382576", "0.52360266", "0.52357167", "0.52314705", "0.5230952", "0.52297723", "0.52244955", "0.522421", "0.52237344", "0.52183205", "0.5211877", "0.5211749", "0.5203883", "0.52033967", "0.5199734", "0.5197368", "0.5196368", "0.51952237", "0.5192306", "0.5189677", "0.518331", "0.51820195" ]
0.70590085
0
The extracter moves files. The arguments input_folder and output_folder are set through a GUI. Based on the values in the column called column_name in the spreadsheet, files are copied from input_folder to output_folder. Here, these are the gilbert_numbers in the spreadsheet fed from main(). They are matched to the file names. Each gilbert_number gets its own directory in the output_folder. output_folder should be empty, or at least not already contain the same gilbert_numbers. Also copies all speaker files from input_folder to output_folder.
def extracter(spreadsheet, column_name):
    print header, "Running the extracter."
    root=Tkinter.Tk()
    root.withdraw()
    root.update()
    input_folder=tkFileDialog.askdirectory(title="Inputfolder: Please choose a directory that contains your corpus files")
    root=Tkinter.Tk()
    root.withdraw()
    root.update()
    output_folder=tkFileDialog.askdirectory(title="Outputfolder: Please choose a directory to copy files into")
    print header, "Copying files from '{}' to '{}'.".format(input_folder, output_folder)
    #collecting input files
    inputfiles=[]
    print "Locating files."
    for dirpath, subdirs, files in os.walk(input_folder):
        for f in files:
            inputfiles.append(os.path.join(dirpath, f))
            if len(inputfiles) in [1000,2000,4000,8000,1600,24000]:
                print "{} files processed, still working.".format(len(inputfiles))
    print "Found {} files.".format(len(inputfiles))
    #read from spreadsheet
    # with open(spreadsheet, "r") as spreadsheet:
    #     spreadsheet=pandas.read_csv(spreadsheet, encoding="utf-8")
    numbers_to_be_extracted= spreadsheet[column_name].unique()
    print header, "Gilbert numbers to be extracted:"
    print ",".join([unicode(i) for i in numbers_to_be_extracted])
    #copying speaker files
    print header, "Copying speaker files."
    speakerfiles=[f for f in inputfiles if re.match(".*\.txt", os.path.split(f)[1])]
    os.mkdir(os.path.join(output_folder, "speakers"))
    for s in speakerfiles:
        shutil.copy2(s, os.path.join(output_folder, "speakers"))
    #finding relevant input files
    result=[]
    for number in numbers_to_be_extracted:
        print "Processing {}, creating folder '{}'.".format(number, number)
        os.mkdir(os.path.join(output_folder, unicode(number)))
        regex="(\d+)-(\d+)-(\d+)-"+number.astype('U')+"-(\D+)\.wav"
        findings= [f for f in inputfiles if re.match(regex, os.path.split(f)[1])]
        result= result+findings
        for find in findings:
            shutil.copy2(find, os.path.join(output_folder, unicode(number), os.path.split(find)[1]))
    print header, "{} files have been copied to {}.".format(len(result), output_folder)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def form_sample_folder(self, input_folder, target_folder, sample_name):\n print(f'processing {sample_name} folder.')\n # first make a subfolder to contain the images - e.g. 'target_folder/sample_name'\n sample_dir = join(target_folder, sample_name)\n if not os.path.exists(sample_dir):\n mkdir(sample_dir)\n # resize and move the mask images - e.g. 'target_folder/sample_name/imgs_necrosis.png'\n img_file_nec = join(input_folder, 'Necrosis',\n 'Tissue Slides.'+sample_name+'.png')\n img_res = self.process_img(img_file_nec, self.rescale_ratio)\n img_nec = img_res.copy()\n cv2.imwrite(join(sample_dir, 'necrosis.png'), img_res)\n\n img_file_perf = join(input_folder, 'Perfusion',\n 'Tissue Slides.'+sample_name+'.png')\n img_res = self.process_img(img_file_perf, self.rescale_ratio)\n cv2.imwrite(join(sample_dir, 'perfusion.png'), img_res)\n\n # resize and move the maker HE and EF5 images\n files = listdir(input_folder)\n img_files = [x for x in files if x.split(\n '.')[-1] in ('tif', 'jpg', 'png')]\n for img_file in img_files:\n if (sample_name+'_' in img_file) or (sample_name+'-' in img_file):\n if ('HE-G' in img_file) or ('HE-green' in img_file) or ('HEgreen' in img_file):\n img_res = self.process_img(\n join(input_folder, img_file), self.rescale_ratio)\n if not os.path.exists(join(sample_dir, 'HE-green.png')):\n cv2.imwrite(join(sample_dir, 'HE-green.png'), img_res)\n else:\n warnings.warn(\n f\"file already exists, while processing {img_file}\")\n elif ('HE-R' in img_file) or ('HE-red' in img_file) or ('HEred' in img_file):\n img_res = self.process_img(\n join(input_folder, img_file), self.rescale_ratio)\n if not os.path.exists(join(sample_dir, 'HE-red.png')):\n cv2.imwrite(join(sample_dir, 'HE-red.png'), img_res)\n else:\n warnings.warn(\n f\"file already exists, while processing {img_file}\")\n elif ('HE-B' in img_file) or ('HE-blue' in img_file) or ('HE-blue' in img_file):\n img_res = self.process_img(\n join(input_folder, img_file), self.rescale_ratio)\n if not os.path.exists(join(sample_dir, 'HE-blue.png')):\n cv2.imwrite(join(sample_dir, 'HE-blue.png'), img_res)\n else:\n warnings.warn(\n f\"file already exists, while processing {img_file}\")\n elif 'EF5' in img_file:\n img_res = self.process_img(\n join(input_folder, img_file), self.rescale_ratio)\n img_ef5 = img_res.copy()\n if not os.path.exists(join(sample_dir, 'EF5.png')):\n cv2.imwrite(join(sample_dir, 'EF5.png'), img_res)\n else:\n warnings.warn(\n f\"file already exists, while processing {img_file}\")\n\n masked_ef5 = (img_ef5 * (img_nec <= 0)).astype(img_ef5.dtype)\n cv2.imwrite(join(sample_dir, 'EF5_masked.png'), masked_ef5)\n assert len(listdir(sample_dir)) == 7\n return", "def main(inputfolder):\n inputfolder = realpath(inputfolder)\n for data in DATASET:\n for fol in FOLDERS:\n actfile = join(inputfolder, data, data+'.txt')\n logger.info('Changing data in: %s' % actfile)\n filedata = []\n with open(actfile) as fin:\n for line in fin:\n id, y = map(int, line.strip().split('\\t'))\n if y == -1000:\n y = 0\n path = join(inputfolder, 'data'+str(data), action, 'original', str(id)+'.jpg')\n filedata.append((path, y))\n path = join(inputfolder, 'data'+str(data), action, 'original', str(id+1)+'.jpg')\n filedata.append((path, y))\n with open(actfile, 'w') as fout:\n for path, y in filedata:\n fout.write('%s %d\\n' % (path, y))", "def move_generators_to_input(self, generator_folder_glob):\n spawn_folder_names = []\n generator_folders = glob(generator_folder_glob)\n for i, folder in enumerate(generator_folders):\n base_name = 
'e01s{:02d}_{}f0000'.format(i + 1, os.path.basename(folder))\n input_destination = os.path.join(self.input_folder, base_name)\n data_destination = os.path.join(self.data_folder, base_name)\n create_folder(input_destination)\n create_folder(data_destination)\n spawn_folder_names.append(input_destination)\n create_symlinks(\n files=os.path.join(folder, '*'),\n dst_folder=os.path.relpath(input_destination)\n )\n return spawn_folder_names", "def binder(folder_name: str, output_name: str = \"output.exe\", verbose=True):\n\n # we get all the files from the given folder\n files: List[str] = os.listdir(folder_name)\n\n if files == []:\n print(\" No file in \", folder_name, \" folder\")\n return\n\n # we sort then by comparing the concatenated number\n files = sorted(files, key=lambda x: int(x.split(\"_\")[0]))\n\n if verbose:\n print(\"encoutered {} files:\".format(len(files)))\n for file in files:\n print(file)\n\n # we open an output stream\n with open(output_name, \"wb+\") as output_stream:\n # And for every gathered files\n for file in files:\n with open(os.path.join(folder_name, file), \"rb\") as input:\n # we add it at the end of the document\n output_stream.write(input.read())\n\n print(\"Done!\")", "def main():\r\n parent_dir = 'D:\\\\Profession\\\\Intern\\\\Assignments\\\\Codes\\\\Assignement Codes\\\\Part 2\\\\data_dumps'\r\n\r\n if not (os.path.isdir(parent_dir)):\r\n raise Exception(\"The directory doesn't exist\")\r\n\r\n directories = []\r\n\r\n for directory in os.listdir(parent_dir):\r\n directories.append(os.path.join(parent_dir, directory))\r\n\r\n # The group_dic represents the dictionary with keys equal to the unique dates in the directories\r\n # And the values represent a list of all files that have the same date prefix across the data_dumps\r\n group_dic = grouping(directories)\r\n\r\n # Moving Files into New Directory\r\n move(group_dic, parent_dir)\r\n print(\"Files Moved Successfully\")", "def jarvis(input_path, output_path): \n\n if not os.path.exists(f'{output_path}'):\n os.makedirs(f'{output_path}')\n\n file_list = [filename for filename in os.listdir(f'{input_path}') if '.tif' in filename]\n\n for filename in file_list:\n pathname = os.path.join(input_path, filename)\n new_name = f\"{output_path}{filename.replace('.lif - ', '_').replace('_5x-', '_')}\"\n copyfile(pathname, new_name)\n logger.info(f'{new_name}')", "def __concatonate_files(self, new_file_name, parent_folder):\n\n # make the output directory\n output_file = self.save_directory + \"/\" + new_file_name\n\n # check if save_directory exists\n if not os.path.exists(self.save_directory):\n try:\n # make the directory\n os.makedirs(self.save_directory)\n except PermissionError:\n # if the user is unable to write to this directory, we should not continue\n print(\"You do not have the correct permissions for creating a directory here. 
Please try again.\")\n exit(-1)\n\n barcode_files = []\n for root, directory, files in os.walk(parent_folder):\n # we need to know where each file is in the barcode folder so we can read data from it\n for name in files:\n barcode_files.append( os.path.join(root, name) )\n\n with open(output_file, 'w') as writer:\n for name in barcode_files:\n with open(name, 'r') as reader:\n for line in reader:\n writer.write(line)", "def main():\n # file path to csv file\n filePath = r\"C:\\Users\\DSPLab\\Research\\IAPSdata\\IAPS_selectedList_Final.csv\"\n # Get targeted List of picture number\n fileNameList = importSelectedList(filePath)\n\n # Check duplicated item in the list\n print(pd.Series(fileNameList)[pd.Series(fileNameList).duplicated()].values)\n print(len(set(fileNameList)))\n \n # Copy all the selected picture to the targeted folder\n for i in fileNameList:\n # Declare src and dest \n src = r\"C:\\Users\\DSPLab\\Research\\IAPSdata\\IAPS 1-20 Images\\\\\" + str(i) + r\".jpg\"\n dest = r\"C:\\Users\\DSPLab\\Research\\IAPSdata\\IAPS 1-20 Images\\\\Sample_final\\\\\" + str(i) + r\".jpg\"\n copyFile(src,dest)", "def start():\r\n\r\n total_files = sum([len(files) for r, d, files in os.walk(abs_source_directory)])\r\n total_files_down = total_files\r\n for i in range(total_files, 0, -1):\r\n if i % 10 == 0:\r\n total_files_down = i\r\n break\r\n current_iteration = 0\r\n last_factor = 0\r\n position = 1\r\n print(\"[{0}] {1}/{2}\".format(\" \" * 10, 0, total_files))\r\n for path, dirs, files in os.walk(abs_source_directory):\r\n for file_name in list(filter(lambda x: x.endswith(\".pdf\"), files)):\r\n file_source_path = os.path.join(path, file_name)\r\n out = re.search(normal_regex, file_source_path)\r\n # Handles normal past-papers\r\n try:\r\n found_groups = out.groups()\r\n write_copy(file_source_path, file_name, matched_groups=found_groups)\r\n except AttributeError:\r\n # Handles music past-papers\r\n if \"Music_\" in file_source_path:\r\n out = re.search(audio_music_regex, file_source_path)\r\n try:\r\n found_groups = out.groups()\r\n write_copy(file_source_path, file_name, music_groups=found_groups)\r\n except AttributeError:\r\n print(f\"CRITICAL ERROR: File not handled: {file_source_path}\")\r\n elif \"Exam Pack list of omitted papers and markschemes\" in file_name:\r\n pass\r\n else:\r\n print(f\"CRITICAL ERROR: File not handled: {file_source_path}\")\r\n current_iteration += 1\r\n if current_iteration == last_factor + total_files_down / 10:\r\n last_factor = current_iteration\r\n print(\"[{0}{1}] {2}/{3}\".format(\"-\" * position, \" \" * (10 - position), current_iteration, total_files))\r\n position += 1\r\n # Handles mp3 files\r\n for file_name in list(filter(lambda x: x.endswith(\".mp3\"), files)):\r\n file_source_path = os.path.join(path, file_name)\r\n out = re.search(audio_music_regex, file_source_path)\r\n try:\r\n found_groups = out.groups()\r\n write_copy(file_source_path, file_name, audio_groups=found_groups)\r\n except AttributeError:\r\n print(f\"CRITICAL ERROR: File not handled: {file_source_path}\")\r\n current_iteration += 1\r\n if current_iteration == last_factor + total_files_down / 10:\r\n last_factor = current_iteration\r\n print(\"[{0}{1}] {2}/{3}\".format(\"-\" * position, \" \" * (10 - position), current_iteration, total_files))\r\n position += 1\r\n print(\"[{0}] {1}/{2}\".format(\"-\" * 10, total_files, total_files))", "def genes_file_creation(input_folder):\n file_paths = {}\n for file_name in os.listdir(input_folder):\n file_paths[file_name] = input_folder + '/' 
+ file_name\n\n df = pa.DataFrame()\n \n for file_name in file_paths:\n df_temp = pa.read_csv(file_paths[file_name], sep='\\t', header=None)\n print(df_temp.columns)\n gene_column = 0\n df_temp = df_temp[[gene_column]]\n df_temp.columns = ['Gene_Name_DE']\n row = []\n file_extension = os.path.splitext(file_name)[1]\n row.append(file_name.replace(file_extension, \"\"))\n row.extend(df_temp['Gene_Name_DE'].tolist())\n df = df.append([row], ignore_index=True)\n\n df.insert(1, 'Description', 'Genes_DE')\n\n df.to_csv('DE_gene.gmt', sep='\\t', index=False, header=False)", "def read_input_txt_file(self, inputfile, outputfolder):\n\n # set output folder from sys argv and append \\\\\n self.outputfolder = outputfolder + \"\\\\\"\n\n with open(inputfile, 'r') as file2open:\n # for each line split into columns\n for line in file2open:\n #split line on tab\n splitline=line.split('\\t')\n \n \n # check if any empty lines, or fields are present in input file. do not check prefix (last element in list)\n if '' in splitline[0:7]:\n raise ValueError(\"\\nError in the input file! \\nHave you used Excel?!?!?! \\n\\\n Please open in notepad and ensure there are no blank lines and all fields are present\")\n \n # assign each value to a variable\n # barcode, subarray (numeric), dye and scan number for file 1\n file1_barcode=splitline[0]\n file1_subarray=int(splitline[1])\n file1_dye=splitline[2]\n file1_scan_number=splitline[3]\n \n # barcode, subarray (numeric), dye and scan number for file 2\n file2_barcode=splitline[4]\n file2_subarray=int(splitline[5])\n file2_dye=splitline[6]\n file2_scan_number=splitline[7].rstrip()\n \n \n # a prefix can be added to as the last column, which is added to the start of the output filename (len(splitline) == 9)\n if len(splitline)==9: \n # capture prefix and remove newline\n out_file_prefix=splitline[8].rstrip()\n #check the prefix is not empty\n assert len(out_file_prefix)!= 0,\"Prefix column is empty, were you trying to add a prefix??!\"\n \n #and append an underscore to help later.\n out_file_prefix=out_file_prefix+\"_\"\n # if no prefix specified\n else:\n out_file_prefix=None\n \n # check the given subarray values are valid. 
if they are not the text value will not be returned from the dictionary\n assert file1_subarray in self.subarray_dict, \"the given subarray for the Cy3 sample is invalid (\"+str(file2_subarray)+\")(must be a number 1-8)\"\n assert file2_subarray in self.subarray_dict, \"the given subarray for the Cy5 sample is invalid (\"+str(file2_subarray)+\")(must be a number 1-8)\"\n \n # convert the given subarray (an integer 1-8 - the keys in self.subarray_dict) into the string used in the file name (the values in self.subarray_dict)\n file1_subarray=self.subarray_dict[file1_subarray]\n file2_subarray=self.subarray_dict[file2_subarray]\n \n\n # concatenate barcode, scan number and subarray text string to create a filename pattern to search for\n filename1 = str(file1_barcode) + \"_S0\"+file1_scan_number+\"*\" + file1_subarray\n filename2 = str(file2_barcode) + \"_S0\"+file2_scan_number+\"*\" +file2_subarray\n\n # append to a list\n self.files_to_find.append((filename1, file1_dye, filename2, file2_dye,out_file_prefix))", "def __concatonate_files_controller(self):\n\n # find all barcode file paths\n barcode_directories = []\n for root, directory, files in os.walk(self.input_directory):\n for name in directory:\n barcode_directories.append( os.path.join(root, name) )\n\n # iterate through each barcode directory, item is the file path\n for item in barcode_directories:\n file = os.listdir(item)[0]\n path = item\n\n new_file_name = self.__return_new_file_name(file_name=file, file_path=path)\n self.__concatonate_files(new_file_name=new_file_name, parent_folder=path)\n self.__write_logs_to_file(new_file_name)", "def main(input_folder, output_images_folder, output_files_folder, bb_file,\n archive_folder, name_mapping):\n\n output_images_folder = Path(output_images_folder)\n output_files_folder = Path(output_files_folder)\n archive_folder = Path(archive_folder)\n output_images_folder.mkdir(exist_ok=True)\n archive_folder.mkdir(exist_ok=True)\n logger.info(\"Converting Dicom to Nifty - START\")\n converter = NiftiConverter(\n padding=\"whole_image\",\n resampling_spacing=-1,\n list_labels=[\"GTVt\"],\n cores=10,\n )\n _ = converter(input_folder, output_folder=output_images_folder)\n\n logger.info(\"Converting Dicom to Nifty - END\")\n logger.info(\"Removing extra VOI - START\")\n move_extra_vois(output_images_folder, archive_folder)\n logger.info(\"Removing extra VOI - END\")\n logger.info(\"Renaming files- START\")\n correct_names(output_images_folder, name_mapping)\n logger.info(\"Renaming files- END\")\n logger.info(\"Cleaning the VOIs - START\")\n clean_vois(output_images_folder)\n logger.info(\"Cleaning the VOIs - END\")\n\n logger.info(\"Computing the bounding boxes - START\")\n bb_df = compute_bbs(output_images_folder)\n bb_df.to_csv(bb_file)\n logger.info(\"Computing the bounding boxes - END\")", "def main_one(string_path_to_folder, destination_folder):\n # .jpg and .JPG are the same\n # photos = glob.glob(\"C:/Personal/pp2_photo/dataBase/*.JPG\") # Examples of location format\n # pho = glob.glob(\"C:/Personal/pp2_photo/dataBase/*.jpg\")\n photos = glob.glob(string_path_to_folder+\"/*.JPG\")\n print(\"Number of files: \", len(photos))\n for k in photos:\n print(get_photo_date(k))\n process_all(k, destination_folder)", "def batch_preprocess(self, input_folder, output_folder, padding=20):\n input_files = glob.glob(input_folder + '/*')\n for input_path in input_files:\n subject_name = re.search(self.KEY_WORD_FILE, input_path).group()\n output_path = output_folder + '/' + subject_name\n\n data, options = 
nrrd.read(input_path)\n data, options = self.pad_upper(data, options, padding)\n data, options = self.filter_background_to_air(data, options)\n\n print 'write ' + output_path\n nrrd.write(output_path, data, options) # too slow in Python", "def prepare_output_dir(out_dir, test_dir):\r\n\r\n if not out_dir.exists():\r\n out_dir.mkdir()\r\n\r\n # get the necessary file names\r\n file_names = get_file_names(test_dir, args.distance, print_file_names=False)\r\n\r\n # copy the images in the firstIms into the output folder\r\n for name in file_names[1][0]:\r\n file_path = Path(test_dir / name)\r\n copy_to = Path(out_dir / name)\r\n shutil.copy(file_path, copy_to)\r\n\r\n # the firstIms list does not contain the last image,\r\n # so we need to also copy the last image of the secIms into the output folder\r\n last_im = file_names[1][1][-1]\r\n shutil.copy(Path(test_dir/last_im), Path(out_dir/last_im))\r\n\r\n return file_names", "def findfif2move(self, source, destination, foldername):\n import glob\n import shutil\n\n os.chdir(source)\n mainfolders = os.listdir(u'.')\n\n for fname in mainfolders:\n try:\n if fname[:2] == foldername:\n subjectdir = os.path.join(source, fname)\n os.chdir(subjectdir)\n subfolders = os.listdir(u'.')\n \n # for each subject in the provided subfolders \n for s in subfolders:\n if s[0] == 's':\n sessiondir = os.path.join(subjectdir, s)\n os.chdir(sessiondir)\n file = glob.glob(\"*.fif\") # find files to move\n\n for files in file: \n shutil.copy(os.path.join(sessiondir,files),\n destination + fname[1:])\n except Exception:\n print(\"Something went wrong while copying the data >>>\", fname)\n pass\n os.chdir(source)", "def main():\n # checking the directory\n cwd = os.getcwd()\n print(f'The working directory: {cwd}')\n # counting time \n start_time = time.process_time()\n # passing args\n arg = parse_arguments()\n sub_dir = arg.sub_dir\n dir_out = arg.dir_out\n file_amb = 'csv_to_clean'\n names_ambigous = defaultdict(str)\n with open(file_amb, 'r') as fh:\n for line in fh:\n name = line.strip().split('/')[2]\n names_ambigous[name] = names_ambigous.get(name, '')\n names_ambigous[name] += line.strip()\n print(f'number files: {len(names_ambigous)}')\n # checking if the output directory exist\n # if not make it\n f_pwd = os.path.join('Results', 'kmer_counts')\n # get the genus names\n cnt = 0\n for name, filename in names_ambigous.items():\n cleaned = get_csv_clean(filename)\n full_path = os.path.join(f_pwd, name)\n if os.path.exists(full_path):\n print(f'The path {full_path} exist')\n pass\n else:\n os.makedirs(full_path)\n csv_name = f'{full_path}/{name}_k2_8_chr.csv'\n print(f'Checking the full path {csv_name}')\n with open(csv_name, 'w') as fout:\n for km, cn in cleaned.items():\n fout.write(f'{km},{cn}\\n')\n cnt += 1\n # get final time of the script\n end = time.process_time()\n total_time = end - start_time\n print(f'The script takes {total_time} to finish!')\n print(f'Where read and manipulated {cnt} files')\n print('Done!')", "def move_to_folder(folder = \"output\"):\n\n for files in os.listdir(os.getcwd()):\n if files.endswith(\".tcl\") or files.endswith(\".pdb\") or files.endswith(\".fasta\"):\n new_file = folder + \"/\" + files\n os.rename(files, new_file)", "def moveprocessedfb2(self, input_folder_path, processed_folder_path, conn, logg):\n logg.writing_log(conn, 'Starting moving processed fb2 files')\n if os.listdir(input_folder_path):\n for file_name in os.listdir(input_folder_path):\n os.rename(os.path.join(input_folder_path, file_name), 
os.path.join(processed_folder_path, file_name))\n logg.writing_log(conn, 'All processed files are moved to processed folder')\n else:\n logg.writing_log(conn, 'The folder is empty, nothing to move')\n conn.commit()\n conn.close()", "def preprocess(input_folder, output_folder, T, skip, overwrite=False):\n original_labels = ['songID', 'time', 'A_t', 'A#_t', 'B_t', 'C_t', 'C#_t', 'D_t', 'D#_t', 'E_t', 'F_t', 'F#_t',\n 'G_t', 'G#_t']\n input_file_paths = sorted([os.path.join(input_folder, p) for p in os.listdir(input_folder) if p.startswith('chroma-nnls')])[-10:-9]\n print(input_file_paths)\n # input_file_paths = _create_file_paths(input_folder)\n for f in input_file_paths:\n logging.info(\"Working on file {}\".format(f))\n data = pd.read_csv(f, header=None, names=original_labels)\n data['songID'] = data['songID'].apply(_take_id) # take just the ID of the song\n data['songID'] = data['songID'].fillna(method='ffill') # repeat the ID for all rows\n for s in set(data['songID']):\n path_output = os.path.join(output_folder, 'chroma-nnls_' + s + '.csv')\n if not overwrite and os.path.isfile(path_output):\n logging.info(\"Output file {} already exists. Skipping songID {}\".format(path_output, s))\n continue\n logging.info(\"Working on songID {}\".format(s))\n df = data.loc[data['songID'] == s] # select one song at a time not to use too much memory\n df = _create_datapoints_for_dnn(df, T, skip) # add the desired columns\n df.to_csv(path_output, header=False, index=False) # write the df in a file\n return", "def test_input_folders_files(self):\n files = list_files_folder(data_dir + \"build-custom/files/\", ext=\"fna.gz\")\n folder = data_dir + \"build-custom/files/more/\"\n params = self.default_params.copy()\n params[\"db_prefix\"] = self.results_dir + \"test_input_folders_files\"\n params[\"input\"] = files + [folder]\n params[\"input_extension\"] = \"fna.gz\"\n cfg = Config(\"build-custom\", **params)\n self.assertTrue(run_ganon(cfg, params[\"db_prefix\"]), \"ganon build-custom run failed\")\n res = build_sanity_check_and_parse(vars(cfg))\n self.assertIsNotNone(res, \"ganon build-custom sanity check failed\")\n\n files.extend(list_files_folder(folder, ext=params[\"input_extension\"]))\n self.assertTrue(res[\"target\"][\"file\"].isin(files).all(), \"Files missing from target\")\n self.assertEqual(len(files), res[\"target\"].shape[0], \"Wrong number of files on target\")\n self.assertTrue(res[\"info\"][\"file\"].isin(files).all(), \"Files missing from info\")\n self.assertEqual(len(files), res[\"info\"].shape[0], \"Wrong number of files on info\")", "def multiple(folder_name: str,\r\n min_plant_pixels: int = MIN_PLANT_SIZE,\r\n output_options = [['rows',\r\n 'centers',\r\n 'row_ids',\r\n 'distances'],\r\n \r\n ['rows',\r\n 'centers',\r\n 'row_ids',\r\n 'numbers'],\r\n \r\n ['dirt',\r\n 'ditches',\r\n 'rows',\r\n 'clusters',\r\n 'centers',\r\n 'row_ids',\r\n 'numbers',\r\n 'lines']\r\n ]) -> None:\r\n\r\n # Go to the specified folder\r\n ls = listdir(folder_name)\r\n ls = [join(folder_name, i) for i in ls]\r\n\r\n # Check if the folder exists\r\n if join(folder_name, 'Analysis') in ls:\r\n\r\n # If it does, rename the old folder\r\n new_name = join(folder_name, 'Analysis')\r\n while new_name in ls:\r\n new_name += '_old'\r\n \r\n rename(join(folder_name,'Analysis'), new_name)\r\n\r\n # Create new folders inside the given directory\r\n mkdir(join(folder_name, 'Analysis'))\r\n mkdir(join(folder_name, 'Analysis/Images'))\r\n mkdir(join(folder_name, 'Analysis/Data'))\r\n \r\n # Gather the images to be 
analysed\r\n co = 0\r\n pics = [j for j in ls if isfile(j)]\r\n le = len(pics)\r\n\r\n # Analyze each of the pictures\r\n for i in pics:\r\n\r\n # Make the field\r\n field = just_field(i, min_plant_pixels)\r\n\r\n # Measure the field and save results\r\n print('Saving data...\\n')\r\n ruler = Ruler(field)\r\n \r\n ruler.output_distances(\r\n join(folder_name,\r\n 'Analysis/Data/{}_Distances.csv'.format(basename(i).split('.')[0])\r\n ) \r\n )\r\n \r\n ruler.output_row_info(\r\n join(folder_name,\r\n 'Analysis/Data/{}_Rows.csv'.format(basename(i).split('.')[0])\r\n )\r\n )\r\n\r\n # Make and save visuals\r\n print('Saving pictures...\\n')\r\n for k in range(len(output_options)):\r\n output_options[k]\r\n img = field.make_visual(ruler, output_options[k])\r\n img.save(\r\n join(folder_name,\r\n 'Analysis/Images/{}_Visual_{}.png'.format(basename(i).split('.')[0], k + 1)))\r\n\r\n # Increment the progress meter\r\n co += 1\r\n print('Completed {}/{} images\\n\\n'.format(co, le))", "def split_per(folderin, folderout, split_col='ECO_ID', colNms=['i_h100','i_cd',\n 'doy','i_wflen','i_acqdate','b1','vcf','ECO_NAME','ECO_ID','BIOME','geometry']):\n\n split_files = glob.glob(folderin + '*.shp')\n\n for filename in split_files:\n print(filename)\n basename = os.path.splitext(os.path.basename(filename))[0]\n dfa = gpd.read_file(filename)\n df = dfa.astype({split_col: 'int32'}) \n ecoNames = list(np.unique(df[split_col]))#get list of unique ecoregions \n \n for eco in ecoNames:\n #create new df with just columns I want\n df2 = gpd.GeoDataFrame(df, columns=colNms)\n ID = str(eco)\n df_eco = df.loc[df2[split_col]==eco, colNms]\n df_eco.to_file(folderout + '/{}_eco_{}.shp'.format(basename, ID))", "def jarvis(input_path, output_path): \n \n if not os.path.exists(f'{output_path}'):\n os.makedirs(f'{output_path}')\n\n folder_list = [sample for sample in os.listdir(input_path) if os.path.isdir(f'{input_path}{sample}')]\n\n for folder in folder_list:\n\n file_list = [filename for filename in os.listdir(f'{input_path}{folder}/') if '.tif' in filename]\n mutant = '_'.join(folder.split(' '))\n\n for x, filename in enumerate(file_list):\n pathname = os.path.join(input_path, folder, filename)\n new_name = f'{output_path}{mutant}_{x}.tif'\n copyfile(pathname, new_name)\n # array_stack = skimage.io.imread(f'{pathname}').transpose(1, 2, 0)\n logger.info(f'{new_name}')", "def move_to_folder(folder = \"output\"):\n for files in os.listdir(os.getcwd()):\n if files.endswith(\".tcl\") or files.endswith(\".pdb\") or files.endswith(\".fasta\") or files.endswith(\".tpl\"):\n new_file = folder + \"/\" + files\n os.rename(files, new_file)", "def clean_folder(self):\n # Remove the 1st output\n # Remove the 2nd output\n # Remove the calibrated output\n try:\n os.remove(\"output1.csv\")\n except:\n pass\n try: \n os.remove(\"output2.csv\")\n except:\n pass\n try:\n os.remove(self.__add_output_file_location(self._output_filename))\n except:\n pass\n \n list = os.listdir(\"edited\")\n for file in list:\n file = os.path.join(\"edited\", file)\n try:\n os.remove(file)\n except:\n pass\n \n list = os.listdir(\"extracted\")\n for file in list:\n file = os.path.join(\"extracted\", file)\n try:\n os.remove(file)\n except:\n pass", "def move_file(source, destination):\n #source = client_variables.output_folder\n #destination = client_variables.client_folder\n copyfiles = os.listdir(source)\n ext = (\".xlsx\", \".csv\", \".pdf\", \".png\")\n for copyfile in copyfiles:\n if copyfile.endswith(ext):\n copyfile = source + \"/\" + copyfile\n print 
\"copying\", copyfile\n shutil.move(copyfile, destination)\n elif copyfile.startswith('GetTotalByYearReport'):\n copyfile = source + \"/\" + copyfile\n print \"copying\", copyfile\n shutil.move(copyfile, destination)", "def browse_input(self):\n path = getAFolder()\n if len(path) > 0:\n self.in_directory.setText(path)\n self.out_directory.setText(join(path, 'merged_results'))\n self.preprocessfolder()", "def process_data(output_folder):\n # select imgs\n img_folder = join(output_folder, 'img')\n select_img(output_folder, img_folder, 'HE-green')\n\n mask_folder = join(output_folder, 'mask')\n select_img(output_folder, mask_folder, '_EF5')", "def move_files(self, file_dict: Dict[str, List[str]]) -> NoReturn:\n\n for folder in file_dict:\n target_folder = os.path.join(self.out_folder, folder)\n mkdirr(target_folder)\n for file_path in file_dict[folder]:\n annotation_file_name = (\n os.path.basename(file_path)\n .replace(\"png\", \"json\")\n .replace(\"jpg\", \"json\")\n )\n annotation_file_path = os.path.join(\n self.annotation_folder, annotation_file_name\n )\n\n copy_file(file_path, os.path.join(target_folder, DATA_FOLDER))\n copy_file(\n annotation_file_path, os.path.join(target_folder, ANNOTATION_FOLDER)\n )", "def collect_and_rename() -> None:\n image_source_folder = 'image_dir'\n label_source_folder = 'annotation_dir'\n image_target_folder = 'images'\n label_target_folder = 'labels'\n for i, (subdir, _, files) in enumerate(os.walk(image_source_folder), -1):\n # it walks the parent folder first, not a file\n if i == -1: \n continue\n subdir_name = subdir.split('\\\\')[1]\n for file_name in files:\n with open(f'{image_source_folder}/{subdir_name}/{file_name}') as image_file, \\\n open(f'{label_source_folder}/{subdir_name}/{file_name}'.split('.')[0] + '.txt') as label_file:\n shutil.copy2(image_file.name, f'{image_target_folder}/{\"%06d\" % i}.jpg')\n shutil.copy2(label_file.name, f'{label_target_folder}/{\"%06d\" % i}.txt')\n print(f'Processed {i} images')", "def one_step_(lijst_in, file_out, folder):\n new_input_list = []\n\n with open(lijst_in) as input_file:\n num = 0\n for line in input_file:\n line_split = line.split(\";\")\n\n new_input_list.append(line_split)\n num += 1\n # print(new_input_list)\n list_length = len(new_input_list)\n\n beg = 1\n eind = 2\n with open(file_out, 'a', encoding='utf-8') as fh:\n\n for _ in range(list_length - 1):\n aant = int(new_input_list[beg:eind][0][4])\n pdf = str(new_input_list[beg:eind][0][5])\n size = str(new_input_list[beg:eind][0][2])\n art = str(new_input_list[beg:eind][0][1])\n\n rolls(aant, pdf, size, art, file_out)\n\n beg += 1\n eind += 1\n\n with open(file_out, 'a', encoding='utf-8') as fh:\n print(\";geel.pdf\\n\" * 8, end='', file=fh)\n # this line seperates the Artikels by a yellow \"wikkel\"\n df_csv = pd.read_csv(lijst_in, delimiter=\";\", usecols=['Artnr', 'beeld', 'Aantal', 'Size', 'ColorN'])\n artikel_som = sum(df_csv.Aantal)\n artikel = df_csv.Artnr[0]\n print(f\"{artikel_som} van {artikel};leeg.pdf\\n\", end='', file=fh)\n print(\";geel.pdf\\n\" * 1, end='', file=fh)\n\n return artikel_som", "def fetch_and_preprocess(directory_to_extract_to,\n columns_to_use=None,\n output_dir='preprocessed',\n exclude_activities=[0],\n fold=False,\n val_test_size=None):\n if columns_to_use is None:\n columns_to_use = ['hand_acc_16g_x', 'hand_acc_16g_y', 'hand_acc_16g_z',\n 'ankle_acc_16g_x', 'ankle_acc_16g_y', 'ankle_acc_16g_z',\n 'chest_acc_16g_x', 'chest_acc_16g_y', 'chest_acc_16g_z']\n targetdir = fetch_data(directory_to_extract_to)\n 
outdatapath = os.path.join(targetdir, output_dir)\n if not os.path.exists(outdatapath):\n os.makedirs(outdatapath)\n if os.path.isfile(os.path.join(outdatapath, 'X_train.npy')):\n print('Data previously pre-processed and np-files saved to ' +\n outdatapath)\n else:\n preprocess(targetdir, outdatapath, columns_to_use, exclude_activities, fold, val_test_size)\n return outdatapath", "def main(input_path, output_path):\n logger.info('making final data set from raw data')\n\n index_path = 'data/raw/trec07p/full/index'\n index = getIndexMap(index_path, f'{input_path}/trec07p/data/')\n interim_path = 'data/interim'\n df = pd.DataFrame(columns=columns)\n\n count = 0\n if not path.exists(interim_path):\n logger.info(f'converting external txt files to trec07.csv in {interim_path}')\n mkdir(interim_path)\n for email in listdir(f'{input_path}/trec07p/data'):\n addEmailToDf(f'{input_path}/trec07p/data/{email}', index, df)\n count += 1\n if count % 1000 == 0:\n logger.info(f'conversion done for {count}/75000 files')\n df.to_csv(f'{interim_path}/trec07.csv', index=False)", "def mass_extract(source_directory, target_directory):\n\n import os\n import ZipFile\n\n source_directory = raw_input(\"Where are the zips? \")\n target_directory = raw_input(\"To where do you want to extract the files? \")\n \n if not os.path.exists(source_directory):\n print \"Sorry, that folder doesn't seem to exist.\"\n source_directory = raw_input(\"Where are the zips? \")\n\n if not os.path.exists(target_directory):\n os.mkdir(target_directory)\n \n for path, directory, filename in os.walk(source_directory):\n zip_file = ZipFile.ZipFile(filenames)\n ZipFile.extract(zip_file, target_directory)\n zip_file.close()\n\n print \"Done.\"", "def main():\n\n location = input(\"Enter the pathway to the directory containing the files\"\n \"to be converted:\\n\")\n os.chdir(location)\n gtiff(location)\n tiff(location)", "def postProcessOutput(self):\n\n logging.info(\" ========> Analysis %20s called postProcessOutput:\"%(self.name))\n\n if self.checkExpectedOutputFiles() == False:\n raise Exception(\"Missing expected output files. 
Number missing are [%d]\"%(len(self.missing_output_files)))\n\n FileUtils.checkDirExists(self.output_dir)\n\n tmpfiles = []\n\n logging.info(\" ========> Analysis %20s called postProcessOutput: Moving files from %s to %s \"%(self.name,self.working_dir,self.output_dir))\n try:\n for srcfile in self.expected_output_files:\n\n fullsrcfile = os.path.join(self.working_dir,srcfile)\n destfile = os.path.join(self.output_dir,srcfile)\n\n FileUtils.checkDirExistsForFile(destfile)\n\n res = shutil.move(fullsrcfile,destfile)\n\n if res == None:\n res = \"OK\"\n else:\n res = \"FAILED\"\n\n print \"Checking %s\"%destfile\n tmpfiles.append(destfile)\n \n logging.info(\" ========> Analysis %20s called postProcessOutput: Result of file move for %s = %s\" % (self.name,srcfile,res))\n\n except Exception as e:\n logging.info(\" ========> Analysis %20s file move failed %s\"%(self.name,e))\n raise\n\n self.output_files = tmpfiles\n\n for f in self.temp_output_files:\n logging.info(\" ========> Analysis %20s removing temp file %s \"%(self.name,f))\n\t res = os.remove(f)", "def preprocessfolder(self):\n imgs, _ = getFilesAndHdf(str(self.in_directory.text()))\n self.img_list = sorted(imgs)\n self.updateImageGroups()", "def move_drop_data(self, accession, project, nicu):\n\n transfer_id = 'ARUP-' + accession\n transfer_file = self.meta.tables['portal_api_datatransferfile']\n statement = select([transfer_file.c.filename]).where(transfer_file.c.datatransfer_id == transfer_id)\n result = self.conn.execute(statement)\n drop_location = nicu.ucgd_db.get_nicu_drop()\n\n ## project processing space.\n project_setup = self.processing_space + project + '/Project_Setup/'\n try:\n if not (os.path.exists(project_setup)):\n raise ProcessSpaceError\n except ProcessSpaceError as e:\n nicu.sns.project_issue('Could not move accession data, project {} Project_Setup directory not found, or data received before manifest added.'.format(project))\n nicu.log.error('Could not move accession data, project {} Project_Setup directory not found, or data received before manifest added.'.format(project))\n\n for files in result:\n acc_dir = drop_location + accession\n drop_file = drop_location + files[0]\n drop_md5 = drop_location + files[0] + '.md5'\n file_meta = files[0].split('/')\n new_file = project_setup + file_meta[-1] \n new_md5 = project_setup + file_meta[-1] + '.md5'\n\n if not (os.path.exists(new_file)):\n os.rename(drop_file, new_file)\n os.rename(drop_md5, new_md5)\n if not os.listdir(acc_dir):\n os.rmdir(acc_dir)\n nicu.log.info('File: {} moved into processing space for project: {}'.format(drop_file, project))", "def rmv_cat(folderin, folderout, column='b1', cat=['0.0', '190.0','200.0','202.0', '210.0', '220.0']):\n fileList = glob.glob(folderin + '*.shp')\n\n for filename in fileList:\n basename = os.path.splitext(os.path.basename(filename))[0]\n df = gpd.read_file(filename) \n new = df[np.logical_not(df[column].isin(cat))]\n if new.empty:\n continue\n new.to_file(folderout + '{}.shp'.format(basename))", "def movenotfb2(self, input_folder_path, trash_folder_path, conn, logg):\n logg.writing_log(conn, 'Starting moving not fb2 files')\n if os.listdir(input_folder_path):\n if any([x[-4:] != '.fb2' for x in os.listdir(input_folder_path)]):\n for file_name in os.listdir(input_folder_path):\n if file_name[-4:] != '.fb2':\n os.rename(os.path.join(input_folder_path, file_name), os.path.join(trash_folder_path, file_name))\n logg.writing_log(conn, 'All files with incorrect format are moved to trash folder')\n else:\n 
logg.writing_log(conn, 'All files in the input folder are correct')\n else:\n logg.writing_log(conn, 'The folder is empty, nothing to move')\n conn.commit()", "def main():\n\n args = parse_args()\n\n # Read frame.\n insertion_df = Insertion.from_csv(args.insertions, sep='\\t', as_frame=True)\n\n # Create output directory if it doesn't exist.\n args.output_dir.mkdir(exist_ok=True, parents=True)\n\n if args.samples is not None:\n # Subset for samples and convert to categorical.\n mask = insertion_df['sample'].isin(args.samples)\n\n insertion_df = insertion_df.loc[mask]\n insertion_df['sample'] = pd.Categorical(\n insertion_df['sample'], categories=args.samples)\n\n # Split and write individual outputs.\n for sample, grp in insertion_df.groupby('sample'):\n if args.remove_prefix:\n grp['id'] = grp['id'].str.replace(sample + '.', '')\n\n if len(grp) == 0:\n print('WARNING: no insertions found for sample {}'.format(sample))\n\n sample_path = args.output_dir / '{}.txt'.format(sample)\n grp.to_csv(str(sample_path), sep='\\t', index=False)", "def move_files_with_extension(self, extension: str):\n\n while True:\n files_with_extension = self.collect_files_with_extensions(extension)\n print(files_with_extension)\n folders_containing = set(\n [\n os.path.basename(os.path.dirname(file))\n for file in files_with_extension\n ]\n )\n directory = input(\n f\"Files with '{extension}' extension are scattered in your folders:\\n\"\n f\" {', '.join(folders_containing)}\\n\"\n f\"Where do you want to put them?\\n\"\n f\"({', '.join(self.possibilities.keys())})\\n\"\n )\n if directory in self.possibilities:\n self.move_files(files_with_extension, directory)\n break\n else:\n print(\"Invalid Input\")", "def copy_xgen_files(character):\n flg = logging.getLogger(\"lettuce.xgenSetup.copy_xgen_files\")\n\n current_file_dir = get_scene_folder()\n project_dir = get_project_dir()\n\n flg.info(\"Current Scene's folder: {}\".format(current_file_dir))\n flg.info(\"Current Project's folder: {}\".format(project_dir))\n\n gMainProgressBar = mel.eval('$tmp = $gMainProgressBar')\n\n mc.progressBar(gMainProgressBar,\n edit=True,\n beginProgress=True,\n isInterruptable=True,\n status='Copying XGen Files ...',\n maxValue=len(character)\n )\n step = 0\n\n flg.info(\"Copying {} XGen files\".format(len(character)))\n\n for c in character:\n if mc.progressBar(gMainProgressBar, query=True, isCancelled=True):\n flg.info(\"Progress Interrupted by user\")\n flg.info(\"Canceled on step: {0} of {1}\".format(step, len(character)))\n break\n collection = c.get_default_collection()\n\n flg.info(\"Character: {}\".format(c.get_charName()))\n flg.debug(\"Collection: {}\".format(collection))\n\n xg_file = collection.get_xgenFile()\n xg_file_resolved = os.path.join(project_dir, xg_file)\n\n flg.info(\"Copying file from: {0} to {1}\".format(xg_file_resolved, current_file_dir))\n flg.info(\"...\")\n try:\n shutil.copy2(xg_file_resolved, current_file_dir)\n flg.info(\"Complete\")\n except IOError as e:\n mc.progressBar(gMainProgressBar, edit=True, endProgress=True)\n flg.error(\"IO Error, copying failed. 
{}\".format(e))\n break\n step += 1\n mc.progressBar(gMainProgressBar, edit=True, step=step)\n\n flg.info(\"Complete, {} characters copied\".format(len(character)))\n mc.progressBar(gMainProgressBar, edit=True, endProgress=True)", "def go():\n u_input = UserInput()\n\n # Locates important folders\n input_folder = u_input.get_input_folder()\n working_folder = u_input.get_working_folder()\n output_folder = u_input.get_output_folder()\n\n # Remaining information of the configuration file\n sequence_type = u_input.get_sequence_type()\n protein_type = u_input.get_protein_type()\n check_settings(sequence_type, protein_type)\n accession_ncbi_list = u_input.get_genome_accessions()\n user_email = u_input.get_user_email()\n distance_function = u_input.get_distance_function()\n e_value = u_input.get_e_value()\n cutoff = u_input.get_cutoff()\n replicates = u_input.get_replicates()\n blast_word_size = u_input.get_blast_word_size()\n\n # Output files configuration\n majority_or_support_tree = u_input.get_phylogenetic_tree_type()\n original_newick_tree = u_input.get_original_newick_tree()\n original_distance_matrix = u_input.get_original_distance_matrix()\n bootstrap_distance_matrix = u_input.get_bootstrap_distance_matrix()\n\n # Deletes old content from files\n delete_folder_content(working_folder)\n # delete_folder_content(output_folder)\n\n # Downloads NCBI files\n access_ncbi(accession_ncbi_list, user_email, input_folder)\n\n # Preprocessing phase\n n_files = 0\n error_list = []\n preprocess_phase = Preprocess()\n for file in os.listdir(\"../\" + input_folder): # Navigates into the input_folder\n n_files += 1\n error_list = preprocess_phase.preprocessing_phase(file, input_folder, sequence_type, protein_type, working_folder)\n\n # Displays a list of error detected in the preprocessing code\n display_error_messages(error_list)\n\n if len(error_list) < n_files - 1:\n alignment = Blast()\n # Builds a database\n distance_dictionary, coverage_vector_dictionary = alignment.make_blast_database(\n sequence_type, working_folder, e_value, blast_word_size)\n print(\"Sequence alignment has been done\")\n\n # Calculates distances and generates a phylogenetic tree in newick format\n phylogeny_tree = Phylogeny()\n print(\"Creating phylogenetic trees\")\n newick_tree = phylogeny_tree.get_newick_tree(coverage_vector_dictionary, distance_dictionary, distance_function,\n replicates, working_folder, output_folder,\n original_distance_matrix, bootstrap_distance_matrix,\n original_newick_tree)\n\n # Read and concatenates trees from files\n tree_list = phylogeny_tree.get_tree_list(working_folder)\n\n # Generates a consensus trees with or without support\n if majority_or_support_tree in [\"Support\", \"support\"]:\n phylogeny_tree.get_support_tree(newick_tree, tree_list, output_folder)\n elif majority_or_support_tree in [\"Majority\", \"majority\"]:\n phylogeny_tree.majority_consensus_tree(output_folder, tree_list, cutoff)\n else:\n if majority_or_support_tree in [\"Both\", \"both\"]:\n phylogeny_tree.get_support_tree(newick_tree, tree_list, output_folder)\n phylogeny_tree.majority_consensus_tree(output_folder, tree_list, cutoff)\n else:\n print(\"No majority tree consensus or support tree will be calculated\")\n else:\n print('\\n', \"At least two correct sequences to compare are needed. 
Please, check the error list to solve the \"\n \"detected problems and the content of the '\" + input_folder + \"' folder.\")", "def process_input_data(input_data_path):\n if os.path.isdir(input_data_path):\n input_data_glob = glob.glob(input_data_path + \"/*.csv\")\n else:\n if is_gcs_path(input_data_path):\n # Download the input to a local\n with tempfile.NamedTemporaryFile() as hf:\n input_data = hf.name\n\n logging.info(\"Copying %s to %s\", input_data_path, input_data)\n input_data_gcs_bucket, input_data_gcs_path = split_gcs_uri(\n input_data_path)\n\n logging.info(\"Download bucket %s object %s.\", input_data_gcs_bucket,\n input_data_gcs_path)\n bucket = storage.Bucket(storage.Client(), input_data_gcs_bucket)\n storage.Blob(input_data_gcs_path, bucket).download_to_filename(\n input_data)\n else:\n input_data = input_data_path\n\n ext = os.path.splitext(input_data)[-1]\n if ext.lower() == '.zip':\n zip_ref = zipfile.ZipFile(input_data, 'r')\n zip_ref.extractall('.')\n zip_ref.close()\n # TODO: Hardcoding the file in the Archive to use is brittle.\n # We should probably just require the input to be a CSV file.:\n csv_file = 'stackoverflow-questions.csv'\n else:\n csv_file = input_data\n\n input_data_glob = glob.glob(csv_file)\n\n return input_data_glob", "def main():\n # The following dictionary will allow us to map extensions to the destination folder names\n extension_to_category = {}\n os.chdir(\"FilesToSort\")\n for filename in os.listdir('.'):\n # Ignore directories, just process files\n if os.path.isdir(filename):\n continue\n\n extension = filename.split('.')[-1]\n if extension not in extension_to_category:\n category = input(\"What category would you like to sort {} files into? \".format(extension))\n # Now we can map this new extension to a folder name\n extension_to_category[extension] = category\n try:\n # We don't expect to get an exception due to the if statement\n # But we'll play it safe anyway in case the user chooses an existing folder\n os.mkdir(category)\n except FileExistsError:\n pass\n\n # We don't need a separate loop for this next step\n # We're already in a loop per file and we now know where to put it\n os.rename(filename, \"{}/{}\".format(extension_to_category[extension], filename))", "def TextureFiles():\n import shutil\n\n # first convert the .psd files to .png\n\n FbmDir = glo.outputFolder + '.fbm'\n\n for d1, d2, filenames in os.walk(FbmDir):\n for filename in filenames:\n \"\"\"filename: vitrin_diffuse.psd\n \"\"\"\n # print \"TextureFiles():\", filename\n if filename[-4:].upper() == '.PSD':\n #print \" -- FbmDir:\" , FbmDir\n #print \" -- in the if clause with filename:\" , filename\n #print \" -- glo.outputFolder\" , glo.outputFolder\n # FbmDir = '../fbx/simplelifeembedmedia.fbm'\n # filename = 'shelves_light.PSD'\n PsdToPngConverter(FbmDir, filename)\n\n # Move only the .png file to the ../png/ directory\n filename = filename[:-4] + '.png'\n src = os.path.join(FbmDir, filename)\n elif filename[0] != '.':\n src = os.path.join(FbmDir, filename)\n pass\n\n shutil.copy(src, glo.outputFolder)\n print os.path.join(glo.outputFolder, filename), \"\\n\"\n sys.stdout.flush()\n # for d1, d2, files in os.walk(glo.outputFolder):\n # if not filename in files:\n # #print \"moving: \", files, filename, not filename in files\n # shutil.copy(src, glo.outputFolder)\n # print os.path.join(glo.outputFolder, filename), \"\\n\"\n # else:\n # print \"%s/%s already exists. 
File not moved\" % (glo.outputFolder,filename)", "def main():\n\ttarget_folder = r'Abstracts cleanup\\abstracts\\*.txt'\n\t\n\ttry:\n\t\tstripchars(target_folder)\n\texcept Exception as e:\n\t\tprint(e)\n\t\tpass\n\n\t# renameid(target_folder)", "def copy_mosaic(mosaic_dir='K:/IID_SaltonSea/Tasks/Soil mapping/PhotoDocumentation/Original/',\r\n output_dir='K:/IID_SaltonSea/Tasks/Soil mapping/PhotoDocumentation/Processing/',\r\n file_pattern='IID201905*jpg', replace=False): \r\n \r\n if not os.path.exists(mosaic_dir):\r\n sys.exit('input folder does not exist')\r\n \r\n mosaics = []\r\n for root, dirnames, filenames in os.walk(mosaic_dir):\r\n for filename in fnmatch.filter(filenames, file_pattern):\r\n mosaics.append(os.path.join(root, filename))\r\n \r\n c = 0\r\n s = 0\r\n r = 0\r\n for m in mosaics:\r\n f = output_dir + os.path.basename(m)\r\n if not os.path.exists(f):\r\n copyfile(m, f)\r\n print('copied: %s' % f)\r\n c+=1\r\n elif replace:\r\n copyfile(m, f)\r\n print('replaced: %s' % f)\r\n r+=1\r\n else:\r\n print('skipped: %s' % f)\r\n s+=1\r\n \r\n print('copied total of %i files' % c)\r\n print('replaced total of %i files' % r)\r\n print('skipped total of %i files' % s)", "def copy_database(path_images, path_labels, path_final_images):\n\n try:\n labels = sorted(os.listdir(path_labels))\n except FileNotFoudError:\n print(\"No such file or directory \", path_labels)\n\n try:\n images = sorted(os.listdir(path_images)) #+ \"RetinaNet_I04590/\"))\n except FileNotFoudError:\n print(\"No such file or directory \", path_images)\n\n \"\"\"if not os.path.exists(path_final_images + \"I04590/\"):\n os.mkdir(path_final_images + \"I04590/\")\n\n if not os.path.exists(path_final_images + \"I045135/\"):\n os.mkdir(path_final_images + \"I045135/\")\n\n if not os.path.exists(path_final_images + \"I090135/\"):\n os.mkdir(path_final_images + \"I090135/\")\n\n if not os.path.exists(path_final_images + \"I4590135/\"):\n os.mkdir(path_final_images + \"I4590135/\")\n\n if not os.path.exists(path_final_images + \"Params/\"):\n os.mkdir(path_final_images + \"Params/\")\n\n if not os.path.exists(path_final_images + \"Pauli2/\"):\n os.mkdir(path_final_images + \"Pauli2/\")\n\n if not os.path.exists(path_final_images + \"Pauli3/\"):\n os.mkdir(path_final_images + \"Pauli3/\")\n\n if not os.path.exists(path_final_images + \"Stokes/\"):\n os.mkdir(path_final_images + \"Stokes/\")\n\n if not os.path.exists(path_final_images + \"Rachel/\"):\n os.mkdir(path_final_images + \"Rachel/\")\n\n if not os.path.exists(path_final_images + \"Rachel2/\"):\n os.mkdir(path_final_images + \"Rachel2/\")\"\"\"\n\n for k in range(len(images)):\n if str(k) + \".xml\" in labels:\n copyfile(path_images + \"/\" + images[k],\n path_final_images + \"/\" + images[k])\n \"\"\"copyfile(path_images + \"RetinaNet_I04590/\" + str(k) + \".png\",\n path_final_images + \"I04590/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_I045135/\" + str(k) + \".png\",\n path_final_images + \"I045135/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_I090135/\" + str(k) + \".png\",\n path_final_images + \"I090135/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_I4590135/\" + str(k) + \".png\",\n path_final_images + \"I4590135/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_Params/\" + str(k) + \".png\",\n path_final_images + \"Params/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_Pauli2/\" + str(k) + \".png\",\n path_final_images + \"Pauli2/\" + str(k) + \".png\")\n copyfile(path_images + 
\"RetinaNet_Pauli3/\" + str(k) + \".png\",\n path_final_images + \"Pauli3/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_Stokes/\" + str(k) + \".png\",\n path_final_images + \"Stokes/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_Rachel/\" + str(k) + \".png\",\n path_final_images + \"Rachel/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_Rachel2/\" + str(k) + \".png\",\n path_final_images + \"Rachel2/\" + str(k) + \".png\")\n copyfile(path_labels + str(k) + \".xml\",\n path_final_labels + str(k) + \".xml\")\"\"\"\n print(k)", "def format_preparation_files(run_dir, sample_sheet, output_dir, pipeline,\n verbose):\n sample_sheet = KLSampleSheet(sample_sheet)\n df_sheet = sample_sheet_to_dataframe(sample_sheet)\n\n if pipeline == 'atropos-and-bowtie2':\n click.echo('Stats collection is not supported for pipeline '\n 'atropos-and-bowtie2')\n else:\n stats = run_counts(run_dir, sample_sheet)\n\n stats['sample_name'] = \\\n df_sheet.set_index('lane', append=True)['sample_name']\n\n # returns a map of (run, project_name, lane) -> preparation frame\n preps = preparations_for_run(run_dir, df_sheet, pipeline=pipeline)\n\n os.makedirs(output_dir, exist_ok=True)\n\n for (run, project, lane), df in preps.items():\n fp = os.path.join(output_dir, f'{run}.{project}.{lane}.tsv')\n\n if pipeline == 'fastp-and-minimap2':\n # stats are indexed by sample name and lane, lane is the first\n # level index. When merging, make sure to select the lane subset\n # that we care about, otherwise we'll end up with repeated rows\n df = df.merge(stats.xs(lane, level=1), how='left',\n on='sample_name')\n\n # strip qiita_id from project names in sample_project column\n df['sample_project'] = df['sample_project'].map(\n lambda x: re.sub(r'_\\d+$', r'', x))\n\n # center_project_name is a legacy column that should mirror\n # the values for sample_project.\n df['center_project_name'] = df['sample_project']\n\n df.to_csv(fp, sep='\\t', index=False)\n\n if verbose:\n project_name = remove_qiita_id(project)\n # assume qiita_id is extractable and is an integer, given that\n # we have already passed error-checking.\n qiita_id = project.replace(project_name + '_', '')\n print(\"%s\\t%s\" % (qiita_id, abspath(fp)))", "def process_directory(working_directory, cc_size, output_directory):\n print \"\\nProcessing directory {0}\".format(working_directory)\n \n for dirpath, dirnames, filenames in os.walk(working_directory):\n for f in filenames:\n if f.split('.')[-1] == 'tif':\n img = load_image(os.path.join(dirpath, f))\n onebitimage = img.to_onebit()\n onebitimage.despeckle(int(cc_size))\n output_path = os.path.join(output_directory, f)\n # print onebitimage\n # print (os.path.join(dirpath, f.split('.')[0]+ '_NEW.' + f.split('.')[-1]))\n # onebitimage.save_tiff(os.path.join(dirpath, f.split('.')[0]+ '_NEW.' 
+ f.split('.')[-1]))\n\n onebitimage.save_tiff(output_path)\n print output_path\n else:\n pass", "def split_and_copy_to_xval_folders(xVal_info, target_folder):\n\n for i in range(1, 6):\n os.makedirs(os.path.join(target_folder, str(i), \"x\"), exist_ok=True)\n os.makedirs(os.path.join(target_folder, str(i), \"y\"), exist_ok=True)\n\n for i, (x_old_path, y_old_path, doc_eval, xval_group) in enumerate(xVal_info):\n y = np.load(y_old_path)\n assert int(y) == doc_eval, \"Doctor labels in npy files must match with the excel sheet!\"\n x_name = os.path.split(x_old_path)[-1]\n y_name = os.path.split(y_old_path)[-1]\n assert x_name == y_name, \"x and y file names must match!\"\n x_new_path = os.path.join(target_folder, str(xval_group), \"x\", x_name)\n y_new_path = os.path.join(target_folder, str(xval_group), \"y\", y_name)\n shutil.copy(x_old_path, x_new_path)\n shutil.copy(y_old_path, y_new_path)\n print(f\"Copied {x_name} to xval group {xval_group} ({i+1}/{len(xVal_info)}) \\n\\tx: {x_new_path}\\n\\ty: {y_new_path}\")\n print(f\"DONE\".center(100, \"_\"))", "def move_and_filter_tiles_folders(tiles_folders, classes, slides_id, cases_ids, output_folder, background_pixel_value,\n background_threshold, expected_shape, logger):\n def move_jpeg_file(inputs):\n slide_id, img_filepath = inputs[0], inputs[1]\n is_mostly_background, percent_background = is_tile_mostly_background(img_filepath=img_filepath,\n background_pixel_value=background_pixel_value,\n background_threshold=background_threshold,\n expected_shape=expected_shape)\n\n # If img considered not mostly background, move to processed folder, otherwise is discarded\n if not is_mostly_background: # copy to dest folder\n new_filepath = os.path.join(output_folder, slide_id, os.path.basename(img_filepath))\n shutil.copyfile(os.path.abspath(img_filepath),\n os.path.abspath(new_filepath))\n\n assert len(tiles_folders) == len(classes) == len(slides_id) == len(cases_ids)\n\n destination_folders = []\n for i, (tile_folder, slide_id, class_, case_id) in \\\n tqdm(enumerate(zip(tiles_folders, slides_id, classes, cases_ids)), total=len(tiles_folders)):\n new_folderpath = os.path.abspath(os.path.join(output_folder, slide_id))\n destination_folders.append(new_folderpath)\n # if destination folder already exists then folder already processed -> skip\n if os.path.exists(new_folderpath):\n continue\n os.makedirs(new_folderpath)\n\n images_filenames = [f for f in os.listdir(tile_folder) if f.endswith(('.jpeg', '.jpg', '.png', '.pt'))]\n\n # Write a summary.txt file containing all the tiles of WSI, before background discarding\n with open(os.path.join(new_folderpath, 'summary.txt'), 'w') as f:\n f.write('\\n'.join(images_filenames))\n # Write file containing label as int\n with open(os.path.join(new_folderpath, 'label.txt'), 'w') as f:\n f.write(str(class_))\n # Write file containing case id\n with open(os.path.join(new_folderpath, 'case_id.txt'), 'w') as f:\n f.write(case_id)\n\n # Add slide if within arguments of move_jpeg_file\n try:\n with futures.ThreadPoolExecutor(max_workers=N_PROCESSES) as pool:\n images_filepaths = [(slide_id, os.path.join(tile_folder, img_filename))\n for img_filename in images_filenames]\n list(pool.map(move_jpeg_file, images_filepaths))\n except (SyntaxError, ValueError) as e:\n logger.warn(' discarding %s because some image files are corrumpted: %s' % (slide_id, e))\n continue\n\n # return all destination slides folders\n return list(map(os.path.abspath, destination_folders))", "def process_files(file_location, day):\n # construct 
file path\n file_dir = PREFIX+file_location\n file_pattern = file_dir+'lz_'+day+'*_raw.root'\n # print(file_pattern)\n file_list = glob.glob(file_pattern)\n print(\"There are %s MC files in the requested directory (%s).\" %(len(file_list), file_dir))\n file_names = []\n for f in file_list:\n file_name_only = f.split('/')\n file_names.append(file_name_only[-1])\n return file_names", "def pele_folders(input_, file_list, dir_=None):\r\n os.chdir(\"../\")\r\n if not dir_:\r\n base = basename(input_)\r\n base = base.replace(\".pdb\", \"\")\r\n else:\r\n base = dir_\r\n count = 0\r\n folder = []\r\n for files in file_list:\r\n name = basename(files)\r\n name = name.replace(\".pdb\", \"\")\r\n if not count:\r\n hold = \"bla\"\r\n count += 1\r\n if name != \"original\" and hold != name[:-1]:\r\n hold = name[:-1]\r\n folder.append(\"mutations_{}/{}\\n\".format(base, hold))\r\n with open(\"dirnames_{}.txt\".format(base), \"w\") as txt:\r\n txt.writelines(folder)", "def process_files(geodata_name, inp_dict):\n input_paths = inp_dict[\".xls\"][:]\n try:\n data = geodata(geodata_name)\n except UnicodeDecodeError:\n showerror(\"Ошибка кодирования\", \"Файл данных должен быть закодирован в utf-8\")\n data = geodata(askopenfilenames(initialdir=os.path.abspath(os.getcwd()), filetypes=[(\"Файл данных txt\", \".txt\")], title=\"Выберите файл данных txt\")[0])\n\n\n for book in input_paths:\n book_flag = False\n with open_workbook(book, formatting_info=True) as rb:\n header = False\n wb = copy(rb)\n for numb, sheet in enumerate(rb.sheets()):\n column = \"False\"\n for row in range(sheet.nrows):\n if column != \"False\":\n for data_row in data:\n if sheet.cell(row, column).value == data_row[0]:\n sheet_wb = wb.get_sheet(numb)\n sheet_wb.write(row, sheet.ncols, data_row[1])\n sheet_wb.write(row, sheet.ncols+1, data_row[2])\n break\n else:\n for col in range(sheet.ncols):\n for data_row in data:\n if sheet.cell(row, col).value == data_row[0]:\n column = col\n book_flag = True\n sheet_wb = wb.get_sheet(numb)\n sheet_wb.write(row, sheet.ncols, data_row[1])\n sheet_wb.write(row, sheet.ncols+1, data_row[2])\n if not header:\n header = True\n style_list = get_xlwt_style_list(rb)\n wb.get_sheet(numb).write(0, sheet.ncols, u\"Широта\", style=style_list[sheet.cell_xf_index(0, 0)])\n wb.get_sheet(numb).write(0, sheet.ncols+1, u\"Долгота\", style=style_list[sheet.cell_xf_index(0, 0)])\n break\n if book_flag:\n if not os.path.isdir(\"out\"):\n os.mkdir(\"out\")\n f_out = get_output_name(book)\n wb.save(f_out)\n inp_dict[\"del\"].append(f_out)\n inp_dict[\"out\"].append(f_out)\n return inp_dict", "def entry_parser():\n # from tools import file_importer, file_outporter\n from copy import copy\n from collections import defaultdict\n import os.path\n \n print(\"this is entry parser\")\n \n # inPathL = [\"bob/processed/proteinGroups - OST-1-09042017.txt\",\"bob/processed/proteinGroups_OST2.txt\",\"bob/processed/proteinGroups_OST3.txt\"]\n inpathL = []\n inpF = open(os.path.join(os.path.split(os.path.dirname(__file__))[0], \"data\", \"cav1ko\", \"txt_cav1ko-1-17082017\", \"proteinGroups.txt\"),\"r\")\n # outPath = \"bob/processed/OST-24-05-2017_combined.csv\"\n fileCount = 1\n # outF = file_outporter(outPath)\n outF = open(os.path.join(os.path.split(os.path.dirname(__file__))[0], \"data\", \"cav1ko\", \"processed\", \"cav1ko-1.csv\"),\"w\")\n # newFlag = True\n \n finDict = defaultdict(list)\n cN = 0\n # for relPath in inPathL:\n outDict = {}\n # inpF = file_importer(relPath)\n headerFlag = True\n \n for inpLine in inpF:\n cN += 
1\n if headerFlag:\n headerFlag = False\n headerLine = inpLine\n continue\n inpLine = inpLine.strip(\"\\n\\r\")\n inpItem = inpLine.split(\"\\t\")\n geneL = inpItem[0].split(\";\")\n lenS = len(geneL[0])\n curGene = geneL[0]\n for geneI in geneL: # find gene name with the shortest length\n if len(geneI) < lenS:\n lenS = len(geneI)\n curGene = geneI\n if \"__\" in curGene: continue # get rid of contaminant lines\n try: # get rid of wonky lines introduced by excel\n int(curGene)\n continue\n except ValueError: \n pass\n\n if curGene[-2] == \"-\":\n curGene = curGene[:-2]\n if curGene[-3] == \"-\":\n curGene = curGene[:-3]\n \n # remove ambiguities based on gene name from the entire entry:\n \n corrPos = geneL.index(curGene)\n corrLine = []\n targetCount = 46 # after the 45th item row in the list, peptide IDs and modification start to appear which are allowed to have multiple entries and do not need to be disambiguated\n currCount = 1\n pepFlag = True\n for inpE in inpItem:\n currCount += 1\n if currCount == targetCount:\n pepFlag = False\n # print inpE\n if \";\" in inpE and pepFlag:\n try:\n corrLine.append(inpE.split(\";\")[corrPos])\n except IndexError:\n corrLine.append(inpE.split(\";\")[0])\n else:\n corrLine.append(inpE.rstrip(\"\\n\"))\n\n \n if inpItem[6] == \"\":\n # print \"no protein name found. adding the uniprot ID.\"\n inpItem[6] = curGene\n \n \"\"\"\n try:\n for inpN in inpItem[4:10]:\n inpItem[inpItem.index(inpN)] = int(inpN)\n countFlag = True\n except ValueError:\n print inpItem[4:10]\n countFlag = False\n if countFlag:\n if sum(inpItem[4:10]) == 0: continue # there are some unexpressed proteins in there\n \n \"\"\"\n # print len(corrLine)\n if curGene in outDict: # handle duplicate protein entries and merge them together\n # print \"%s is duplicate\" % curGene\n if curGene == \"Protein IDs\": \n \"\"\"\n quickCount2 = 0\n for quickDictI in outDict[curGene]:\n print str(quickCount2) + \" \" + quickDictI\n quickCount2 += 1\n quickList = inpItem\n quickCount3 = 0\n for quickImp in quickList:\n print str(quickCount3) + \" \" + quickImp\n quickCount3 += 1 \n # print inpItem\n # print outDict[curGene]\n \"\"\"\n continue\n combList = []\n \n \"\"\"\n addL = []\n for i in outDict[curGene][3:]:\n addL.append(i)\n addL2 = []\n for j in corrLine[3:]:\n addL2.append(i)\n outL[3:] = map(add, addL, addL2) # admittedly this looks terrible\n \"\"\"\n \n indexN = 0\n for cItem in corrLine:\n # print indexN\n # print \"---\"\n # print len(corrLine)\n if indexN < 18 or 30 <= indexN <= 43:\n try:\n currC = int(cItem)\n currC = currC + int(outDict[curGene][indexN]) # numbers like peptide counts or LFQ values are added up during merge\n except ValueError:\n currC = cItem\n \n elif 18 <= indexN <= 25 or 28 <= indexN <= 29: # sequence coverage and scores\n currC = max([float(cItem),float(outDict[curGene][indexN])])\n \n elif 26 <= indexN <= 27 or indexN == 44:\n \"\"\"\n quickCount = 0\n for corrItem in corrLine:\n print str(quickCount) + \" \" + corrItem\n quickCount += 1\n \n import time\n \n print relPath\n print corrLine\n print outDict[curGene]\n print \"++++++++++++++++++++++++\"\n print indexN\n time.sleep(0.5)\"\"\"\n currC = cItem\n\n \n else:\n corrL = cItem.split(\";\")\n # print indexN\n # print corrLine\n # print outDict[curGene][indexN]\n dictL = outDict[curGene][indexN].split(\";\")\n mergeL = copy(dictL)\n for corrI in corrL:\n if corrI not in dictL:\n mergeL.append(corrI)\n \n currC = \";\".join(mergeL)\n\n combList.append(currC)\n\n \n indexN +=1\n \n \n combList[-1] = 
\"merged\" \n outDict[curGene] = combList \n # print \"merged:\"\n # print combList\n else:\n corrLine.append(\"unique\")\n outDict[curGene] = corrLine\n\n \n print(fileCount)\n \n\n # if not newFlag: print fileCount, testKey, finDict[testKey] \n # if newFlag:\n # newFlag = False\n \n for outKey,outValue in list(outDict.items()): \n if outKey in finDict: # add modified dicts together into single, unified dict\n # print fileCount, finDict[outKey]\n # print outValue\n outIndex = 0\n for outItem in outValue:\n finDict[outKey][outIndex].append(outItem)\n outIndex += 1\n # print finDict[outKey]\n\n else: # or just add new entries\n if fileCount == 1:\n for outItem in outValue:\n finDict[outKey].append([outItem])\n \n else: # fill up entries that were not present in the previous cycle\n loopCount = 0\n while loopCount < fileCount - 1:\n for i in range(len(outValue)):\n if len(finDict[outKey]) == i:\n finDict[outKey].append([])\n else:\n finDict[outKey][i].append(\"\")\n loopCount += 1\n outIndex = 0\n for outItem in outValue:\n # print finDict[outKey]\n finDict[outKey][outIndex].append(outItem) \n outIndex += 1\n\n for testKey in finDict: # fill up entries in result dict which were not present in previous file\n if len(finDict[testKey][0]) < fileCount:\n for i in range(len(finDict[testKey])):\n finDict[testKey][i].append(\"\")\n\n if len(inpathL) > 1: fileCount += 1 # this is needed if multiple files are parsed\n for finK, finV in list(finDict.items()):\n for finI in finV[-1]:\n if finI != \"unique\" and finI != \"\":\n print(finK, finV)\n\n \n \n outN = 0 \n # prepare header for file:\n headList = headerLine.strip(\"\\n\\r\").split(\"\\t\")\n if fileCount > 1:\n for headerItem in headList[:-1]:\n headerI = headerItem.replace(\",\",\".\")\n headerCount = 1\n while headerCount < fileCount:\n outF.write(headerI + \"-\" + str(headerCount) + \"|\")\n headerCount += 1 \n outF.write(headerI + \"-\" + str(headerCount) + \"\\t\")\n \n headerCount = 1\n while headerCount < fileCount:\n outF.write(headList[-1] + \"-\" + str(headerCount) + \"|\")\n headerCount += 1\n \n outF.write(headList[-1] + \"-\" + str(headerCount) + \"\\n\")\n\n elif fileCount == 1:\n for headerItem in headList[:-1]:\n headerI = headerItem.replace(\",\",\".\") \n outF.write(headerI + \"\\t\")\n outF.write(headList[-1].replace(\",\",\".\") + \"\\n\")\n \n else:\n print(\"number of input files should be at least one. 
Got less somehow\")\n raise ValueError\n \n \n for outDK, outDV in list(finDict.items()): # write out assembled results to a file\n outN += 1\n if len(outDK) > 30: print(\"this line should not be displayed\")\n # print outDV[1]\n # if outN == 100: break\n nameCount = 0\n for outI in outDV:\n # if nameCount == 0: print outI\n for outPiece in outI[:-1]:\n outU = outPiece.replace(\",\",\".\")\n if outU == \"\": outF.write(\"_|\")\n else: outF.write(str(outU) + \"|\")\n if outI[-1] == \"\": # handle missing entries\n if nameCount == 6: outF.write(outDV[0][0] + \"\\t\") # replace missing gene names with their uniprot ID\n else: outF.write(\"_\\t\")\n else: outF.write(str(outI[-1]).replace(\",\",\".\") + \"\\t\")\n nameCount += 1\n outF.write(\"\\n\")\n \n\n print(\"unique proteins: \", outN)\n print(\"lines parsed: \", cN)\n # print headerLine\n inpF.close()\n outF.close()", "def test_input_folder(self):\n params = self.default_params.copy()\n params[\"db_prefix\"] = self.results_dir + \"test_input_folder\"\n params[\"input\"] = data_dir + \"build-custom/files/\"\n params[\"input_extension\"] = \"fna.gz\"\n cfg = Config(\"build-custom\", **params)\n self.assertTrue(run_ganon(cfg, params[\"db_prefix\"]), \"ganon build-custom run failed\")\n res = build_sanity_check_and_parse(vars(cfg))\n self.assertIsNotNone(res, \"ganon build-custom sanity check failed\")\n\n files = list_files_folder(params[\"input\"], params[\"input_extension\"])\n self.assertTrue(res[\"target\"][\"file\"].isin(files).all(), \"Files missing from target\")\n self.assertEqual(len(files), res[\"target\"].shape[0], \"Wrong number of files on target\")\n self.assertTrue(res[\"info\"][\"file\"].isin(files).all(), \"Files missing from info\")\n self.assertEqual(len(files), res[\"info\"].shape[0], \"Wrong number of files on info\")\n\n # Wrong extension\n params = self.default_params.copy()\n params[\"db_prefix\"] = self.results_dir + \"test_input_folder_wrong_extension\"\n params[\"input\"] = data_dir + \"build-custom/files/\"\n params[\"input_extension\"] = \"xxx.gz\"\n cfg = Config(\"build-custom\", **params)\n self.assertFalse(run_ganon(cfg, params[\"db_prefix\"]), \"ganon build-custom ran but it should fail\")\n\n # Wrong folder\n params = self.default_params.copy()\n params[\"db_prefix\"] = self.results_dir + \"test_input_folder_wrong_folder\"\n params[\"input\"] = data_dir + \"wrong-place/\"\n params[\"input_extension\"] = \"fna.gz\"\n cfg = Config(\"build-custom\", **params)\n self.assertFalse(run_ganon(cfg, params[\"db_prefix\"]), \"ganon build-custom ran but it should fail\")", "def join_per(folderin, folderout, IDfile='./eco/final_ID.csv', column='ECO_ID', naming='*_eco_{}.shp'):\n #import csv with IDs to obtain list for merge\n df = pd.read_csv(IDfile)\n ecoNms = list(np.unique(df[column]))#get list of unique ecoregions \n\n for ecoNm in ecoNms:\n fileList = glob.glob(folderin + naming.format(ecoNm))#here also need dict ref\n rsgislib.vectorutils.mergeShapefiles(fileList, folderout + 'gla14_eco_{}.shp'.format(ecoNm))#use dict to get ecoNm, create new folder too?\n \n #mkdir is make new folder", "def extract_write_to_file(self, num_extracted, write_dir, sub_h, sub_w, margin=10):\n\n file_seed = len(os.listdir(write_dir))\n\n for i in range(num_extracted):\n file_num = str(file_seed + i)\n write_path = os.path.join(write_dir, file_num + \".\" + 'jpg')\n\n print('extracting {}/{} images of dimension {}x{}'.format(i, num_extracted, sub_h, sub_w))\n print('writting to location: {}'.format(write_path))\n\n 
self.extract_single(sub_h, sub_w, write_path)", "def main(\n file_pattern=INFILE_PATTERN,\n # folder_pattern=INFOLDER_PATTERN,\n tol_td=TOLERANCE_TIMEDELTA,\n outlier=OUTLIER_THRESHOLD,\n args=ARGS,\n):\n # Initialize IO-directories and setup logging\n path_in, path_out = initialize_io()\n\n # path_diffs = path_out / \"diff_imgs\"\n # if args.export:\n # # Folder not needed otherwise, but variable needs to be passed\n # if not path_diffs.is_dir():\n # path_diffs.mkdir()\n # logging.info(f\"Created folder '{path_diffs}'\")\n\n # Find matching files\n # NOTE: This can take potentially long\n # A folderwise sorting would be much faster\n # t0 = time.time()\n filelist = sorted(path_in.rglob(file_pattern))\n # dur = time.time() - t0\n\n n_files = len(filelist)\n logging.info(f\"Found {n_files} matching files in '{path_in}'\")\n # f\"(took {dur:.4} seconds)\")\n\n # act_list = []\n # df_agg = None\n df_list = []\n med_list = []\n for csv_path in filelist:\n logging.info(f\"Reading '{csv_path.name}'\")\n\n hive, rpi, method, day_str = parse_filename(csv_path.name)\n name = f\"RPi{rpi}_{day_str}_{method}\"\n # Read CSV\n # header = [\n # \"time_central\", \"duration\", \"activity\",\n # \"time1\", \"time2\",\n # \"file1\", \"file2\"\n # ]\n # See https://pandas.pydata.org/pandas-docs/stable/reference/\n # api/pandas.read_csv.html\n # df = pd.read_csv(csv_path, index_col=\"time\", parse_dates=True,\n # date_parser=my_date_parser)\n # Works only with the default pandas time format:\n df = pd.read_csv(\n csv_path,\n index_col=\"time_central\",\n parse_dates=[\"time_central\", \"time1\", \"time2\"],\n # converters={\"path\": my_path_parser}),\n )\n df[\"hour\"] = df.index.hour\n df[\"hive\"] = [hive] * len(df)\n df[\"rpi\"] = [rpi] * len(df)\n df[\"method\"] = [method] * len(df)\n\n # if df_agg is None:\n # df_agg = df\n # else:\n # df_agg = pd.concat([df_agg])\n\n # act_dict = {name: df[\"activity\"]}\n #\n # act_list.append(act_dict)\n\n # Plot_single_activity day\n h_median = plot_single_activity(df[\"activity\"], name, path_out)[1]\n\n # series = df.activity\n # series.index = series.index.hour\n hourly_bxpl_single(df, name, path_out)\n\n # Remove outliers\n if any(df.activity >= outlier):\n logging.warning(\n f\"Found {sum(df.activity >= outlier)} outliers \"\n f\"in {csv_path.name}, filtering them out.\")\n\n # Crop df to plausible measurements\n df = df[df.activity < outlier]\n\n if len(df) > 0:\n name += \"_removed-ols\"\n\n # Plot_single_activity day\n h_median = plot_single_activity(\n df[\"activity\"], name, path_out)[1]\n else:\n logging.warning(f\"All data in {csv_path.name} are outliers, \"\n \"skipping..\")\n continue\n\n df_list.append(df)\n med_list.append(h_median)\n\n df_agg = pd.concat(df_list)\n\n name = \"aggregated\"\n # name_euc = name + \"_euclidean\"\n # name_man = name + \"_manhattan\"\n\n # df_agg_euc = df_agg[df_agg.method == \"euclidean\"]\n # df_agg_man = df_agg[df_agg.method == \"manhattan\"]\n\n # Plot_single_activity day\n # plot_single_activity(df_agg_euc[\"activity\"], name_euc, path_out)\n plot_single_activity(df_agg[\"activity\"], name, path_out)\n\n # series = df.activity\n # series.index = series.index.hour\n\n # hourly_bxpl_single(df_agg_euc, name_euc, path_out)\n hourly_bxpl_single(df_agg, name, path_out)\n\n # Plot all medians\n plot_median_days(med_list, \"median-days\", path_out)\n\n # Plot functional median boxplot\n\n try:\n pass\n\n except KeyboardInterrupt:\n logging.info(\"Manually interrupted script\")\n\n finally:\n # if len(rows) > 0:\n # 
logging.info(f\"Exporting {len(rows)} rows to CSV\")\n # export_csv(rows, row_cols, path_out, hive, rpi, method)\n\n logging.info(\"Done.\")", "def process_images():\n\t\n\tparser = argparse.ArgumentParser(description=\"Splice image patch for face from GAN generated donor to detected face in recipient image.\")\n\tparser.add_argument(\"-d\", \"--donor\", dest=\"donor\", default=\"./GAN_Faces/\", help=\"path to directory containing GAN generated faces\")\n\tparser.add_argument(\"-r\", \"--recipient\", dest=\"recipient\", default=\"./MediFor_Images/\", help=\"path to directory containing images into which faces are spliced\")\n\tparser.add_argument(\"-o\", \"--output\", dest=\"output\", default=\"./GAN_MediFor/\", help=\"output directory into which spliced images are saved\")\n\tparser.add_argument(\"-f\", \"--files\", dest=\"files\", default=False, help=\"If the input and output are files not directories\", action='store_true')\n\n\targs = parser.parse_args()\n\tdonor_directory = args.donor\n\trecipient_directory = args.recipient\n\tout_directory = args.output\n\tfi = args.files\n\t\n\t# donor images\n\ttry:\n\t\thead_image_paths = os.listdir(donor_directory) if not fi else [donor_directory]\n\t\tif not os.path.exists(head_image_paths[0]):\n\t\t\traise ValueError\n\texcept:\n\t\tprint('Did you create the donor image directory?')\n\t\tprint('Quiting ...')\n\t\treturn\n\t\t\n\t# recipient images\n\ttry:\n\t\trecipient_paths = os.listdir(recipient_directory) if not fi else [recipient_directory]\n\t\tif not os.path.exists(recipient_paths[0]):\n\t\t\traise ValueError\n\texcept:\n\t\tprint('Did you create the recipient image directory?')\n\t\tprint('Quiting ...')\n\t\treturn\n\t\n\t# output folder existence\n\tif not os.path.exists(out_directory) and not fi:\n\t\tprint('Did you create the output image directory?')\n\t\tprint('Quiting...')\n\t\treturn\n\t\n\t# log errors\n\tlf = open('./log.txt', 'w')\n\t\n\t\"\"\"\n\tTowards the objectives of the MediFor program, all Progressive GAN generated face images are utilized in combination with all available images in recipient images.\n\t\n\tNaming convention:\n\tThe spliced images are named as <donor image name>--<recipient image name>.png\n\tThe spliced images can be renamed at a later date if a hashing function is used to rename donor or recipient image file names.\t\n\t\"\"\"\n\n\tfor head_img in head_image_paths:\n\t\thead_path = donor_directory + head_img if not fi else head_img\n\t\tfor recipient_img in recipient_paths:\n\t\t\trecipient_path = recipient_directory + recipient_img if not fi else recipient_img\n\t\t\tout_img = head_img.split('.')[0] + '--' + recipient_img.split('.')[0] + '.png'\n\t\t\tout_path = os.path.join(out_directory, out_img) if not fi else out_directory\n\t\t\ttry:\n\t\t\t\tsplice_donor_recipient(recipient_path, head_path, out_path)\n\t\t\t\tprint('donor: {}, recipient: {}\\n output: {}'.format(head_path, recipient_path, out_path))\n\t\t\texcept Exception as err:\n\t\t\t\tprint(err)\n\t\t\t\tlf.write('Issue with: {}\\n'.format(out_img))\n\t\n\tlf.close()", "def convert_directory(dicom_directory, output_folder, compression=True, reorient=True):\n # sort dicom files by series uid\n dicom_series = {}\n for root, _, files in os.walk(dicom_directory):\n for dicom_file in files:\n file_path = os.path.join(root, dicom_file)\n # noinspection PyBroadException\n try:\n if common.is_dicom_file(file_path):\n # read the dicom as fast as possible\n # (max length for SeriesInstanceUID is 64 so defer_size 100 should be ok)\n\n dicom_headers 
= pydicom.read_file(file_path,\n defer_size=\"1 KB\",\n stop_before_pixels=False,\n force=dicom2nifti.settings.pydicom_read_force)\n if not _is_valid_imaging_dicom(dicom_headers):\n logger.info(\"Skipping: %s\" % file_path)\n continue\n logger.info(\"Organizing: %s\" % file_path)\n if dicom_headers.SeriesInstanceUID not in dicom_series:\n dicom_series[dicom_headers.SeriesInstanceUID] = []\n dicom_series[dicom_headers.SeriesInstanceUID].append(dicom_headers)\n except: # Explicitly capturing all errors here to be able to continue processing all the rest\n logger.warning(\"Unable to read: %s\" % file_path)\n traceback.print_exc()\n\n # start converting one by one\n for series_id, dicom_input in dicom_series.items():\n base_filename = \"\"\n # noinspection PyBroadException\n try:\n # construct the filename for the nifti\n base_filename = \"\"\n if 'SeriesNumber' in dicom_input[0]:\n base_filename = _remove_accents('%s' % dicom_input[0].SeriesNumber)\n if 'SeriesDescription' in dicom_input[0]:\n base_filename = _remove_accents('%s_%s' % (base_filename,\n dicom_input[0].SeriesDescription))\n elif 'SequenceName' in dicom_input[0]:\n base_filename = _remove_accents('%s_%s' % (base_filename,\n dicom_input[0].SequenceName))\n elif 'ProtocolName' in dicom_input[0]:\n base_filename = _remove_accents('%s_%s' % (base_filename,\n dicom_input[0].ProtocolName))\n else:\n base_filename = _remove_accents(dicom_input[0].SeriesInstanceUID)\n logger.info('--------------------------------------------')\n logger.info('Start converting %s' % base_filename)\n if compression:\n nifti_file = os.path.join(output_folder, base_filename + '.nii.gz')\n else:\n nifti_file = os.path.join(output_folder, base_filename + '.nii')\n convert_dicom.dicom_array_to_nifti(dicom_input, nifti_file, reorient)\n gc.collect()\n except: # Explicitly capturing app exceptions here to be able to continue processing\n logger.info(\"Unable to convert: %s\" % base_filename)\n traceback.print_exc()", "def prepare_PBS_jobs(self, folders_glob, skeleton_function):\n\n folder_fnames_list = glob(folders_glob)\n basedir = os.getcwd()\n\n for input_folder in folder_fnames_list:\n # get eXXsYY from input/eXXsYY\n system_name = input_folder.split('/')[-1].split('_')[0]\n # create data/eXXsYY if it does not exist already\n data_folder = os.path.realpath(\n os.path.join(\n self.data_folder,\n system_name\n )\n )\n create_folder(data_folder)\n # Symlink the files inside the input folder to the data folder\n create_symlinks(files=os.path.join(input_folder, 'structure*'),\n dst_folder=os.path.realpath(data_folder))\n create_symlinks(files=os.path.join(input_folder, '*.in'),\n dst_folder=os.path.realpath(data_folder))\n # Move inside the data folder\n os.chdir(data_folder)\n skeleton = skeleton_function(\n system_name=system_name,\n job_directory=os.path.join('/work/{}'.format(self.user),\n self.project_name, system_name),\n destination=os.path.realpath(data_folder)\n )\n sim = Simulation(skeleton)\n sim.writeSimulationFiles()\n\n os.chdir(basedir)", "def transform_all_files(in_folder, out_folder):\n if not exists(out_folder):\n mkdir(out_folder)\n all_files = get_all_files_and_nested(in_folder)\n for in_file in all_files:\n out_file_name = in_file.replace(in_folder, out_folder)\n transform_file_to_utf_8_from(in_file, out_file_name=out_file_name)", "def file_name_search():\n directory = \"/Users/andrewpowers/Documents/server/fastq_pass\"\n\n for file in os.listdir(directory):\n output_file = re.sub('fastq', 'fasta', file)\n 
os.system(bash_command.format(directory+\"/\"+file, output_file))\n print('File {} converted to fasta.'.format(file))\n print('Conversion Done.')", "def runAll(self):\n \n worker = worker()\n if self.FileFolder.text() == \"\":\n self.makeWarningPopup(\"Please Select a file or Files to run\") \n elif self.OutputFolder.text() == \"\":\n self.makeWarningPopup(\"Please select an output folder\")\n else:\n TheFiles = self.FileFolder.text()\n TheOutPutFolder = self.OutputFolder.text()\n \n runArt = worker.MakeUITeamConversion(self,TheFiles,TheOutPutFolder)", "def process_all_images(input_path: str, output_path: str, resized_image_shape: Tuple,transformations:List[TransformationsEnum]):\n\n output_images_path = os.path.join(output_path, \"images\")\n csv_file_path = os.path.join(output_path, \"metadata.csv\")\n\n prepare_folders(output_path, output_images_path)\n prepare_csv(csv_file_path)\n\n df = pd.read_csv(csv_file_path)\n current_id = 1 #has to check the current id in the folder or be set to 1 if none\n categories_names = list(os.listdir(input_path))\n\n encoder = LabelBinarizer()\n encoder.fit(categories_names)\n\n\n for folder_name in os.listdir(input_path):\n current_category_name = folder_name\n category_path = os.path.join(input_path, folder_name)\n images_in_category = list(Path(category_path).glob(\"*.jpg\"))\n df, current_id = process_image(\n df, current_id, encoder, current_category_name, images_in_category,output_images_path, resized_image_shape,transformations\n )\n\n df.to_csv(csv_file_path, index=False, quotechar='\"', encoding='ascii')\n\n print(\"done, processed\", len(df), \"images\")", "def main(file):\n\n # Get the current working directory.\n here = os.getcwd()\n #Need the file_name to set globe, so that other functions can access to it.\n global file_name\n # Spite the Input into file_path and file_name.\n file_path = spilt_path(file)[0]\n file_name = spilt_path(file)[1]\n\n # Try to get into the file_path, if exist\n try:\n os.chdir(file_path)\n except IOError, e:\n print e\n\n # Now convert it\n convertFile(file_name)\n # going back to orgin folder\n os.chdir(here)\n return os.path.join(output_dir, file_name)", "def _do_query_extract(self, extract_data):\n import tempfile\n import uuid\n import os\n import sqlite3\n import unicodecsv as csv\n\n p = extract_data['_partition'] # Set in _make_partition_dict\n\n file_name = extract_data.get('name', None)\n \n if file_name:\n file_ = self.bundle.filesystem.path('extracts', file_name)\n else:\n file_ = os.path.join(tempfile.gettempdir(), str(uuid.uuid4()) )\n\n if extract_data.get('query', False):\n query = extract_data['query']\n else:\n\n source_table = extract_data.get('source_table', False)\n \n if not source_table:\n source_table = p.table.name\n \n extract_table = extract_data.get('extract_table', False)\n \n if not extract_table:\n extract_table = source_table\n \n query = self.bundle.schema.extract_query(source_table,extract_table )\n\n where = extract_data.get('extract_where', False)\n \n if where:\n query = query + \" WHERE \"+where\n\n self.bundle.log(\"Running CSV extract from a query\")\n self.bundle.log(\" Partition: {}\".format(p.name))\n self.bundle.log(\" Source table: {}\".format(source_table))\n self.bundle.log(\" Extract Table: {}\".format(extract_table))\n self.bundle.log(\" Query: {}\".format(query.replace('\\n',' ')))\n self.bundle.log(\" Name: {}\".format(extract_data['name'])) \n self.bundle.log(\" Output: {}\".format(file_)) \n\n #self.bundle.log(query)\n\n conn = 
sqlite3.connect(p.database.path)\n\n lr = self.bundle.init_log_rate(100000,\"Extract to {}\".format(file_name))\n\n with open(file_, 'w') as f:\n conn.row_factory = sqlite3.Row\n \n try:\n rows = conn.execute(query)\n except:\n print query\n raise\n \n \n first = rows.fetchone()\n \n if not first:\n raise Exception(\"Got no data from query: {}\".format(query))\n \n writer = csv.writer(f)\n\n writer.writerow(first.keys())\n writer.writerow(tuple(first))\n \n for row in rows:\n lr()\n writer.writerow(tuple(row))\n\n return file_", "def move_trajs_to_folder(self, input_folders):\n if type(input_folders) == str:\n input_folders = glob(input_folders)\n elif type(input_folders) == list:\n pass\n else:\n raise ValueError('input_folders must be of type str or list')\n data_folder = os.path.abspath(self.data_folder)\n for folder in input_folders:\n dst_folder = os.path.join(data_folder, os.path.basename(folder))\n create_folder(dst_folder)\n os.rename(\n src=os.path.abspath(os.path.join(folder, 'Production.nc')),\n dst=os.path.join(dst_folder, 'Production.nc')\n )", "def main(input_folder, output_folder, degree, stream_mode, concurrent_mode, chinese_naming, trace_mode):\n\n if not os.path.exists(input_folder):\n logger.error('input path [{}] not exist, quit'.format(input_folder))\n exit(0)\n\n if not os.path.isdir(input_folder):\n logger.error('input path [{}] is not folder, quit'.format(input_folder))\n exit(0)\n\n if output_folder is None:\n output_folder = os.path.join(input_folder, 'cleaned')\n if not os.path.exists(output_folder):\n os.mkdir(output_folder)\n\n if not os.path.exists(output_folder):\n logger.error('output path [{}] not exist, quit'.format(output_folder))\n exit(0)\n\n if not os.path.isdir(output_folder):\n logger.error('output path [{}] is not folder, quit'.format(output_folder))\n exit(0)\n\n if trace_mode is None:\n trace_mode = False\n\n if stream_mode is None:\n stream_mode = False\n\n if concurrent_mode is None:\n concurrent_mode = False\n\n if chinese_naming is None:\n chinese_naming = False\n\n logger.info('program is running in watching mode, watch path \\'{}\\', press Control-C to stop'.format(input_folder))\n\n pool = None\n if concurrent_mode:\n logger.info('init process pool ... ')\n max_count = multiprocessing.cpu_count()\n pool = multiprocessing.Pool(max_count)\n logger.info('{} processes initialed'.format(max_count))\n\n event_handler = InputFileMatchingEventHandler(batch_cleansing, output_folder, degree, stream_mode, concurrent_mode, pool, chinese_naming, trace_mode)\n observer = Observer()\n observer.schedule(event_handler, input_folder, recursive=False)\n observer.start()\n try:\n while True:\n time.sleep(1)\n except KeyboardInterrupt:\n logger.info('program stopped')\n observer.stop()\n except Exception as e:\n logger.error(e)\n observer.stop()\n finally:\n observer.join()\n pool.close()\n pool.join()", "def main():\n\n\t# Script arguments... 
\n\t\"\"\" If running as standalone, hardcode theWorkspace and inFile \"\"\"\n\ttheWorkspace = arcpy.GetParameterAsText(0)\n\tif not theWorkspace:\n\t\ttheWorkspace = r\"d:\\_dataTest\"\n\ttheWorkspace = r\"d:\\_dataTest\"\n\tarcpy.env.workspace = theWorkspace\n\tarcpy.env.overwriteOutput = True\n\toutWorkspace = os.path.join(theWorkspace, \"_repair\")\n\n\tinFile = arcpy.GetParameterAsText(1)\n\tif not inFile:\n\t\tinFile = \"updateMultipleSourcePaths.csv\"\n\t#inFile = \"FixSource4.csv\"\n\t#inFile = os.path.join(theWorkspace, inFile) + \".csv\"\n\t# opens the infile.csv, read only; then creates tuple of inFile\n\t#f = open(inFile, \"r\") \n\t#update_list = [tuple(line.strip().split(\",\") for line in f)]\n\n\n\tmxd = None\n\toutMXDName = \"none\"\n\tnewPath = []\n\t# makes sure the .csv file exists\n\tif arcpy.Exists(inFile):\n\t\tmyMsgs (\"Repair source list: \" + inFile)\n\t\t# walks thru the workspace to create list of files \n\t\tfor root, dirs, files in os.walk(theWorkspace): \n\t\t\tif root == outWorkspace:\n\t\t\t\tprint(\"heh now\")\n\t\t\t\tpass\n\t\t\t# creates list of .mxd's and works thru them\n\t\t\tmxdList = arcpy.ListFiles(\"*.mxd\")\n\t\t\tfor fileName in mxdList:\n\t\t\t\tfullPath = os.path.join(root, fileName) \n\t\t\t\tmxd = arcpy.mapping.MapDocument(fullPath)\n\t\t\t\tmyMsgs (\"*** Processing mxd: \" + fullPath)\n\t\t\t\t#mxd.findAndReplaceWorkspacePaths(\"v:\\\\\", \"\\\\\\\\dfg.alaska.local\\\\gis\\\\Anchorage\\\\gisshare\\\\\", validate=False)\n\t\t\t\t#mxd.findAndReplaceWorkspacePaths(\"t:\\\\\", \"\\\\\\\\dfg.alaska.local\\\\gis\\\\Anchorage\\\\GISStaff\\\\\", validate=False)\n\t\t\t\t#mxd.findAndReplaceWorkspacePaths(\"u:\\\\\", \"\\\\\\\\dfg.alaska.local\\\\gis\\\\Anchorage\\\\GISStaff\\\\\", validate=False)\n\t\t\t\t# New output mxd....\n\t\t\t\tbasename, extension = os.path.splitext(fileName)\n\t\t\t\toutMXDName = os.path.join(outWorkspace, (str(basename) + \"_fix.mxd\"))\n\t\t\t\t# create list of the tables since they are handle differently\n\t\t\t\ttheTables = arcpy.mapping.ListTableViews(mxd)\n\t\t\t\t# Loops thru layers, checks for broken links and tries to repai\n\t\t\t\tlyrList = arcpy.mapping.ListLayers(mxd)\n\t\t\t\tfor lyr in lyrList:\n\t\t\t\t\tif lyr.isBroken:\n\t\t\t\t\t\tif lyr.isGroupLayer or (\"Events\" in lyr.name):\n\t\t\t\t\t\t\tprint(\"...skipping group or event\")\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t#print(lyr.isServiceLayer)\n\t\t\t\t\t\tif lyr.isServiceLayer:\n\t\t\t\t\t\t\tif lyr.supports(\"SERVICEPROPERTIES\"):\n\t\t\t\t\t\t\t\tcnt = 0\n\t\t\t\t\t\t\t\tfor i, j in lyr.serviceProperties.iteritems():\n\t\t\t\t\t\t\t\t\tif cnt == 2:\n\t\t\t\t\t\t\t\t\t\tdataSource = str(j)\n\t\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\tcnt += 1 \n\t\t\t\t\t\t\t\tprint(\"sees this as service....using findAndReplsWorkspacePath\")\n\t\t\t\t\t\t\t\tnewPath = findUpdatePath(inFile, dataSource)\n\t\t\t\t\t\t\t\tlyr.findAndReplaceWorkspacePath(lyr.dataSource, newPath, False)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tprint(\"--> a service layer but no SERVICE PROPOERTIES\")\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tprint(lyr.dataSource)\n\t\t\t\t\t\t\tnewPath = findUpdatePath(inFile, lyr.dataSource)\n\t\t\t\t\t\t\tnewDSPath, newDSName = os.path.split(newPath[0])\n\t\t\t\t\t\t\tprint(\"..newDSPAth \" + newDSPath)\n\t\t\t\t\t\t\tprint(\"..newDSName \" + newDSName)\n\t\t\t\t\t\t\tsameType = newPath[1]\n\t\t\t\t\t\t\tprint(\" same type? 
\" + str(sameType))\n\t\t\t\t\t\t\tcvrList = [r\"\\arc\", r\"\\polygon\", r\"\\region\", r\"\\point\", r\"\\tic\" ]\n\t\t\t\t\t\t\t#print newDSPath\n\t\t\t\t\t\t\tif newPath == \"no match\":\n\t\t\t\t\t\t\t\tprint(\"...no match to: \" + lyr.dataSource)\n\t\t\t\t\t\t\t\tnewPath[0] = \"not found\"\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\telif lyr.supports(\"dataSource\") and lyr.supports(\"datasetName\"):\n\t\t\t\t\t\t\t\tif lyr in theTables:\n\t\t\t\t\t\t\t\t\tprint(\"thinks its a table....using findAndReplsWorkspacePath\")\n\t\t\t\t\t\t\t\t\tlyr.findAndReplaceWorkspacePath(lyr.dataSource, newPath, False) \n\t\t\t\t\t\t\t\telif lyr.isRasterLayer:\n\t\t\t\t\t\t\t\t\tprint(\"thinks its a raster....using findAndReplsWorkspacePath\")\n\t\t\t\t\t\t\t\t\t#lyr.replaceDataSource(newPath, \"RASTER_WORKSPACE\", lyr.datasetName, False)\n\t\t\t\t\t\t\t\t\tlyr.findAndReplaceWorkspacePath(lyr.dataSource, newPath, False)\n\t\t\t\t\t\t\t\telif lyr.supports(\"dataSource\") and lyr.supports(\"datasetName\"):\n\t\t\t\t\t\t\t\t\tif not sameType and newPath[1] == \"gdb\":\n\t\t\t\t\t\t\t\t\t\tprint(\"..................moving to fgdb\")\n\t\t\t\t\t\t\t\t\t\tlyr.replaceDataSource(newDSPath, \"FILEGDB_WORKSPACE\", newDSName, False) \n\t\t\t\t\t\t\t\t\telif r\".shp\" in lyr.dataSource:\n\t\t\t\t\t\t\t\t\t\tprint(\"thinks its a shape\")\n\t\t\t\t\t\t\t\t\t\tlyr.replaceDataSource(newDSPath, \"SHAPEFILE_WORKSPACE\", lyr.datasetName, False)\n\t\t\t\t\t\t\t\t\telif r\".sde\" in lyr.dataSource:\n\t\t\t\t\t\t\t\t\t\tprint(\"thinks its a sde\")\n\t\t\t\t\t\t\t\t\t\tlyr.replaceDataSource(newDSPath, \"SDE_Workspace\", lyr.datasetName, False)\n\t\t\t\t\t\t\t\t\telif r\".mdb\" in lyr.dataSource:\n\t\t\t\t\t\t\t\t\t\tprint(\"thinks its a pgdb\")\n\t\t\t\t\t\t\t\t\t\tlyr.replaceDataSource(newDSPath, \"ACCESS_WORKSPACE\", lyr.datasetName, False)\n\t\t\t\t\t\t\t\t\telif r\".gdb\" in lyr.dataSource:\n\t\t\t\t\t\t\t\t\t\tprint(\"thinks its a fgdb\")\n\n\t\t\t\t\t\t\t\t\t\tlyr.replaceDataSource(newDSPath, \"FILEGDB_WORKSPACE\", lyr.datasetName, False)\n\t\t\t\t\t\t\t\t\telif sameType:\n\t\t\t\t\t\t\t\t\t\tfor cvr in cvrList:\n\t\t\t\t\t\t\t\t\t\t\tif cvr in lyr.dataSource:\n\t\t\t\t\t\t\t\t\t\t\t\tprint(\"to WS sametype is True\")\n\t\t\t\t\t\t\t\t\t\t\t\tlyr.replaceDataSource(newDSPath, \"ARCINFO_WORKSPACE\", newDSName, False)\n\t\t\t\t\t\t\t\t\telif not sameType:\n\t\t\t\t\t\t\t\t\t\tfor cvr in cvrList:\n\n\t\t\t\t\t\t\t\t\t\t\tlyr.replaceDataSource(newDSPath, \"FILEGDB_WORKSPACE\", newDSName, False)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\"\"\"else:\n newPath[0] = \"not found\" \"\"\"\n\t\t\t\t\t\t\tprint(\" **** the new data source: \" + newPath[0])\n\t\t\t\t\t\t\tprint(\"\")\n\n\t\t\t\tprint(outMXDName)\n\t\t\t\t#mxd.saveACopy(outMXDName, '10.1')\n\t\t\tif arcpy.Exists(outMXDName):\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\toutMXDName.save()\n\t\t\t\telse:\n mxd.saveACopy(outMXDName, '10.1')\n\t\t\t\tdel mxd\n\telse:\n\t\tmyMsgs (\"Repair source list: \" + inFile + \" does not exit.\")\n\n\tmyMsgs('!!! Success !!! 
')", "def process(self, tile):\n directory = os.path.join(self.Cg_Cfg.output_preprocess, tile.upper())\n print(\"Start speckle filtering: \" + tile.upper())\n year_outcore_list = [\"2019\", \"2018\"]\n year_filter_list = [\"2019\", \"2018\"]\n\n year_outcore_str = \"-\".join(year_outcore_list) # pour les noms de fichiers\n\n filelist_s1des = []\n filelist_s1asc = []\n filelist_s1des_updateoutcore = []\n filelist_s1asc_updateoutcore = []\n # Build the lists of files :\n # - for computing outcores\n # - for filtering\n\n for y in year_outcore_list:\n for file_it in glob.glob(os.path.join(directory, \"s1?_?????_??_DES_???_\" + y + \"????t??????.tif\")):\n filelist_s1des_updateoutcore.append(file_it)\n\n for file_it in glob.glob(os.path.join(directory, \"s1?_?????_??_ASC_???_\" + y + \"????t??????.tif\")):\n filelist_s1asc_updateoutcore.append(file_it)\n\n # Select only 100 images for the outcore dataset (for both ASC and DES outcores)\n filelist_s1des_updateoutcore = filelist_s1des_updateoutcore[:100]\n filelist_s1asc_updateoutcore = filelist_s1asc_updateoutcore[:100]\n\n for y in year_filter_list:\n for file_it in glob.glob(os.path.join(directory, \"s1?_?????_??_DES_???_\" + y + \"????t??????.tif\")):\n filelist_s1des.append(file_it)\n\n for file_it in glob.glob(os.path.join(directory, \"s1?_?????_??_ASC_???_\" + y + \"????t??????.tif\")):\n filelist_s1asc.append(file_it)\n\n print(filelist_s1des)\n print()\n print(filelist_s1asc)\n print()\n\n if self.Cg_Cfg.Reset_outcore:\n processed_files = []\n try:\n os.remove(os.path.join(directory, \"outcore\" + year_filter + \".txt\"))\n except:\n pass\n else:\n try:\n processed_files = \\\n pickle.load(open(os.path.join(directory, \"outcore\" + year_filter + \".txt\")))\n except pickle.PickleError:\n processed_files = []\n\n # Compute the outcores for ASC and DES images\n\n for file_it in processed_files:\n try:\n filelist_s1des_updateoutcore.remove(file_it)\n filelist_s1asc_updateoutcore.remove(file_it)\n except ValueError:\n pass\n\n # Build the strings containing the filenames to be processed\n filelist_s1des_updateoutcore_str = \" \".join(filelist_s1des_updateoutcore)\n filelist_s1asc_updateoutcore_str = \" \".join(filelist_s1asc_updateoutcore)\n filelist_s1des_str = \" \".join(filelist_s1des)\n filelist_s1asc_str = \" \".join(filelist_s1asc)\n\n pids = []\n\n # Adapts the processing ressources to only two processes\n\n ram_per_process = int(self.Cg_Cfg.ram_per_process * self.Cg_Cfg.nb_procs / 2)\n OTBThreads = int(self.Cg_Cfg.OTBThreads * self.Cg_Cfg.nb_procs / 2)\n\n ####### TK\n # On vide la liste des fichiers ASC pour eviter de calculer l'outcore\n filelist_s1asc_updateoutcore = []\n filelist_s1asc = []\n #\n\n if filelist_s1des_updateoutcore:\n command = 'export ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS={};'.format(OTBThreads)\\\n + \"otbcli_MultitempFilteringOutcore -progress false -inl \"\\\n + filelist_s1des_updateoutcore_str + \" -oc \"\\\n + os.path.join(directory, \"outcore\" + year_outcore_str + \"_S1DES.tif\")\\\n + \" -wr {}\".format(self.Cg_Cfg.Window_radius)\\\n + \" -ram {}\".format(str(ram_per_process))\n pids.append([Popen(command, stdout=self.Cg_Cfg.stdoutfile,\n stderr=self.Cg_Cfg.stderrfile, shell=True), command])\n if filelist_s1asc_updateoutcore:\n command = 'export ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS={};'.format(OTBThreads)\\\n + \"otbcli_MultitempFilteringOutcore -progress false -inl \"\\\n + filelist_s1asc_updateoutcore_str + \" -oc \"\\\n + os.path.join(directory, \"outcore\" + year_outcore_str + \"_S1ASC.tif\")\\\n + \" 
-wr \" + str(self.Cg_Cfg.Window_radius)\\\n + \" -ram {}\".format(str(ram_per_process))\n pids.append([Popen(command, stdout=self.Cg_Cfg.stdoutfile,\n stderr=self.Cg_Cfg.stderrfile, shell=True), command])\n try:\n os.makedirs(os.path.join(directory, \"filtered\"))\n except os.error:\n pass\n\n title = \"Compute outcore\"\n nb_cmd = len(pids)\n print(title + \"... 0%\")\n while len(pids) > 0:\n\n for i, pid in enumerate(pids):\n status = pid[0].poll()\n if status:\n print(\"Error in pid #\" + str(i) + \" id = \" + str(pid[0]))\n print(pid[1])\n del pids[i]\n break\n\n elif status == 0:\n del pids[i]\n print(title + \"... \" + str(int((nb_cmd - len(pids)) * 100. / nb_cmd)) + \"%\")\n time.sleep(0.2)\n break\n time.sleep(2)\n\n processed_files = processed_files + filelist_s1des_updateoutcore\\\n + filelist_s1asc_updateoutcore\n\n pickle.dump(processed_files, open(os.path.join(directory, \"outcore.txt\"), 'w'))\n\n # Compute the filtered images using the outcores\n\n pids = []\n if filelist_s1des:\n command = 'export ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS={};'.format(OTBThreads)\\\n + \"otbcli_MultitempFilteringFilter -progress false -inl \"\\\n + filelist_s1des_str + \" -oc \"\\\n + os.path.join(directory, \"outcore\" + year_outcore_str + \"_S1DES.tif\")\\\n + \" -wr \" + str(self.Cg_Cfg.Window_radius) + \" -enl \"\\\n + os.path.join(directory, \"filtered\", \"enl_\" + year_outcore_str + \"_S1DES.tif\")\\\n + \" -ram {}\".format(str(ram_per_process))\n pids.append([Popen(command, stdout=self.Cg_Cfg.stdoutfile,\n stderr=self.Cg_Cfg.stderrfile, shell=True), command])\n\n if filelist_s1asc:\n command = 'export ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS={};'.format(OTBThreads)\\\n + \"otbcli_MultitempFilteringFilter -progress false -inl \"\\\n + filelist_s1asc_str + \" -oc \"\\\n + os.path.join(directory, \"outcore\" + year_outcore_str + \"_S1ASC.tif\")\\\n + \" -wr \" + str(self.Cg_Cfg.Window_radius) + \" -enl \"\\\n + os.path.join(directory, \"filtered\", \"enl_\" + year_outcore_str + \"_S1ASC.tif\")\\\n + \" -ram {}\".format(str(ram_per_process))\n pids.append([Popen(command, stdout=self.Cg_Cfg.stdoutfile,\n stderr=self.Cg_Cfg.stderrfile, shell=True), command])\n\n title = \"Compute filtered images\"\n nb_cmd = len(pids)\n print(title + \"... 0%\")\n while len(pids) > 0:\n\n for i, pid in enumerate(pids):\n status = pid[0].poll()\n if status:\n print(\"Error in pid #\" + str(i) + \" id = \" + str(pid[0]))\n print(pid[1])\n del pids[i]\n break\n\n elif status == 0:\n del pids[i]\n print(title + \"... \" + str(int((nb_cmd - len(pids)) * 100. 
/ nb_cmd)) + \"%\")\n time.sleep(0.2)\n break\n time.sleep(2)\n\n filtering_directory = os.path.join(directory, 'filtered/')\n for f in os.listdir(filtering_directory):\n fullpath = os.path.join(filtering_directory, f)\n if os.path.isfile(fullpath) and f.startswith('s1') and f.endswith('filtered.tif'):\n dst = gdal.Open(fullpath, gdal.GA_Update)\n dst.SetMetadataItem('FILTERED', 'true')\n dst.SetMetadataItem('FILTERING_WINDOW_RADIUS', str(self.Cg_Cfg.Window_radius))\n dst.SetMetadataItem('FILTERING_PROCESSINGDATE', str(datetime.datetime.now()))", "def preprocess(self):\n filtered_data = pd.read_csv(self.input)\n\n if self.config.getboolean(\"filterMissingsInGenes\"):\n # first filter out the genes that have more missings than threshold\n filtered_data = self.filterMissings(self.config[\"threshold\"], filtered_data)\n if self.config.getboolean(\"filterMissingsInSamples\"):\n # second transpose matrix and filter out samples that have more missings than threshold\n filtered_samples = self.filterMissings(self.config[\"threshold\"], filtered_data.T)\n filtered_data = filtered_samples.T\n\n # transpose back into original orientation and save\n filePrefix = self.input.split(\"/\")[-1].split(\".\")[\n 0] # split path by / to receive filename, split filename by . to receive filename without ending\n filename = self.output + filePrefix + \"_filtered.csv\"\n filtered_data.to_csv(filename, index=False)\n return filename", "def runDataExtraction():\r\n config = CONFIG['steps']['DataExtraction']\r\n ci = config['inputs']\r\n co = config['outputs']\r\n columns = ci['columns']\r\n nrows = ci['nrows']\r\n input_bucket = ci['bucket']\r\n no_of_files = ci['no_of_files']\r\n\r\n output_bucket = co['bucket']\r\n csv_name_prefix = co['csv_name_prefix']\r\n\r\n minio_config = CONFIG['artifacts']['minio']\r\n minioClient = create_minio_client(minio_config[\"endpoint_url\"],\r\n access_key=minio_config[\"access_key\"],\r\n secret_key=minio_config[\"secret_key\"],\r\n secure=minio_config['secure'])\r\n\r\n boto_client = boto3.client(\"s3\",\r\n endpoint_url=minio_config[\"endpoint_url\"],\r\n aws_access_key_id=minio_config[\"access_key\"],\r\n aws_secret_access_key=minio_config[\"secret_key\"],\r\n region_name=minio_config[\"region_name\"])\r\n\r\n zip_files = get_files(input_bucket, boto_client, file_type='zip')\r\n\r\n no_of_files_to_process = no_of_files if no_of_files is not None else len(\r\n zip_files)\r\n for zip_file in tqdm(zip_files[:no_of_files_to_process], total=no_of_files_to_process):\r\n process_file(zip_file, input_bucket, output_bucket, minioClient, columns,\r\n nrows=nrows, output_csv_name_prefix=csv_name_prefix)", "def get_output(self, output_dir=\"tools_output\"):\n\n output_dir = self.project_dir / output_dir / self.name\n # create output directory if didn't exist\n if not output_dir.exists():\n os.makedirs(output_dir)\n logger.info(f\"Created {output_dir}\")\n\n for outfile in self.output:\n outfile = self.project_dir / outfile\n if outfile.exists():\n src = os.fspath(outfile)\n dst = os.fspath(output_dir / outfile.name)\n shutil.move(src, dst)\n logger.info(f\"Moved {outfile.name} to {output_dir}\")\n else:\n msg = f\"File not found: {outfile} - did you execute run() before?\"\n logger.error(msg)\n raise FileNotFoundError(msg)", "def test_batch(from_dir, to_dir, doc_type):\n\n if from_dir[-1] != \"/\":\n from_dir = from_dir + \"/\"\n if to_dir[-1] != \"/\":\n to_dir = to_dir + \"/\"\n\n os.chdir(from_dir)\n for pdf_file in os.listdir(from_dir):\n if pdf_file.endswith(\".pdf\"):\n # Appends a 
row to the csv file \"output.csv\" with the stats from that particular document\n analyze(from_dir, pdf_file, doc_type)\n\n # Moving to the 'to' directory since we're done analyzing it.\n destination = to_dir + pdf_file\n shutil.move(from_dir+ pdf_file, destination)", "def treat(input, output):\n files = find(input)\n acc = []\n for file in files:\n fileInfo = extract(file)\n out = makeOutputPath(output, fileInfo[\"path\"], fileInfo[\"filename\"])\n if not out == None:\n fileInfo[\"outPath\"] = out\n acc += [fileInfo]\n return acc", "def main():\r\n\r\n directory = 'D:\\\\Profession\\\\Intern\\\\Assignments\\\\Codes\\\\Assignement Codes\\\\Part 2\\\\data_dumps'\r\n path = os.path.join(directory, 'dump_3')\r\n if not (os.path.exists(path)):\r\n os.mkdir(path)\r\n\r\n for date in range(1, 31):\r\n # date-month-year\r\n # file_name1 = path + '\\\\' + str(date) + '-8-2020' + '_file1.txt'\r\n\r\n # year-month-date\r\n # file_name1 = path + '\\\\' + '2020-08-' + str(date) + '_file3.txt'\r\n\r\n # month_year_date\r\n file_name1 = path + '\\\\' + 'Aug_2020_' + str(date) + '_file5.txt'\r\n\r\n # date-month-year\r\n # file_name2 = path + '\\\\' + str(date) + '-8-2020' + '_file2.txt'\r\n\r\n # year-month-date\r\n # file_name2 = path + '\\\\' + '2020-08-' + str(date) + '_file4.txt'\r\n\r\n # month_year_date\r\n file_name2 = path + '\\\\' + 'Aug_2020_' + str(date) + '_file6.txt'\r\n\r\n rows = []\r\n for row in range(100):\r\n string = 'asddfgfhgkhjghkweoriuywoipywbnxvnmznvnmbatr'\r\n rows.append(string)\r\n with open(file_name1, 'w') as f1, open(file_name2, 'w') as f2:\r\n f1.writelines(rows)\r\n f2.writelines(rows)", "def split_excel_files(self):\n for x in self.files:\n if x[-4:] not in [\".xls\", \"xlsx\"]:\n continue\n else:\n files = pd.read_excel(x, sheet_name=None)\n for k, v in files.items():\n #get name with the extension stripped\n name = k.split(\".\")[0]\n out_path = x.split(\".\")[0]\n try:\n os.mkdir(out_path)\n except:\n print(\"directory exists\")\n v.to_csv(f\"{out_path}/{name}.csv\", index=False)\n os.remove(x)\n self.files = [os.path.join(dp, f) for dp, dn, fn in os.walk(self.path) for f in fn]\n self.csv_files = [x for x in self.files if x[-3:] == \"csv\"]", "def county_file_merger(folder_path):\n\n print(\"\\n*******************--- Starting File Merger for .csv files ---*******************\")\n with open(\"result.csv\",\"wb\") as outfile:\n for filename in os.listdir(folder_path):\n with open(filename,\"rb\") as infile:\n for line in infile:\n outfile.write(line)\n infile.close()\n outfile.close()\n print(\"\\nResult saved to -----> result.csv \")\n print(\"\\n*******************--- Finished File Merger for .csv files ---*******************\")", "def wrt_gau_input(self):\n fp = open(\"myfiles.dat\", \"w\") \n nmol = self.model['nmol']\n prefix = self.config['job_prefix']\n incr = self.config['incr']\n for i in xrange(0, nmol, incr):\n self.wrt_gau_input_once(i)\n print >>fp, \"%sx%s.gjf\" % (prefix, i)\n fp.close()\n \n # extra jobs\n fp = open(\"link.sh\", \"w\")\n print >>fp, \"#! 
/bin/bash\"\n print >>fp, \"# sampling shell input\"\n print >>fp, \"rm linking.gjf\"\n print >>fp, \"myfiles=`more myfiles.dat`\"\n print >>fp, \"\"\"\n for onefile in $myfiles;\n do cat $onefile >> linking.gjf;\n echo -e '\\\\n--Link1--\\\\n' >> linking.gjf;\n done\n \"\"\"\n fp.close()\n return", "def copy_images(proposal, run_number, source_dir, target_dir, tiff_file_path, tiff_file_name):\n\tprint('\\n\\nIn copy_images().\\nproposal: {}\\nrun_number: {}\\n\\n'.format(proposal, run_number, tiff_file_path, tiff_file_name))\n\n\t# Determine proper subdirectories.\n\tsource_dir, ipts_dir, new_subdir = determine_subdirectories(tiff_file_path)\n\n\t# Determine source and target directories.\n\tinitial_image_dir, new_image_dir = determine_source_and_target_directories(source_dir, ipts_dir, target_dir, proposal, new_subdir, run_number)\n\t\t\t\n\t# Identify target files (for use in cataloging). Wait for file count to be stable for at least 60.0 seconds.\n\t# target_files = get_target_files(initial_image_dir, run_number, new_image_dir)\n\ttarget_files = get_target_files_patiently(initial_image_dir, run_number, new_image_dir, wait_period_sec=60.0)\n\n\tprint('\\n\\nIn copy_images().\\ninitial_image_dir: {}\\nnew_image_dir: {}\\n\\n'.format(initial_image_dir, new_image_dir))\n\n\t# Assure target directory exists.\n\tassure_directory_exists(new_image_dir)\n\tcopy_files_batch(initial_image_dir, new_image_dir, run_number)\n\n\t# ---------------------\n\t# Handle raw tpx3 files\n\tinitial_tpx3_dir, new_tpx3_dir = determine_raw_tpx3_directories(target_dir, proposal, new_subdir, run_number)\n\t# Identify target tpx files. Wait for file count to be stable for at least 60.0 seconds.\n\ttarget_files = get_target_files_patiently(initial_tpx3_dir, run_number, new_tpx3_dir, wait_period_sec=60.0, for_main_image_files=False)\n\n\tprint('\\n\\nIn copy_images(); raw tpx3 portion.\\ninitial_tpx3_dir: {}\\nnew_tpx3_dir: {}\\n\\n'.format(initial_tpx3_dir, new_tpx3_dir))\n\n\t# Assure target directory exists.\n\tassure_directory_exists(new_tpx3_dir)\n\tcopy_tpx3_files_batch(initial_tpx3_dir, new_tpx3_dir)\n\n\treturn target_files", "def subsample_imageset(self, source_folder_name, destination_folder_name, sample_step=4):\n photo_list = self.get_photo_list(source_folder_name)\n for i in range(0, len(photo_list), sample_step):\n copyfile(source_folder_name + '/' + photo_list[i], destination_folder_name + '/' + photo_list[i])", "def main_convert():\n\n verbose = True\n\n # Build parser.\n parser = argparse.ArgumentParser()\n\n parser.add_argument('fname_pattern', action='store', help='File name pattern')\n parser.add_argument('-R', '--recursive', action='store_true', default=True,\n help='Search several subdirectories')\n\n # Run parser, extract arguments.\n args = parser.parse_args()\n\n # List of files.\n pattern = os.path.normpath(unicode(args.fname_pattern))\n\n if os.path.isdir(pattern):\n pattern = os.path.join(pattern, '*')\n fname_list = glob.glob(pattern)\n\n pattern = os.path.join(pattern, '*')\n fname_list.extend(glob.glob(pattern))\n\n pattern = os.path.join(pattern, '*')\n fname_list.extend(glob.glob(pattern))\n\n pattern = os.path.join(pattern, '*')\n fname_list.extend(glob.glob(pattern))\n\n else:\n fname_list = glob.glob(pattern)\n\n to_be_removed = []\n for f in fname_list:\n if os.path.isdir(f):\n to_be_removed.append(f)\n\n for f in to_be_removed:\n fname_list.remove(f)\n\n # Do the work.\n num_files = len(fname_list)\n for k, f_src in enumerate(fname_list):\n f_src = os.path.abspath(f_src)\n\n 
b_src, e = os.path.splitext(f_src)\n\n folder = os.path.basename(os.path.dirname(f_src))\n if (e == '.mp3' or e == '.wma' or e == '.wav' or e == '.aiff') and b_src != 'tmp' and folder != '.audio_convert':\n\n if verbose:\n try:\n print('%3d/%d: [%s -> .m4a] %s' % (k, num_files, e, os.path.basename(b_src)))\n except Exception as e:\n val = repr(f_src)\n raise Exception('Problem processing file: %s' % val)\n\n # Temporary working copy.\n path_work = os.path.dirname(f_src)\n f_tmp_src = os.path.join(path_work, 'tmp' + e)\n shutil.copy(f_src, f_tmp_src)\n\n # Transcode file format.\n f_tmp_dst = convert(f_tmp_src, verbose=verbose)\n\n # Finish.\n b_tmp_dst, e_dst = os.path.splitext(f_tmp_dst)\n\n f_dst = b_src + e_dst\n if os.path.isfile(f_dst):\n os.remove(f_dst)\n os.rename(f_tmp_dst, f_dst)\n\n if os.path.isfile(f_tmp_src):\n os.remove(f_tmp_src)\n\n if os.path.isfile(f_dst):\n move_processed_file(f_src)\n\n # Done.", "def FS1Year(inputFolderPath = './Formatted Files Without Missing', outputFolderPath = './Feature Selection'):\n\tfileList = []\n\tfor root, dirs, files in os.walk(inputFolderPath): \n\t for afile in files:\n\t \tfileList.append(afile)\n\n\ttargetList = [2704,2707,2713,2716,2718,808,811,1954]\n\t# targetList = [1994,1997,2003,2006,2008,807,810,1953]\n\tyearList = [(1998,2015),(2005,2015),(2005,2015),(2005,2015),(2005,2015),(1960,2014),(1961,2014),(2002,2012)]\n\n\n\tfor i in range(len(targetList)):\n\t\t# i = 0\n\t\trows = []\n\t\tfor year in range(yearList[i][0],yearList[i][1]+1):\n\t\t\t# print str(year) + '-' + str(targetList[i]) \n\t\t\tregex = re.compile(\"(\"+ str(year) +\").*\")\n\t\t\tfiles = [m.group(0) for l in fileList for m in [regex.search(l)] if m and len(l) == 28]\n\t\t\t# print files\n\t\t\t# call([\"java\",\"-jar\",\"MINE.jar\",\"./New Formatted Files/\"+files[0],str(targetList[i]+1),\"cv=0.5\"])\n\t\t\t\n\n\t\t\t# load the CSV file as a numpy matrix\n\t\t\t# dataset = np.loadtxt('./New Formatted Files/'+files[0], delimiter=\",\", skiprows=1, usecols=tuple(range(1,3240)))\n\t\t\t# dataset = np.genfromtxt('./New Formatted Files/'+files[0], delimiter=\",\", names=True, autostrip=True, max_rows=10, missing_values=np.nan, usecols=tuple(range(1,30)))\n\t\t\twith open(inputFolderPath+'/'+files[0],'rb') as f:\n\t\t\t reader = csv.reader(f)\n\t\t\t header = next(reader)\n\t\t\t num_cols = len(header)\n\t\t\t # print header\n\t\t\t print i\n\t\t\t target_idx = [idx for idx, item in enumerate(header) if item.startswith(str(targetList[i]).zfill(4))]\n\t\t\t if len(target_idx) > 0:\n\t\t\t \ttarget = target_idx[0]-1\n\t\t\t \tprint ('OK',year, targetList[i], inputFolderPath+'/'+files[0])\n\t\t\t else:\n\t\t\t \tprint (year, targetList[i], inputFolderPath+'/'+files[0])\n\t\t\t \tbreak\n\t\t\t f.close()\n\t\t\tdataset = np.genfromtxt(inputFolderPath+'/'+files[0], delimiter=\",\", skip_header=1, autostrip=True, missing_values=np.nan, usecols=tuple(range(1,num_cols)))\n\t\t\t# print (dataset.shape)\n\t\t\tX = np.concatenate((dataset[:,0:target],dataset[:,target+1:dataset.shape[1]]),axis=1)\n\t\t\t# X = np.concatenate((dataset[:,0:2],dataset[:,3:dataset.shape[1]),axis=1)\n\t\t\ty = dataset[:,target]\n\t\t\t# print tuple(range(1,3240))\n\t\t\t# print dataset.dtype.names[0]\n\t\t\t# print dataset.dtype.names[-1]\n\t\t\t# print dataset[0]\n\t\t\timp = Imputer(missing_values='NaN', strategy='median', axis=0)\n\t\t\timputedX = imp.fit_transform(X,y)\n\t\t\timputedX = np.array([imputedX[j] for j in range(imputedX.shape[0]) if not np.isnan(y[j])])\n\t\t\tdeleteMissingY = np.array([x1 
for x1 in y if not np.isnan(x1)])\n\t\t\t# print dataset[0]\n\t\t\t# print (imputedX.shape, y.shape)\n\t\t\t# print (imputedX.shape, deleteMissingY.shape)\n\t\t\t# print (np.any(np.isnan(imputedX)), np.all(np.isfinite(imputedX)))\n\t\t\t# imputedX_new = SelectKBest(chi2, k=10).fit_transform(imputedX, y)\n\t\t\tk = 30\n\t\t\tselection = SelectKBest(f_regression, k=k)\n\t\t\timputedX_new = selection.fit_transform(imputedX, deleteMissingY)\n\t\t\t# print (len(selection.get_support()), len(header[1:target+1]+header[target+2:]))\n\t\t\tselectedFeatures = [[item, selection.scores_[idx], selection.pvalues_[idx]] for idx, item in enumerate(header[1:target+1]+header[target+2:]) if selection.get_support()[idx]]\n\t\t\tselectedFeatures.sort(key=lambda x: x[1], reverse=True)\n\t\t\t# for sf in selectedFeatures:\n\t\t\t# \tprint sf\n\t\t\t# print selection.scores_\n\t\t\t# print selection.get_support()\n\t\t\t# print (imputedX_new.shape, y.shape)\n\t\t\t# print (imputedX_new.shape, deleteMissingY.shape)\n\t\t\t# print imputedX[0,1994]\n\t\t\t# print dataset['3137_Estimates_and_projections_of_the_total_population_by_sex_age_and_rural__urban_areasSexTotal_10year_age_bands__2534_Geographical_coverage__National_Thousands_Persons__ILO']\n\t\t\t# print dataset\n\t\t\t# separate the data from the target attributes\n\t\t\t# X = np.concatenate((imputedDataset[:,0:7],imputedDataset[:,0:7]),axis=1)\n\t\t\t# y = imputedDataset[:,8]\n\t\t\trows.append([year, 'score', 'p-value'])\n\t\t\trows.extend(selectedFeatures)\n\t\t\trows.append(['', '', ''])\n\t\t\t# print 'Hey'\n\n\t\tfilename = outputFolderPath+'/'+('Indicator%d - k%d - %s.csv' % (targetList[i], k, 'f_regression'))\n\t\twith open(filename,'wb') as w:\n\t\t\ta = csv.writer(w, delimiter = ',')\n\t\t\ta.writerows(rows)\n\t\tw.close()", "def analyze_images_in_folder(self, folder, generate_zmax = False, show_result = True, save_mask = True, save_excel = True):\r\n flat_cell_counted_in_folder = 0 \r\n total_cells_counted_in_folder = 0\r\n \r\n # If need to do zmax projection first\r\n if generate_zmax == True:\r\n ProcessImage.cam_screening_post_processing(folder)\r\n # Here a new folder for maxProjection is generated inside, change the path\r\n folder = os.path.join(folder, 'maxProjection')\r\n \r\n # If background images are taken\r\n if os.path.exists(os.path.join(folder, 'background')):\r\n # If the background image is taken to substract out\r\n background_substraction = True\r\n \r\n # Get all the background files names\r\n background_fileNameList = []\r\n for file in os.listdir(os.path.join(folder, 'background')):\r\n if \"tif\" in file: \r\n background_fileNameList.append(os.path.join(folder, 'background', file))\r\n \r\n background_image = ProcessImage.image_stack_calculation(background_fileNameList, operation = \"mean\")\r\n \r\n # Get a list of file names\r\n fileNameList = []\r\n for file in os.listdir(folder):\r\n if \"tif\" in file and \"LED\" not in file:\r\n fileNameList.append(file)\r\n \r\n print(fileNameList)\r\n \r\n # Analyse each image\r\n for image_file_name in fileNameList:\r\n print(image_file_name)\r\n Rawimage = imread(os.path.join(folder, image_file_name))\r\n\r\n if background_substraction == True:\r\n Rawimage = np.abs(Rawimage - background_image)\r\n \r\n # Analyze each image\r\n # Run the detection on input image.\r\n MLresults = self.DetectionOnImage(Rawimage, axis = None, show_result = show_result)\r\n\r\n if save_mask == True:\r\n \r\n if not os.path.exists(os.path.join(folder, 'ML_masks')):\r\n # If the folder is not there, 
create the folder\r\n os.mkdir(os.path.join(folder, 'ML_masks')) \r\n \r\n fig, ax = plt.subplots()\r\n # Set class_names = [None,None,None,None] to mute class name display.\r\n visualize.display_instances(Rawimage, MLresults['rois'], MLresults['masks'], MLresults['class_ids'],\r\n class_names = [None,None,None,None], ax=ax,\r\n centre_coors = MLresults['Centre_coor'], Centre_coor_radius = 2, \r\n WhiteSpace = (0, 0))#MLresults['class_ids'],MLresults['scores'], \r\n # ax.imshow(fig)\r\n fig.tight_layout()\r\n # Save the detection Rawimage\r\n fig_name = os.path.join(folder, 'ML_masks', 'ML_mask_{}.png'.format(image_file_name[0:len(image_file_name)-4]))\r\n plt.savefig(fname = fig_name, dpi=200, pad_inches=0.0, bbox_inches='tight')\r\n \r\n if flat_cell_counted_in_folder == 0:\r\n cell_Data, flat_cell_counted_in_folder, total_cells_counted_in_coord = \\\r\n ProcessImage.retrieveDataFromML(Rawimage, MLresults, image_file_name, flat_cell_counted_in_folder)\r\n else: \r\n Cell_Data_new, flat_cell_counted_in_folder, total_cells_counted_in_coord = \\\r\n ProcessImage.retrieveDataFromML(Rawimage, MLresults, image_file_name, flat_cell_counted_in_folder)\r\n if len(Cell_Data_new) > 0:\r\n cell_Data = cell_Data.append(Cell_Data_new)\r\n total_cells_counted_in_folder += total_cells_counted_in_coord\r\n \r\n if save_excel == True:\r\n # Save to excel\r\n cell_Data.to_excel(os.path.join(folder, 'CellsProperties_{}flat_outof_{}cells.xlsx'.format(flat_cell_counted_in_folder, total_cells_counted_in_folder)))\r\n \r\n return cell_Data", "def split_data_into_exchanges(source_path, destination_path):\n for subdir, dirs, files in os.walk(source_path):\n for file in files:\n source_full_file = os.path.join(subdir, file)\n print(source_full_file)\n df = pd.read_csv(source_full_file)\n for group_name, df in df.groupby(['Ticker', 'Exchange']):\n file_name = destination_path / str(df['Date'].iloc[0]) / convertTuple(group_name)\n utils.make_dir(file_name)\n with open(file_name, \"w+\") as f:\n df.to_csv(f, index=False)", "def main(output_filepath):\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')\n\n baseurl = 'http://codeandbeer.org/virtual/BigData/Labs/'\n files = ['Booking-20151012-1322.csv', 'Booking-20181025-1232.csv']\n for filename in files:\n r = requests.get(baseurl+filename, stream=True)\n if r.status == 200:\n with open(output_filepath+\"/\"+filename, \"wb\") as f:\n f.raw.decode_content = True\n shutil.copyfileobj(r.raw, f)", "def main(directory, csv_file, task_name):\n csv_data = pd.read_csv(csv_file)\n colnames = csv_data.columns.tolist()\n\n edat_files = glob.glob(directory + \"*.edat*\")\n text_files = glob.glob(directory + \"*-*.txt\")\n all_files = edat_files + text_files\n pairs = []\n paired_texts = []\n\n for text_file in text_files:\n [text_fname, _] = os.path.splitext(text_file)\n for edat_file in edat_files:\n [edat_fname, _] = os.path.splitext(edat_file)\n if text_fname == edat_fname:\n pairs.append([text_file, edat_file])\n\n for pair in pairs:\n paired_texts.append(pair[0])\n\n unpaired_texts = list(set(text_files) - set(paired_texts))\n three_files = []\n pop_idx = []\n\n # List of lists\n for i_file in range(len(unpaired_texts)):\n for j_pair in range(len(paired_texts)):\n if (unpaired_texts[i_file][:len(unpaired_texts[i_file])-6] in paired_texts[j_pair]):\n three_files.append([paired_texts[j_pair], pairs[j_pair][1],\n unpaired_texts[i_file]])\n pop_idx.append(i_file)\n\n for rm in reversed(pop_idx):\n unpaired_texts.pop(rm)\n\n # three_files 
is the text files and edats that form a triad (one edat, two\n # similarly named text files).\n for triad in three_files:\n for i_pair in reversed(range(len(pairs))):\n if triad[0:2] == pairs[i_pair]:\n pairs.pop(i_pair)\n\n two_texts = []\n all_two_texts = []\n two_text_pairs = []\n\n for i_file in range(len(unpaired_texts)):\n for j_file in range(i_file + 1, len(unpaired_texts)):\n if (unpaired_texts[i_file][:len(unpaired_texts[i_file])-6] in unpaired_texts[j_file]):\n all_two_texts.append(i_file)\n all_two_texts.append(j_file)\n two_text_pairs.append([i_file, j_file])\n\n all_two_texts = sorted(all_two_texts, reverse=True)\n\n # two_texts is the text files that pair with other text files.\n for i_pair in range(len(two_text_pairs)):\n two_texts.append([unpaired_texts[two_text_pairs[i_pair][0]],\n unpaired_texts[two_text_pairs[i_pair][1]]])\n\n for i_file in all_two_texts:\n unpaired_texts.pop(i_file)\n\n # one_text is the remaining un-paired text files.\n one_text = [[unpaired_texts[i_file]] for i_file in range(len(unpaired_texts))]\n\n # Determine subject IDs and timepoints for all files.\n # Assumes that files will be named according to convention\n # blahblahblah_[subj]-[tp].txt or blahblahblah-[subj]-[tp].txt.\n one_text_subjects = [get_subject(file_[0]) for file_ in one_text]\n one_text_timepoints = [get_timepoint(file_[0]) for file_ in one_text]\n two_text_subjects = [get_subject(pair[0]) for pair in two_texts]\n two_text_timepoints = [get_timepoint(pair[0]) for pair in two_texts]\n three_file_subjects = [get_subject(triad[0]) for triad in three_files]\n three_file_timepoints = [get_timepoint(triad[0]) for triad in three_files]\n pair_subjects = [get_subject(pair[0]) for pair in pairs]\n pair_timepoints = [get_timepoint(pair[0]) for pair in pairs]\n\n af_files = ([item for sublist in pairs for item in sublist] +\n [item for sublist in two_texts for item in sublist] +\n [item for sublist in three_files for item in sublist] +\n [item for sublist in one_text for item in sublist])\n\n one_edat = list(set(all_files) - set(af_files))\n one_edat = [[edat] for edat in one_edat]\n one_edat_subjects = [get_subject(file_[0]) for file_ in one_edat]\n one_edat_timepoints = [get_timepoint(file_[0]) for file_ in one_edat]\n\n all_subjects = (one_text_subjects + two_text_subjects + three_file_subjects +\n pair_subjects + one_edat_subjects)\n all_notetype = (([\"one_text\"] * len(one_text_subjects)) +\n ([\"two_texts\"] * len(two_text_subjects)) +\n ([\"three_files\"] * len(three_file_subjects)) +\n ([\"pair\"] * len(pair_subjects)) +\n ([\"one_edat\"] * len(one_edat_subjects)))\n all_timepoints = (one_text_timepoints + two_text_timepoints +\n three_file_timepoints + pair_timepoints +\n one_edat_timepoints)\n all_file_sets = one_text + two_texts + three_files + pairs + one_edat\n\n organized_dir = org_dir_dict.get(task_name)\n\n for i_subj in range(len(all_subjects)):\n month = timepoint_dict.get(task_name).get(all_timepoints[i_subj])\n files_note = note_dict.get(all_notetype[i_subj])\n if len(all_subjects) > 4:\n try:\n print(\"Successfully organized %s-%s\" % (all_subjects[i_subj], month))\n print(\"Moved:\")\n subject_id = all_subjects[i_subj]\n files = all_file_sets[i_subj]\n note = organize_files(subject_id, month, files, organized_dir)\n note.append(files_note)\n orged = 1\n orgedwhen = time.strftime(\"%Y/%m/%d\")\n orgedby = \"PY\"\n except IOError:\n print(\"%s-%s couldn't be organized.\" % (all_subjects[i_subj], all_timepoints[i_subj]))\n note = files_note\n orged = 0\n orgedwhen = \"\"\n 
orgedby = \"\"\n\n try:\n if all_notetype[i_subj] == \"pair\":\n print(\"Successfully converted %s-%s\" % (all_subjects[i_subj], all_timepoints[i_subj]))\n conved = 1\n convedwhen = time.strftime(\"%Y/%m/%d\")\n convedby = \"PY\"\n else:\n print(\"%s-%s couldn't be converted.\" % (all_subjects[i_subj], all_timepoints[i_subj]))\n conved = 0\n convedwhen = \"\"\n convedby = \"\"\n except IOError:\n print(\"%s-%s couldn't be converted.\" % (all_subjects[i_subj], all_timepoints[i_subj]))\n conved = 0\n convedwhen = \"\"\n convedby = \"\"\n else:\n print(\"%s-%s couldn't be organized.\" % (all_subjects[i_subj], all_timepoints[i_subj]))\n note = files_note\n orged = 0\n orgedwhen = \"\"\n orgedby = \"\"\n print(\"%s-%s couldn't be converted.\" % (all_subjects[i_subj], all_timepoints[i_subj]))\n conved = 0\n convedwhen = \"\"\n convedby = \"\"\n\n csv_data = add_subject(csv_data, all_subjects[i_subj],\n all_timepoints[i_subj], orged, orgedwhen, orgedby,\n conved, convedwhen, convedby, note)\n\n csv_data = csv_data[colnames]\n csv_data.to_csv(csv_file, index=False)", "def features_from_folder(label_folder, audio_folder, output_folder):\n print('Listing label files from folder.')\n #scan labels folder\n labels_list = os.listdir(label_folder)\n label_files = []\n for filename in labels_list:\n #get its extension\n file_extension = filename.split('.')[-1]\n if file_extension != 'txt':\n continue\n #save to without its extension\n label_files.append(filename[:-4])\n\n print('Listing audio files from folder.')\n #scan audio folder\n audios_list = os.listdir(audio_folder)\n audio_files = []\n for filename in audios_list:\n #get its extension\n file_extension = filename.split('.')[-1]\n if file_extension != 'wav':\n continue\n #save to without its extension\n audio_files.append(filename[:-4])\n\n print('Removing files without matches')\n #use only the files with matching audio/label\n files_to_process = []\n for label_file in label_files:\n if label_file in audio_files:\n files_to_process.append(label_file)\n\n print('Processing each file...')\n i = 1\n class_count = {}\n total_f = len(files_to_process)\n #for each file\n for processing in files_to_process:\n print('File', str(i) + '/' + str(total_f))\n i += 1\n\n #\n label_file = os.path.join(label_folder, processing + \".txt\")\n audio_file = os.path.join(audio_folder, processing + \".wav\")\n\n #get the segments from the corresponding label file\n segments = get_segments(label_file)\n\n #\n total_s = len(segments)\n j = 1\n #for each segment\n for segment in segments:\n print('\\tSegment', str(j) + '/' + str(total_s), segment['class'])\n j += 1\n\n if class_count.get(segment['class']) is None:\n class_count[segment['class']] = 1\n else:\n class_count[segment['class']] += 1\n output_filename = segment['class']\n output_filename += '-' + format(class_count[segment['class']], '04d')\n output_filename = os.path.join(output_folder, output_filename)\n\n #get its features\n segment_features = features_from_label(audio_file, segment)\n\n #save it to a file\n fe.write_as_bin(output_filename, segment_features)", "def convert(self,inputDir, outputDir):\n print \"mp_cellomics2tiff:\",\"INPUT:\", inputDir\n print \"mp_cellomics2tiff:\",\"OUTPUT:\", outputDir\n\n # input image files\n c01s = glob.glob(inputDir + \"/*.C01\")\n\n if os.path.isdir(outputDir):\n # check if entire dataset is already converted\n if cutils.isDatasetConverted(inputDir,outputDir):\n logfile = open(os.path.join(outputDir,'cellomics2tiff_error.log'),'w')\n msg = \"Seems that data was 
converted already, stopping.\"\n print >> logfile, msg\n print \"mp_cellomics2tiff:\",msg\n logfile.close()\n return\n else:\n os.makedirs(outputDir)\n\n metadataDir = os.path.join(outputDir,\"metadata\")\n if not os.path.isdir(metadataDir):\n os.makedirs(metadataDir)\n \n logging.basicConfig(filename=outputDir+'/cellomics2tiff.log', format='%(levelname)s:%(message)s', level=logging.DEBUG)\n logging.basicConfig(level=logging.DEBUG)\n\n # convert the metadata in MS Access files to CSV \n msg = \"Converting metadata to \", metadataDir\n print \"mp_cellomics2tiff:\",msg \n mdbs = glob.glob(inputDir + \"/*.MDB\")\n mdbs.extend(glob.glob(inputDir + \"/*.mdb\"))\n for mdb in mdbs:\n print \"MDB:\",mdb\n mdb_export(mdb, metadataDir)\n\n # Convert the data\n start_time_convert = time.time()\n msg = \"Converting...\"\n print \"mp_cellomics2tiff:\",msg \n logging.info(msg)\n pool = multiprocessing.Pool(None)\n files = glob.glob(inputDir + \"/*.C01\")\n\n # http://stackoverflow.com/questions/8521883/multiprocessing-pool-map-and-function-with-two-arguments\n r = pool.map(cellomics2tiff, zip(files,repeat(outputDir)))\n msg = \"Time elapsed: \" + str(time.time() - start_time_convert) + \"s\"\n print \"mp_cellomics2tiff:\",msg\n logging.info(msg)", "def get_files_to_copy(initial_image_dir, run_number):\n\tfiles_to_copy_ini = os.listdir(initial_image_dir)\n\t# print('\\n\\nfiles_to_copy_ini:\\n{}\\n\\n'.format('\\n'.join(str(f) for f in files_to_copy_ini)))\n\tfiles_to_copy = []\n\tfor file_in_dir in files_to_copy_ini:\n\t\tif re.search('Run_{}'.format(run_number), file_in_dir):\n\t\t\tfiles_to_copy.append(os.path.join(initial_image_dir, file_in_dir))\n\n\t# print('\\n\\nNumber of files to copy:\\n{}\\n\\n'.format(len(files_to_copy)))\n\t# Temporarily only print last 15 characters of file name to reduce log file load.\t\n\t# print('\\n\\nfiles_to_copy (last 15 chars):\\n{}\\n\\n'.format('\\n'.join(str(f[-15:]) for f in files_to_copy)))\n\t# print('\\n\\nfiles_to_copy:\\n{}\\n\\n'.format('\\n'.join(str(f) for f in files_to_copy)))\n\t# print('\\n\\nfiles_to_copy:\\n{}\\n\\n'.format(files_to_copy))\n\treturn files_to_copy", "def science_reduction(input_file):\n #name of the planet\n planet = input_file['exoplanet']\n #set original directory\n original_path = os.getcwd()\n save_path = input_file['save_path']\n data_path = input_file['data_path']\n #Change your directory to data diretory\n os.chdir(data_path)\n #list all flat images\n exoplanet = glob.glob(planet+'*.fits')\n print '\\nLoading exoplanet images \\nTotal of '+planet+'*.fits files = ',len(exoplanet),'\\nFiles = \\n'\n print exoplanet\n #if save_path exist, continue; if not, create.\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n #create a list of bias images and copy images to save_path\n print '\\nCopy science images to save_path directory to main reduction: ....'\n os.system('cp '+planet+'*.fits '+save_path)\n print '\\n .... done. 
\\n'\n #change to save_path\n os.chdir(save_path)\n #create the names for exoplanet science mages with bias subtracted\n bexoplanet = []\n for i in exoplanet:\n bexoplanet.append('B'+i)\n #verify if previous superbias exist\n if os.path.isfile('B'+i) == True:\n os.system('rm B'+i)\n print '\\n Will be create this images: \\n'\n print bexoplanet\n #exoplanet = string.join(exoplanet,',') #create the list string of exoplanet science images\n #bexoplanet = string.join(bexoplanet,',')#create the list string of bexoplanet science images\n print '\\nSubtracting superbias.fits from all '+planet+'*.fits images ....\\n'\n for i in range(len(exoplanet)):\n iraf.imarith(exoplanet[i],'-','superbias.fits',bexoplanet[i])\n use.update_progress((i+1.)/len(bexoplanet))\n print '\\n.... cleaning '+planet+'*.fits images\\n'\n os.system('rm '+planet+'*.fits')\n print '\\n Statistics of B'+planet+'*.fits images: \\n'\n for i in range(len(bexoplanet)):\n iraf.imstat(bexoplanet[i])\n print '\\nFlatfielding the B'+planet+'*.fits ....\\n'\n #create the names for exoplanet science images with bias subtracted and flatfielding\n abexoplanet = []\n for i in bexoplanet:\n abexoplanet.append('A'+i)\n #verify if previous superbias exist\n if os.path.isfile('A'+i) == True:\n os.system('rm A'+i)\n print '\\n Will be create this images: \\n'\n print abexoplanet\n #flatifielding images\n for i in range(len(abexoplanet)):\n iraf.imarith(bexoplanet[i],'/','superflat.fits',abexoplanet[i])\n use.update_progress((i+1.)/len(abexoplanet))\n # print '\\n.... cleaning B'+planet+'*.fits images\\n'\n # os.system('rm B'+planet+'*.fits')\n print '\\n Statistics of AB'+planet+'*.fits images: \\n'\n for i in range(len(abexoplanet)):\n iraf.imstat(abexoplanet[i])\n os.chdir(original_path) #change to save_path\n return", "def main():\n for root, dirs, list in os.walk(\"/home/congcongchen/Desktop/grader/hw4/2014-10-12-123254\"): \n for i in list: \n dir = os.path.join(root, i) \n if i.endswith(\".zip\"):#unzip the file\n print dir\n\ttry:\n \tunzip(root,dir)\n\texcept:#catch all exception\n\t\tprint \"Error\"+dir \n\n\n for root, dirs, list in os.walk(\"/home/congcongchen/Desktop/grader/hw4/2014-10-12-123254\"): \n for i in list: \n dir = os.path.join(root, i) \n if i.endswith(\".cpp\"):#change the name of the file\n remove(root,i)", "def preprocess_files(file_path):\n # checking your current working directory\n cur_dir = os.getcwd()\n\n # Get your current folder and sub folder event data\n data_dir = os.path.join(cur_dir, 'event_data')\n\n # Create a for loop to create a list of files and collect each\n # file_path\n file_path_list = []\n for root, dirs, files in os.walk(data_dir):\n # join the file path and roots with the subdirectories using\n # glob\n file_path_list = glob.glob(os.path.join(root, '*'))\n\n full_data_rows_list = []\n\n # for every file_path in the file path list collect records\n for f in file_path_list:\n\n # reading csv file\n with open(f, 'r', encoding='utf8', newline='') as csvfile:\n\n # creating a csv reader object\n csvreader = csv.reader(csvfile)\n next(csvreader)\n\n # extracting each data row one by one and append it\n for line in csvreader:\n full_data_rows_list.append(line)\n\n csv.register_dialect('myDialect', quoting=csv.QUOTE_ALL,\n skipinitialspace=True)\n\n # create one file with all the records\n with open(file_path, 'w', encoding='utf8',\n newline='') as f:\n writer = csv.writer(f, dialect='myDialect')\n writer.writerow(\n ['artist', 'firstName', 'gender', 'itemInSession',\n 'lastName', 'length', 'level', 
'location', 'sessionId',\n 'song', 'userId'])\n for row in full_data_rows_list:\n if row[0] == '':\n continue\n writer.writerow((row[0], row[2], row[3], row[4], row[5],\n row[6], row[7], row[8], row[12], row[13],\n row[16]))" ]
[ "0.6164347", "0.5919336", "0.5901154", "0.58560616", "0.57583755", "0.5725686", "0.571907", "0.56493175", "0.55794436", "0.5573059", "0.55641127", "0.55598956", "0.5552147", "0.55351996", "0.5534053", "0.5514603", "0.55066884", "0.5490536", "0.54603547", "0.5438883", "0.5437651", "0.5428646", "0.5425792", "0.54197073", "0.5378494", "0.53693575", "0.53616387", "0.5328064", "0.53086877", "0.5304949", "0.5287287", "0.5282106", "0.5281432", "0.52472365", "0.5215628", "0.5207029", "0.51957947", "0.51929647", "0.51801306", "0.5178844", "0.51747894", "0.51742744", "0.51742405", "0.51570934", "0.51540333", "0.51355267", "0.5127797", "0.51037467", "0.5074478", "0.50710624", "0.5068041", "0.5067882", "0.50644845", "0.5059636", "0.50503", "0.50480026", "0.5041673", "0.5039858", "0.5035513", "0.5031997", "0.50316805", "0.50173765", "0.50133914", "0.5002454", "0.49995416", "0.49986303", "0.49952707", "0.49911395", "0.49821907", "0.49811807", "0.49787346", "0.49733922", "0.49692357", "0.49662954", "0.49660122", "0.49656776", "0.4957635", "0.49532416", "0.4943107", "0.49386793", "0.4929206", "0.49281463", "0.49262604", "0.49242347", "0.49241343", "0.4922926", "0.49209043", "0.49174887", "0.49162346", "0.4911522", "0.4900091", "0.48990664", "0.4897706", "0.48956963", "0.489557", "0.48893222", "0.48885965", "0.488735", "0.48807058", "0.4869276" ]
0.76440537
0
Display info about pet.
def describe_pet(pet_name,animal_type = 'dog'): print("I have a " + animal_type + ".") print("My " + animal_type + "'s name is " + pet_name.title() + ".\n")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show_pet(self):\n pet = self.pet_factory.get_pet()\n print \"We have a lovely {}\".format(pet)\n print \"It says {}\".format(pet.speak())\n print \"We also have {}\".format(self.pet_factory.get_food())", "def show_pet(self):\n pet = self.pet_factory.get_pet()\n\n print(\"this is a lovely \", pet)\n print(\"It says \", pet.speak())\n print(\"It eats \", self.pet_factory.get_food())", "def describe_pet(animal_type, pet_name):\n print(\"\\nI have a \" + animal_type + \".\")\n print(\"My \" + animal_type + \"'s name is \" + pet_name.title() + \".\")", "def describe_pet(animal_type, pet_name):\n print(\"\\nI have a \" + animal_type + \".\")\n print(\"My \" + animal_type + \"'s name is \" + pet_name.title() + \".\")", "def describe_pet(animal_type, pet_name):\n print(\"\\nI have a \" + animal_type + \".\")\n print(\"My \" + animal_type + \"'s name is \" + pet_name.title() + \".\")", "def describe_pet(animal_type, pet_name):\n print(\"\\nI have a \" + animal_type + \".\")\n print(\"My \" + animal_type + \"'s name is \" + pet_name.title() + \".\")", "def describe_pet(animal_type, pet_name):\r\n print(f\"\\nI have a {animal_type}.\")\r\n print(f\"My {animal_type}'s name is {pet_name.title()}.\")", "def describe_pet(animal_type, pet_name):\n print(f\"\\nI have a {animal_type}.\")\n print(f\"My {animal_type}'s name is {pet_name.title()}.\")", "def describe_pet(animal_type, pet_name):\n print(f\"\\nI have a {animal_type}.\")\n print(f\"My {animal_type}'s name is {pet_name.title()}.\")", "def describe_pets(animal_type, pet_name):\n print(f\"\\nI have a {animal_type}.\")\n print(f\"My {animal_type}'s name is {pet_name.title()}\")", "def describe_pets(animal_type, pet_name):\n print(f\"\\nI have a {animal_type}.\")\n print(f\"My {animal_type}'s name is {pet_name.title()}\")", "def describe_pet(pet_name, animal_type='dog'):\n print(\"\\nI have a \" + animal_type + \".\")\n print(\"My \" + animal_type + \"'s name is \" + pet_name.title() + \".\")", "def describe_pet(animal_type='dog',pet_name):\n\tprint(\"\\nI have a \" + animal_type + \".\")\n\tprint(\"My \" + animal_type + \"'s name is \" + pet_name.title() + \".\")", "def describe_pet(pet_name, animal_type='tiger'):\n\tprint(f\"\\nI have a {animal_type}.\")\n\tprint(f\"My {animal_type}'s name is {pet_name}.\")", "def describe_pet(pet_name, animal_type='dog'):\n print(\"\\nI have a \" + animal_type + \".\")\n print(\"My \" + animal_type + \"'s name is \" + pet_name.title() + \".\")\n \n # A dog named Willie.", "def describe_pet(animal, name):\r\n print(\"\\nI have a \" + animal + \".\")\r\n print(\"Its name is \" + name + \".\")", "def describe_pet(name, animal='dog'):\r\n print(\"\\nI have a \" + animal + \".\")\r\n print(\"Its name is \" + name + \".\")", "def describe_pet(animal_type,pet_name):\n print(\"\\nI have a \"+animal_type+\".\")\n print(\"\\tMy \"+animal_type+\"'s name is \"+pet_name+\".\")", "def describe_pet(pet_name, animal_type='dog'):\n print(f\"\\nI have a(n) {animal_type}.\")\n print(f\"My {animal_type}'s name is {pet_name.title()}.\")", "def describe_pet(pet_name, animal_type = 'dog'):\r\n print(f\"\\nI have a {animal_type}.\")\r\n print(f\"My {animal_type}'s name is {pet_name.title()}.\")", "def describe_pets(animal_type,name):\n print(\"I have a \" + animal_type + \".\")\n print(\"My \" + animal_type + \"'s name is \" + name.title())", "def pet_display(pet_id):\n pet = Pets.query.get_or_404(pet_id)\n form = EditPetForm(obj=pet)\n if form.validate_on_submit():\n pet.photo_url = form.photo_url.data\n pet.notes = 
form.notes.data\n pet.available = form.available.data\n db.session.commit()\n flash(\"Succesfully updated\")\n return redirect(f'/{pet_id}')\n else:\n return render_template('display.html', pet=pet, form=form)", "def show_pet(pet_id):\n \n pet = Pet.query.get_or_404(pet_id)\n form = EditPetForm(obj=pet)\n\n if form.validate_on_submit():\n notes = form.notes.data\n photo_url = form.photo_url.data\n available = form.available.data\n\n pet.notes = notes\n pet.photo_url = photo_url\n pet.available = available\n\n db.session.commit()\n flash(f\"notes: {notes}, photo_url={photo_url}, available={pet.available}\")\n \n return redirect(f'/{pet_id}')\n\n p = dict(name=pet.name, species=pet.species, photo_url=pet.photo_url, age=pet.age, notes=pet.notes, available=pet.available)\n\n\n return render_template('pet_edit_form.html', pet=p, form=form)", "def show_pet_details(id):\n \n pet = Pet.query.get_or_404(id)\n form = EditPetForm(obj=pet)\n\n if form.validate_on_submit():\n pet.notes = form.notes.data\n pet.available = form.available.data\n pet.photo_url = form.photo_url.data\n db.session.commit()\n return redirect('/')\n\n else:\n return render_template('/pet_edit.html', form = form, pet = pet)", "def describe_pet2(pet_name, animal_type='dog'):\n print(f\"\\nI have a {animal_type}.\")\n print(f\"My {animal_type}'s name is {pet_name.title()}.\")", "def show_add_pet():\n\n form = PetForm()\n\n if form.validate_on_submit():\n name = form.name.data\n species = form.species.data\n photo_url = form.photo_url.data\n age = form.age.data\n notes = form.notes.data\n available = form.available.data\n\n pet = Pet(name=name, species=species, photo_url=photo_url, age=age, notes=notes, available=available)\n\n db.session.add(pet)\n db.session.commit()\n\n return redirect(\"/\")\n\n else:\n return render_template('add_pet.html', form=form)", "def show_and_edit_pet_page(pet_id):\n \n pet = Pet.query.get(pet_id)\n\n form = EditPetPage(obj=pet)\n\n if form.validate_on_submit():\n pet.photo_url = form.photo_url.data\n pet.notes = form.notes.data\n pet.available = form.available.data\n\n db.session.commit()\n\n return redirect('/')\n\n else:\n return render_template('display_pet.html', pet=pet, form=form)", "def show_edit_pet(pet_id):\n pet = Pet.query.get(pet_id)\n form = EditPet(obj=pet)\n\n if form.validate_on_submit():\n pet.photo_url = form.photo_url.data\n pet.notes = form.notes.data\n pet.available = form.available.data\n\n db.session.add(pet)\n db.session.commit()\n\n return redirect(\"/\")\n\n else:\n return render_template('edit_pet.html', form=form, pet=pet)", "def show_edit_pet(id):\r\n pet = Pet.query.get_or_404(id)\r\n form = EditPetForm(obj=pet)\r\n\r\n if form.validate_on_submit():\r\n pet.photo_url = form.photo_url.data\r\n pet.notes = form.notes.data\r\n pet.available = form.available.data\r\n db.session.commit()\r\n\r\n return redirect('/')\r\n\r\n else:\r\n return render_template(\"pet_profile.html\", form=form, pet=pet)", "def parterre_info(id):\n parterre = get_parterre(id)\n return render_template(\n \"parterre-info.html\",\n parterre = parterre,\n title = parterre.get_name(),\n capteurs = get_capteurs_parterre(id))", "def pets():\n \n pets_owned = db.execute(\"SELECT pets.id, pet_types.imgsrc, pet_types.pet_type, pets.created, pets.exp, pets.name, users.active_pet_id FROM owners JOIN pets ON pets.id = owners.pet_id JOIN pet_types ON pets.type = pet_types.id JOIN users ON users.id = owners.owner_id WHERE owner_id = ?\", (session_get_int(\"user_id\"), )).fetchall()\n return 
render_template(\"list.html\", pets_owned=pets_owned)", "def get(self, pet_id):\n app.logger.info(\"Request to Retrieve a pet with id [%s]\", pet_id)\n pet = Pet.find(pet_id)\n if not pet:\n abort(status.HTTP_404_NOT_FOUND, \"Pet with id '{}' was not found.\".format(pet_id))\n return pet.serialize(), status.HTTP_200_OK", "def show_homepage():\n\n pets = Pet.query.all()\n rando_pet = get_info_random_pet()\n\n name = rando_pet['petfinder']['pet']['name']['$t']\n age = rando_pet['petfinder']['pet']['age']['$t']\n image = rando_pet['petfinder']['pet']['media']['photos']['photo'][0]['$t']\n\n return render_template('homepage.html', pets=pets,\n name=name, age=age, image=image)", "def view_pokemon(self, team_name: str, team_choice: str) -> None:\n\n print(f\"\\n\\u001b[1m\\u001b[4mTeam\\u001b[0m: \\u001b[7m {team_name} \\u001b[0m\")\n print(f\"\\n\\u001b[4mPokémon Slot #{int(team_choice)}\\u001b[0m\\n\\n\")\n print(f\"\\u001b[1mName\\u001b[0m: {self.name}\")\n print(f\"\\u001b[1mPokédex ID:\\u001b[0m {self.id}\\n\")\n print(f\"\\u001b[1mHeight\\u001b[0m: {self.height} decimetres\")\n print(f\"\\u001b[1mWeight\\u001b[0m: {self.weight} hectograms\\n\")\n\n if len(self.types) == 2:\n print(f\"\\u001b[1mTypes\\u001b[0m: {self.types[0]}\")\n print(f\" {self.types[1]}\")\n else:\n print(f\"\\u001b[1mType\\u001b[0m: {self.types[0]}\")\n\n print(\"\")\n print(\"\\u001b[1mAbilities\\u001b[0m:\")\n if len(self.abilities) > 0:\n for ability in self.abilities:\n print(f\" - \\u001b[4m{ability}\\u001b[0m:\")\n print(f\" {self.abilities[ability]}\")\n else:\n print(\" This Pokémon has no abilities.\")\n\n print(\"\")\n print(\"\\u001b[1mCurrent Move Set\\u001b[0m:\")\n if len(self.move_set) > 0:\n for move in self.move_set:\n print(f\" - {move.name}\")\n else:\n print(\" This Pokémon cannot learn any moves.\")\n\n print(\"\\n\")", "async def info(self, ctx: \"IceTeaContext\", *, otag: TagConverter):\n tag: models.Tag = otag\n if not tag.alias:\n embed = discord.Embed(description=f\"{ctx.message.guild.name} ``{tag.title}`` tag information\")\n user = ctx.guild.get_member(tag.author)\n embed.set_author(name=user.display_name, icon_url=user.avatar_url)\n embed.add_field(name=\"Tag name\", value=tag.title)\n embed.add_field(name=\"Amount used\", value=str(tag.count))\n embed.timestamp = tag.created\n await ctx.send(embed=embed)\n else:\n embed = discord.Embed(description=f\"{ctx.message.guild.name} ``{tag.title}`` alias information\")\n user = ctx.guild.get_member(tag.author)\n embed.add_field(name=\"Author\", value=user or \"Unknown\")\n embed.add_field(name=\"Amount used\", value=str(tag.count))\n embed.timestamp = tag.created\n await ctx.send(embed=embed)", "def _view_animal(self):\n print(repr(self.animals[self.park_location]))", "def pokemonInfo():\n pokemonId = request.args.get('id')\n user_email = request.args.get('user_email')\n if pokemonId is None:\n return jsonify(message=\"No pokemon ID specified\"), 404\n else:\n pokemon = session.query(Pokemon).filter_by(id=pokemonId).first()\n if pokemon is None:\n return jsonify(message=\"Nothing found\"), 404\n else:\n # Verify if the user is authorized to modify or delete this entry\n if pokemon.user is not None and pokemon.user.email == user_email:\n return jsonify(pokemon=pokemon.getJSON(), authorized=True)\n else:\n return jsonify(pokemon=pokemon.getJSON())", "def add_information_about_new_pet(self, auth_key: json, name: str, animal_type: str, age: int, pet_photo: str) -> json:\n\n data = MultipartEncoder(\n fields={\n 'name': name,\n 'animal_type': 
animal_type,\n 'age': age,\n 'pet_photo': (pet_photo, open(pet_photo, 'rb'), 'image/jpeg')\n })\n headers = {'auth_key': auth_key['key'], 'Content-Type': data.content_type}\n\n res = requests.post(self.base_url + 'api/pets', headers=headers, data=data)\n status = res.status_code\n\n result = \"\"\n try:\n result = res.json()\n except:\n result = res.text\n\n return status, result", "def info():\n print(\"Made using the OOP RPG game creator (c) Claire.\\n\")", "def update_information_about_pet(self, auth_key: json, pet_id: str, name: str, animal_type: str, age: int) -> json:\n\n data = {\n 'name': name,\n 'animal_type': animal_type,\n 'age': age\n }\n headers = {'auth_key': auth_key['key']}\n\n res = requests.put(self.base_url + f'api/pets/{pet_id}', headers=headers, data=data)\n status = res.status_code\n\n result = \"\"\n try:\n result = res.json()\n except:\n result = res.text\n\n return status, result", "def plante_info(id):\n plante = get_plante(id)\n return render_template(\n \"plante-info.html\",\n plante = plante,\n title = plante.get_name(),\n parterre = get_parterre(plante.get_parterre()))", "def show_pet_with_edit_form(pet_id):\n pet = Pet.query.get_or_404(pet_id)\n form = PetFormEdit(obj=pet)\n if form.validate_on_submit():\n pet.photo_url = form.photo_url.data\n pet.notes = form.notes.data\n pet.available = form.available.data\n \n db.session.commit()\n return redirect('/')\n else:\n return render_template('pet.html', pet=pet, form=form)", "def get_pet_user_info():\n\n return db.session.query(User.email, Pet.pet_name, Pet.pet_type, Pet.pet_breed, \n Pet.pet_color, Pet.pet.status, Pet.last_address).filter(User.user_id == Pet.user_id).all()", "def view_edit_pet(id):\n pet = Pet.query.get_or_404(id)\n form = PetEditForm(obj=pet)\n if form.validate_on_submit():\n form.populate_obj(pet)\n db.session.commit()\n\n flash(f\"Updated {pet.species} named {pet.name}\")\n return redirect(f'/{id}')\n else:\n return render_template(\"pet_edit_form.html\", form=form, pet=pet)", "def info():\n print 'Loading info page'\n\n team_list = datastore.get_all_teams(engine)\n\n return render_template('info.html', rows=team_list)", "def load_animal_info(rescue_id, animal_id):\n\n animal_info = c.get_animal(animal_id)\n title = animal_info.name\n\n return render_template('animal_info.html', animal_info=animal_info,\n title=title)", "def pet_detail_edit(pet_id):\n\n pet = Pet.query.get_or_404(pet_id)\n form = PetEditForm(obj=pet)\n\n if form.validate_on_submit():\n pet.photo_url = form.photo_url.data\n pet.notes = form.notes.data\n pet.available = form.available.data\n\n db.session.commit()\n flash(f\"Pet{pet_id} updated!\")\n return redirect(f\"/{pet_id}\")\n\n else:\n return render_template(\"pet_detail.html\", form=form, pet=pet)", "def test_get_pets(self):\n response = self.client.open(\n '/pet',\n method='GET')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def view_animal(self):\n self._view_animal()", "def test_get_pet_by_id(self):\n response = self.client.open(\n '/pet/{petId}'.format(pet_id=789),\n method='GET')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def add_information_about_new_pet_without_photo(self, auth_key: json, name: str, animal_type: str, age: int) -> json:\n\n data = {\n 'name': name,\n 'animal_type': animal_type,\n 'age': age\n }\n headers = {'auth_key': auth_key['key']}\n\n res = requests.post(self.base_url + f'api/create_pet_simple', headers=headers, data=data)\n status = res.status_code\n\n 
result = \"\"\n try:\n result = res.json()\n except:\n result = res.text\n\n return status, result", "def add_pet(name, age, animal='dog'):\n print('You have a %d year old %s named %s' % (age, animal, name))", "def capteur_info(id):\n capteur = get_capteur(id)\n return render_template(\n \"capteur-info.html\",\n capteur = capteur,\n title = capteur.get_name(),\n parterre = get_parterre(capteur.get_parterre()),\n mesure = get_typeMesure(capteur.get_typeMesure()))", "def add_pet():\r\n form = AddPetForm()\r\n if form.validate_on_submit():\r\n name = form.name.data\r\n species = form.species.data\r\n photo_url = form.photo_url.data\r\n age = form.age.data\r\n notes = form.notes.data\r\n\r\n new_pet = Pet(name=name, species=species, photo_url=photo_url, age=age, notes=notes)\r\n db.session.add(new_pet)\r\n db.session.commit()\r\n return redirect('/')\r\n\r\n else:\r\n return render_template(\"add_pet.html\", form=form)", "def petedit():\n if request.method == \"POST\":\n pet_id = request.form.get(\"pet_id\")\n rename = request.form.get(\"rename\")\n rows = db.execute(\"SELECT count(*) as count FROM owners WHERE owner_id = ? AND pet_id = ?\",\n (session_get_int(\"user_id\"), pet_id, )).fetchall()\n # Confirmed user owns this pet\n if rows[0]['count'] == 1:\n exp = db.execute(\"SELECT exp FROM pets WHERE id = ?\", (pet_id, )).fetchall()\n if (int(exp[0]['exp']) >= 100):\n db.execute(\"UPDATE pets SET name = ? WHERE id = ?\", (rename, pet_id, ))\n con.commit()\n\n # Put updated info into session for pet\n set_active_pet_in_session(session_get_int(\"user_id\"))\n flash(\"Pet renamed to \" + rename)\n else:\n return apology(\"Your pet needs at least 100 experience to be renamed!\", 403)\n\n return redirect('/pets/edit/?id='+ str(pet_id))\n else:\n pet_id = int(request.args.get('id'))\n\n # This ensures the current user owns the pet being renamed\n pet_info = db.execute(\"SELECT pets.id, pet_types.imgsrc, pet_types.pet_type, pets.created, pets.exp, pets.name, users.active_pet_id FROM owners JOIN pets ON pets.id = owners.pet_id JOIN pet_types ON pets.type = pet_types.id JOIN users ON users.id = owners.owner_id WHERE owner_id = ? 
AND pet_id = ?\", \n (session_get_int(\"user_id\"), pet_id, )).fetchall()\n if len(pet_info) == 1:\n return render_template(\"petedit.html\", pet_info=pet_info)\n else:\n return apology(\"Error getting pet info\", 403)", "def add_pet():\n form = AddPet()\n if form.validate_on_submit():\n name = form.name.data\n species = form.species.data\n photo_url = form.photo_url.data\n age = form.age.data\n notes = form.notes.data\n\n new_pet = Pet(name=name,\n species=species,\n photo_url=photo_url,\n age=age,\n notes=notes)\n\n db.session.add(new_pet)\n db.session.commit()\n return redirect('/')\n else:\n return render_template('/add_form.html', form=form)", "def show_myhero(self):\n description = (self.name + ' Level is: ' + str(self.level) + ' Age is: ' + str(\n self.age) + ' Rank is: ' + self.rank + ' health is: ' + str(self.health)).title()\n print(description)", "def get_random_pet():\n resp = HTTP_request.get(' https://api.petfinder.com/v2/animals',\n params={\n \"limit\": 100,\n },\n headers={\"Authorization\": f\"Bearer {pet_finder_token}\"})\n\n pets = resp.json()[\"animals\"]\n\n random_pet = random.choice(pets)\n\n return {\"name\": random_pet[\"name\"], \"age\": random_pet[\"age\"], \"photo_url\": random_pet[\"photos\"][0][\"medium\"]}", "def print_player_info(self):\n\t\tclear_screen()\n\n\t\tprint(\"# PLAYER INFO #\\n\")\n\t\tprint(\"Name{:.>17} \".format(self.info['Name']))\n\t\tprint(\"Race{:.>17} \".format(self.info['Race']))\n\t\tprint(\"Level{:.>16} \".format(self.stats['Level']))\n\t\tprint(\"Hit Points{:.>11} \".format(self.stats['HP']))\n\t\tprint(\"Gold Pieces{:.>10} \".format(self.stats['GOLD']))\n\t\n\t\tpress_enter()", "def show_info(self):\n txt = \"Brand: %s\\nModel: %s\\nHostname: %s\\n\"%(self.brand, self.model, self.hostname)\n return txt", "def show_home_page():\n\n pet_list = Pet.query.all()\n return render_template('index.html', pet_list=pet_list)", "async def info(self, ctx, *, tag):\n try:\n self.fetch_tag(ctx, tag)\n except Exception as error:\n return await ctx.send(error)\n data = self._tag_dict[ctx.guild.id][tag]\n author = self.bot.get_user(data['author']) or await self.bot.fetch_user(data['author'])\n embed = discord.Embed(colour=self.bot.colour)\n embed.title = tag\n embed.description = f\"<:author:734991429843157042> **{author}**\\n\"\n embed.description += f\"Uses: **{data['uses']}**\\n\"\n embed.description += f\"ID: **{data['id']}**\"\n embed.set_author(name=str(author), icon_url=author.avatar_url)\n await ctx.send(embed=embed)", "def display_profile(self):\n print(f\"Id: {self._id}\")\n print(f\"username: {self.username}\")\n print(f\"name: {self.name}\")\n print(f\"contact: {self.contact}\")\n print(f\"address: {self.address}\")", "def print_player_info(player):\n print(\"\\nPLAYER INFO\")\n player.print_energy()\n print(\"Current location: {}\".format(player.location.name))\n print(\"Carrying: {0}/{1}\".format(player.get_items_total_weight(), player.get_capacity()))", "def show_myhero(self):\n description = (self.name + ' Level is: ' + str(self.level) + ' Age is: ' + str(\n self.age) + ' Rank is: ' + self.rank + ' health is: ' + str(self.health) + ' magic is: ' + str(self.__magic)).title()\n print(description)", "def show_info(self): \n color= Fore.WHITE\n print(f\"\"\" {color} \nNombre: {self.name} \nRuta: {self.route }\nFecha de salida: {self.departure_date}\"\"\")\n print(\"<\"*8, \">\"*8)\n print(\"El precio por habitacion es:\")\n for key, value in self.prize.items():\n color_value= (Fore.GREEN + str(value))\n color_key= Fore.WHITE + 
\"Habitacion\" + \" \" + key\n print(f\"\"\" {color_key} : {color_value}$ \"\"\")\n \n print(Fore.WHITE + \"<\"*8, \">\"*8)\n for floor, info in self.floors_info.items():\n piso=(Fore.WHITE + floor)\n print(f\" {piso}:{info} \")\n \n \n print(\"<\"*8, \">\"*8)\n print(\"Capacidad por tipo de habitacion: \")\n for key, value in self.room_capacity.items():\n print(f\"Habitacion {key}: {value} personas \",\"\\t\")\n return \"\"", "def cat_details(cat_id, shelter_id):\n\n shelter = petfinder.shelter_data_map(shelter_id)\n shelter = list(shelter.values())\n cat = petfinder.cat_data_map(cat_id)\n cat = list(cat.values())\n\n return render_template('more_details.html',\n shelter=shelter,\n cat=cat)\n\n #if user selects <3 to favorite a cat then redirct to the login page", "def do_info(self, args):\n if self.exploit is None:\n eprint(colorize('No exploit set; nothing to describe. Select an exploit with the \\'use\\' command',\n 'cyan'))\n else:\n eprint(colorize('\\n ' + self.exploit.DESCRIPTION + '\\n', 'green'))", "async def info(self, ctx):\n\t\tembed = discord.Embed(\n\t\t\tdescription=\"Created By Seperoph#1399 and AkaBaka#4654\",\n\t\t\tcolor=config[\"success\"]\n\t\t)\n\t\tembed.set_author(\n\t\t\tname=\"Bot Information\"\n\t\t)\n\t\tembed.add_field(\n\t\t\tname=\"Head Programmers:\",\n\t\t\tvalue=\"Seperoph#1399 and AkaBaka#4654\",\n\t\t\tinline=True\n\t\t)\n\t\tembed.add_field(\n\t\t\tname=\"Python Version:\",\n\t\t\tvalue=f\"{platform.python_version()}\",\n\t\t\tinline=True\n\t\t)\n\t\tawait ctx.respond(embed=embed)", "def show_user_info(self):\n name = self.get_user_name()\n print(f'Name: {name.title()}')\n print(f'Age: {self.age}')\n print(f'Gender: {self.gender.title()}')\n print(f'Mobile: {self.m_number}')", "def add_pet():\n\n form = AddPetForm()\n\n if form.validate_on_submit():\n name = form.name.data\n species = form.species.data\n photo_url = form.photo_url.data\n age = form.age.data\n notes = form.notes.data\n\n pet = Pet(name=name, \n species=species, \n photo_url=photo_url, \n age=age, \n notes=notes)\n db.session.add(pet)\n db.session.commit()\n \n return redirect('/')\n\n else:\n return render_template(\n \"add_pet_form.html\", form=form)", "def ProfService_detail(request, pk):\n ProfService = get_object_or_404(PServices, pk=pk)\n \n return render(request, \"ProfService_detail.html\", {\"ProfService\": ProfService})", "def shelterGetPets(url, URL_JSON_KEY, shelter_id):\n \n method = \"shelter.getPets?\"\n count = \"&count=100\"\n url+= method + URL_JSON_KEY + shelter_id + count\n petJson = urlopen(url)\n petInfo = load(reader(petJson))\n return petInfo", "def info(self):\n pp = pprint.PrettyPrinter(indent=4)\n print_text_box('Info')\n pp.pprint(self.manager.data[\"info\"])\n print('')", "def show_info(self):\n print(\"Problem number: \" + str(self.number))\n print(\"Problem name: \" + str(self.name))\n print(\"Problem description: \" + str(self.desc))", "def info(self, id):", "def add_pet_form():\n\n form = AddPetForm()\n \n if form.validate_on_submit():\n name = form.name.data\n species = form.species.data\n photo_url = form.photo_url.data\n age = form.age.data\n notes = form.notes.data\n available = form.available.data\n\n new_pet = Pet(name=name, species=species, photo_url=photo_url, age=age, notes=notes, available=available)\n db.session.add(new_pet)\n db.session.commit()\n\n flash(f\"Added {name}, species: {species}, age: {age}, notes: {notes}, photo_url={photo_url}, available={available}\")\n \n return redirect(\"/add\")\n\n else:\n return render_template(\n 
\"pet_add_form.html\", form=form)", "def info(id):\n sql = \"select distinct name, description, stars, url, last_push_date, repo_id, created_date, avatar from python_repos where repo_id=\"+id\n db = get_db()\n cursor = db.execute(sql)\n repo_info = cursor.fetchall()\n return render_template('repo.html',info=repo_info)", "def select_pets_by_person(conn, person_id):\n sql = \"\"\"\n SELECT person.first_name \n || ' ' \n || person.last_name AS `Owner`, \n pet.name AS `Pet Name`, \n pet.breed AS `Pet Breed`, \n pet.age AS `Pet Age` \n FROM person_pet \n INNER JOIN person \n ON person.id = person_pet.person_id \n INNER JOIN pet \n ON pet.id = person_pet.pet_id \n WHERE person.id = ? \n \"\"\"\n cur = conn.cursor()\n try:\n cur.execute(sql, (person_id,))\n data = cur.fetchall()\n if data:\n name = data[0][0]\n print \"\\nQuerying for pets belonging to {}\\n\".format(name)\n print sql_pp(cur, data)\n except OperationalError, msg:\n print \"SQL error {} while running our code\".format(msg)", "def printPokemon():\n print(\" _ \")\n print(\" _ __ ___ | | _____ _ __ ___ ___ _ __ \")\n print(\" | '_ \\ / _ \\| |/ / _ \\ '_ ` _ \\ / _ \\| '_ \\ \")\n print(\" | |_) | (_) | < __/ | | | | | (_) | | | |\")\n print(\" | .__/ \\___/|_|\\_\\___|_| |_| |_|\\___/|_| |_|\")\n print(\" |_| \")", "def pokemon():\n # TODO: Add form, nature and habitat to displayed table.\n color = request.form.get(\"pokecolor\")\n pokemon_of_color = []\n pokemon_with_data = []\n if color:\n pokemon_of_color = get_pokemon_of_color(color)\n # This is not a good implementation, because after getting the Pokemon\n # with a certain color, now I need to get each species to extract the\n # forms.\n for poke in pokemon_of_color:\n print(\"Extending Species: {0}\".format(poke))\n extended_poke = {}\n extended_poke[\"name\"] = poke\n extended_poke[\"forms\"] = get_forms_of_pokemon_species(species=poke)\n pokemon_with_data.append(extended_poke)\n return render_template(\"pokemon.html.j2\", pokemon=pokemon_with_data)", "def get(self):\n WriteTemplate(self.response, 'tips.html', {})", "async def _me_pets(self, ctx):\n if has_post_permission(ctx.guild.id, ctx.channel.id):\n messages = ch.print_pets(ctx.user_object)\n await self.paginate(ctx, messages)", "def displayInfo(self, model):\n\t\ttaglist = []\n\t\tfor tag in model.tags:\n\t\t\ttaglist.append(tag.tagname)\n\t\tself.infoText.SetPage(infoTemplate.render(model=model, tags= ','.join(taglist)))", "def get_info(self):\n if self.own_home:\n return print(f'Hi! My name is {self.name}, I\\'m {self.age}. Currently I have {self.own_home} house')\n return print(f'Hi! My name is {self.name}, I\\'m {self.age}. I don\\'t have any home now!')", "def show():\n info(str(Project))", "def player_information():\n if request.method == 'POST':\n result = request.form\n if request.files:\n playerImage = request.files['playerImage'].read()\n else:\n playerImage = None\n player = Player(player_image=playerImage, player_fname=result['player_first_name'],\n player_lname=result['player_last_name'], team_id=result['team_selected'])\n db.session.add(player)\n db.session.commit()\n teams = get_team()\n if teams:\n return render_template('team-players.html', teams=teams)\n else:\n return render_template('team-players.html')", "def plants (plant_name, plant_type):\n print (f\"\\n{plant_name.title()} is a {plant_type}. 
\\n\")", "def add_pet():\n form = PetAddForm()\n if form.validate_on_submit():\n pet = Pet()\n form.populate_obj(pet)\n db.session.add(pet)\n db.session.commit()\n\n flash(f\"Added {form.species.data} named {form.name.data}\")\n return redirect('/')\n else:\n return render_template(\"add_pet_form.html\", form=form)", "def show_person(uuid=None, fanchart=False):\n t0 = time.time()\n uuid = request.args.get(\"uuid\", uuid)\n fanchart_shown = request.args.get(\"fanchart\", fanchart)\n dbg = request.args.get(\"debug\", None)\n u_context = UserContext(user_session, current_user, request)\n\n with PersonReaderTx(\"read_tx\", u_context) as service:\n result = service.get_person_data(uuid)\n\n # result {'person':PersonBl, 'objs':{uniq_id:obj}, 'jscode':str, 'root':{root_type,root_user,batch_id}}\n if Status.has_failed(result):\n flash(f'{result.get(\"statustext\",\"error\")}', \"error\")\n person = result.get(\"person\")\n objs = result.get(\"objs\", [])\n print(f\"# Person with {len(objs)} objects\")\n jscode = result.get(\"jscode\", \"\")\n # Batch or Audit node data like {'root_type', 'root_user', 'id'}\n person.root = result.get(\"root\")\n\n stk_logger(u_context, f\"-> bp.scene.routes.show_person n={len(objs)}\")\n\n last_year_allowed = datetime.now().year - shareds.PRIVACY_LIMIT\n may_edit = current_user.has_role(\"audit\") # or current_user.has_role('admin')\n # may_edit = 0\n return render_template(\n \"/scene/person.html\",\n person=person,\n obj=objs,\n jscode=jscode,\n menuno=12,\n debug=dbg,\n last_year_allowed=last_year_allowed,\n elapsed=time.time() - t0,\n user_context=u_context,\n may_edit=may_edit,\n fanchart_shown=fanchart_shown,\n )", "def info_pollu(request):\r\n return render(request, 'info_pollu.html')", "def detail_speaker(request, pk, slug, template=\"core/detail_speaker.html\"):\n try:\n speaker = Speaker.objects.get(pk=pk, slug=slug)\n except Speaker.DoesNotExist:\n raise Http404(_(u'Houve algum problema tentando obter o palestrate! Você tem certeza de que ele existe?'))\n\n response = { 'speaker': speaker, 'show_all_info': True }\n return direct_to_template(request, template, response)", "def get_description(self):\n print(\"This Iron door.\")", "def home():\n pets = Pets.query.all()\n return render_template('home.html', pets=pets)", "def pet_add_form():\n\n form = PetAddForm()\n\n if form.validate_on_submit():\n name = form.name.data\n species = form.species.data\n photo_url = form.photo_url.data\n age = form.age.data\n notes = form.notes.data\n\n flash(f\"Added a {species} called {name}\")\n\n pet = Pet(\n name=name,\n species=species,\n photo_url=photo_url if photo_url != '' else None,\n age=age,\n notes=notes\n )\n\n db.session.add(pet)\n db.session.commit()\n\n return redirect(\"/\")\n\n else:\n return render_template(\"pet_add_form.html\", form=form)", "def display(animal):\n for name, valeur in animal.items(): # boucle contenant deux variables pour le nom et la valeur de chaque clef dans le dictionaire\n print(\"donnée de votre animal: {} : {}\".format(name,valeur))", "def _show(self, indent = 0):\n print(\" \"*indent, \"Name:\", self.name)\n print(\" \"*indent, \"Description:\", self.description)", "def info(self):", "def info(self):", "def tell(self):\n print('Name {}, Age {}'. format(self.name, self.age), end=\" \")" ]
[ "0.8161232", "0.81274664", "0.74135685", "0.74135685", "0.74135685", "0.74135685", "0.7373951", "0.7348695", "0.7348695", "0.7345544", "0.7345544", "0.7304899", "0.72921777", "0.72867423", "0.72316194", "0.7195379", "0.71254575", "0.7090836", "0.709054", "0.70647824", "0.69525325", "0.6921077", "0.6898156", "0.6804146", "0.6629937", "0.6606723", "0.6397", "0.6393406", "0.6369469", "0.63379055", "0.62589353", "0.6203738", "0.61927897", "0.6092811", "0.6071056", "0.60576856", "0.6037025", "0.59680116", "0.5962951", "0.59503156", "0.59354794", "0.58850837", "0.5883279", "0.58823293", "0.5866023", "0.5841362", "0.58236074", "0.58118093", "0.58110136", "0.578221", "0.5730358", "0.5709198", "0.5700082", "0.5675273", "0.5638187", "0.5612084", "0.5605033", "0.5591994", "0.5564739", "0.5561304", "0.5558715", "0.555473", "0.5553998", "0.5552795", "0.5549793", "0.5545592", "0.55443734", "0.5538924", "0.5520495", "0.5509178", "0.5480259", "0.54713595", "0.5443407", "0.54379207", "0.5436876", "0.54107785", "0.5400326", "0.5394891", "0.53883", "0.53874266", "0.53864205", "0.53655744", "0.5364375", "0.5357936", "0.53520584", "0.53510094", "0.53470033", "0.5338842", "0.5333391", "0.5331747", "0.5330342", "0.53170544", "0.5301703", "0.5288671", "0.52873206", "0.52847886", "0.52837425", "0.5280268", "0.5280268", "0.52736914" ]
0.7029722
20
Load selected iterations and classes 3D for visualization mode.
def _load(self): self.firstIter = 1 self.lastIter = self.protocol.getLastFinishedIter() if self.viewIter.get() == ITER_LAST: self._iterations = [self.lastIter] else: self._iterations = self._getListFromRangeString(self.iterSelection.get()) from matplotlib.ticker import FuncFormatter self._plotFormatter = FuncFormatter(self._formatFreq)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_i3d(eval_type, h5_dir='param/'):\n state_dict = {}\n load_unit3d(state_dict, eval_type, 'Conv3d_1a_7x7', 'conv_1a', h5_dir)\n\n load_unit3d(state_dict, eval_type, 'Conv3d_2b_1x1', 'conv_2b', h5_dir)\n load_unit3d(state_dict, eval_type, 'Conv3d_2c_3x3', 'conv_2c', h5_dir)\n\n load_block(state_dict, eval_type, 'Mixed_3b', 'mixed_3b', h5_dir)\n load_block(state_dict, eval_type, 'Mixed_3c', 'mixed_3c', h5_dir)\n\n load_block(state_dict, eval_type, 'Mixed_4b', 'mixed_4b', h5_dir)\n load_block(state_dict, eval_type, 'Mixed_4c', 'mixed_4c', h5_dir)\n load_block(state_dict, eval_type, 'Mixed_4d', 'mixed_4d', h5_dir)\n load_block(state_dict, eval_type, 'Mixed_4e', 'mixed_4e', h5_dir)\n load_block(state_dict, eval_type, 'Mixed_4f', 'mixed_4f', h5_dir)\n\n load_block(state_dict, eval_type, 'Mixed_5b', 'mixed_5b', h5_dir)\n load_block(state_dict, eval_type, 'Mixed_5c', 'mixed_5c', h5_dir)\n\n load_unit3d(state_dict, eval_type, 'Logits', 'logits', h5_dir,\n use_batch_norm=False, use_bias=True)\n return state_dict", "def enable3D(self):\r\n if(self.dataController.fileLoaded==True):\r\n self.dataController.toggleInteractiveMode()\r\n\r\n self.midsagittalView = False\r\n self.frontView = False\r\n self.topView = False\r\n self.bottomView = False\r\n self.threeDView = True", "def loadGt(self):\n print('Loading and preparing results...')\n res = Pascal3D(self.cfg, self.dataset_dir, self.list_flag, self.transforms, self.training, load_cad=False)\n res.dataset = dict()\n res.dataset['categories'] = copy.deepcopy(self.category_to_id_map)\n res.dataset['images'] = []\n anns = []\n count = 1\n tic = time.time()\n for idx in tqdm(range(len(self.img_list_all))):\n res.dataset['images'].append({'id': self.img_list_all[idx]})\n if self.list_flag in ['train', 'val']:\n target = self._add_gt_annotations_Pascal3D(idx)\n\n for id in range(len(target)):\n ann = dict()\n ann['image_id'] = self.img_list_all[idx]\n ann['category_id'] = int(target.get_field('labels')[id].numpy())\n bb = target.bbox[id].numpy()\n x1, x2, y1, y2 = bb[0], bb[2], bb[1], bb[3]\n w = x2 - x1\n h = y2 - y1\n x_c = (x1 + x2)/2\n y_c = (y1 + y2)/2\n ann['bbox'] = [x_c, y_c, w, h]\n ann['area'] = w * h\n\n if target.has_field('segms'):\n ann['segms'] = target.get_field('segms')[id]\n # now only support compressed RLE format as segmentation results\n ann['area'] = maskUtils.area(ann['segms'])\n if not 'boxes' in ann:\n ann['boxes'] = maskUtils.toBbox(ann['segms'])\n\n ann['id'] = count\n ann['iscrowd'] = 0\n count += 1\n anns.append(ann)\n\n print('DONE (t={:0.2f}s)'.format(time.time() - tic))\n\n res.dataset['annotations'] = anns\n res.createIndex()\n return res", "def loadRes(self, predictions, type='boxes'):\n print('Loading and preparing results...')\n res = Car3D(self.cfg, self.dataset_dir, self.list_flag, self.transforms, self.training)\n res.dataset = dict()\n res.dataset['categories'] = copy.deepcopy(self.category_to_id_map)\n res.dataset['images'] = []\n anns = []\n count = 1\n tic = time.time()\n\n for idx in tqdm(range(len(self.img_list_all))):\n res.dataset['images'].append({'id': self.img_list_all[idx]})\n prediction = predictions[idx]\n if type == 'boxes':\n for id in range(len(prediction)):\n ann = dict()\n ann['image_id'] = self.img_list_all[idx]\n ann['category_id'] = int(prediction.get_field('labels')[id].numpy())\n bb = prediction.bbox[id].numpy()\n x1, x2, y1, y2 = bb[0], bb[2], bb[1], bb[3]\n w = x2 - x1\n h = y2 - y1\n x_c = (x1 + x2) / 2\n y_c = (y1 + y2) / 2\n ann['bbox'] = [x_c, y_c, w, h]\n ann['area'] = w * h\n 
ann['id'] = count\n ann['iscrowd'] = 0\n ann['score'] = prediction.get_field('scores')[id].numpy()\n\n count += 1\n anns.append(ann)\n elif type == 'segms':\n masks = prediction.get_field(\"mask\")\n masks = self.masker([masks], [prediction])[0]\n\n for id in range(len(prediction)):\n ann = dict()\n ann['image_id'] = self.img_list_all[idx]\n ann['score'] = prediction.get_field('scores')[id].numpy()\n binary_mask = masks[id, 0]\n fortran_binary_mask = np.asfortranarray(binary_mask)\n ann['segms'] = maskUtils.encode(fortran_binary_mask)\n ann['category_id'] = int(prediction.get_field('labels')[id].numpy())\n # now only support compressed RLE format as segmentation results\n ann['area'] = maskUtils.area(ann['segms'])\n if not 'boxes' in ann:\n ann['boxes'] = maskUtils.toBbox(ann['segms'])\n ann['id'] = count\n count += 1\n ann['iscrowd'] = 0\n anns.append(ann)\n\n print('DONE (t={:0.2f}s)'.format(time.time() - tic))\n res.dataset['annotations'] = anns\n res.createIndex()\n #res.eval_class = []\n return res", "def loadGt(self, type='boxes'):\n print('Loading and preparing results...')\n res = Car3D(self.cfg, self.dataset_dir, self.list_flag, self.transforms, self.training)\n res.dataset = dict()\n res.dataset['categories'] = copy.deepcopy(self.category_to_id_map)\n res.dataset['images'] = []\n anns = []\n count = 1\n tic = time.time()\n for idx in tqdm(range(len(self.img_list_all))):\n res.dataset['images'].append({'id': self.img_list_all[idx]})\n img = Image.open(os.path.join(self.dataset_dir, 'images', self.img_list_all[ idx ] + '.jpg')).convert(\"RGB\")\n image_shape = img.size\n if self.list_flag in ['train', 'val']:\n im_scale = 1.0\n target = self._add_gt_annotations_Car3d(idx, image_shape, im_scale)\n\n if type == 'boxes':\n for id in range(len(target)):\n ann = dict()\n ann['image_id'] = self.img_list_all[idx]\n ann['category_id'] = int(target.get_field('labels')[id].numpy())\n bb = target.bbox[id].numpy()\n x1, x2, y1, y2 = bb[0], bb[2], bb[1], bb[3]\n w = x2 - x1\n h = y2 - y1\n x_c = (x1 + x2)/2\n y_c = (y1 + y2)/2\n ann['bbox'] = [x_c, y_c, w, h]\n ann['area'] = w * h\n ann['id'] = count\n ann['iscrowd'] = 0\n count += 1\n anns.append(ann)\n\n elif type == 'segms':\n for id in range(len(target)):\n ann = dict()\n ann['image_id'] = self.img_list_all[idx]\n ann['segms'] = target.get_field('segms')[id]\n ann['category_id'] = int(target.get_field('labels')[id].numpy())\n # now only support compressed RLE format as segmentation results\n ann['area'] = maskUtils.area(ann['segms'])\n if not 'boxes' in ann:\n ann['boxes'] = maskUtils.toBbox(ann['segms'])\n ann['id'] = count\n count += 1\n ann['iscrowd'] = 0\n anns.append(ann)\n\n print('DONE (t={:0.2f}s)'.format(time.time() - tic))\n\n res.dataset['annotations'] = anns\n res.createIndex()\n return res", "def get_3d_train(self, jnts=14):\n\n to_select, to_sort = dataset_indices(self.dataset_name, jnts)\n\n return self._data_train['3d'][:, to_select, :][:, to_sort, :]", "def visualize_scene_3D(self, pointcloud, objects, labels=None, calib=None):\n self.figure = mlab.figure(bgcolor=(0,0,0), fgcolor=(1,1,1), size=(1280, 720))\n\n # Point Cloud\n self.visuallize_pointcloud(pointcloud)\n\n # 3D Boxes of model output\n for obj in objects:\n bbox_3d = obj.bbox_3d\n color = self.__get_box_color(obj.label)\n self.visualize_3d_bbox(bbox=bbox_3d, color=color, calib=calib)\n\n self.__draw_text_3D(*bbox_3d.pos, text=str( round(obj.score,2) ), color=color)\n\n # 3D Boxes of dataset labels \n if labels is not None:\n for obj in labels:\n 
self.visualize_3d_bbox(obj.bbox_3d, (1,1,0), calib)\n\n self.__show_3D()", "def loadRes(self, predictions):\n print('Loading and preparing results...')\n res = Pascal3D(self.cfg, self.dataset_dir, self.list_flag, self.transforms, self.training, load_cad=False)\n res.dataset = dict()\n res.dataset['categories'] = copy.deepcopy(self.category_to_id_map)\n res.dataset['images'] = []\n anns = []\n count = 1\n tic = time.time()\n masker = Masker(threshold=0.5, padding=1)\n\n for idx in tqdm(range(len(self.img_list_all))):\n res.dataset['images'].append({'id': self.img_list_all[idx]})\n prediction = predictions[idx]\n for id in range(len(prediction)):\n ann = dict()\n ann['image_id'] = self.img_list_all[idx]\n ann['category_id'] = int(prediction.get_field('labels')[id].cpu().numpy())\n bb = prediction.bbox[id].cpu().numpy()\n x1, x2, y1, y2 = bb[0], bb[2], bb[1], bb[3]\n w = x2 - x1\n h = y2 - y1\n x_c = (x1 + x2) / 2\n y_c = (y1 + y2) / 2\n ann['bbox'] = [x_c, y_c, w, h]\n ann['area'] = w * h\n ann['score'] = prediction.get_field('scores')[id].cpu().numpy()\n\n if prediction.has_field('mask'):\n image_width, image_height, _ = res.targets[idx]['record']['imgsize']\n\n masks = prediction.get_field('mask')\n # Masker is necessary only if masks haven't been already resized.\n if list(masks.shape[-2:]) != [image_height, image_width]:\n masks = masker(masks.expand(1, -1, -1, -1, -1), prediction)\n masks = masks[0]\n\n ann['segms'] = [maskUtils.encode(np.array(mask[0, :, :, np.newaxis], order=\"F\"))[0] for mask in masks]\n ann['category_id'] = int(prediction.get_field('labels')[id].cpu().numpy())\n # now only support compressed RLE format as segmentation results\n #ann['area'] = maskUtils.area(ann['segms'])\n if not 'boxes' in ann:\n ann['boxes'] = maskUtils.toBbox(ann['segms'])\n\n ann['id'] = count\n count += 1\n ann['iscrowd'] = 0\n anns.append(ann)\n\n print('DONE (t={:0.2f}s)'.format(time.time() - tic))\n res.dataset['annotations'] = anns\n res.createIndex()\n return res", "def readpil3d(self):\r\n\r\n # Read the data in as an array.\r\n res = np.loadtxt(self.name, delimiter=' ')\r\n\r\n # Split into useful chunks\r\n self.pos = res[:, 0:3] # Grid point locations\r\n self.Pn = res[:, 3:4] # Normal pressure [Pa]\r\n self.flux = res[:, -1] # Flux\r", "def _load( self, i ):\n if ir.config.verbosity_level >= 2: print(\"[observation] Lazy loading raster\")\n self._raster_data[i] = raster_cube( self._raster_files, line=self._line_info['description'][i], keep_null=self._keep_null )", "def view_point_cloud_model_and_affordance(number_of_object_per_category=5):\n list_pc_paths = [f for f in glob.glob('./dataset/*.npy', recursive=True)]\n set_objects = set([os.path.basename(pc_path).split('_')[0] for pc_path in list_pc_paths])\n\n for obj in set_objects:\n try:\n # load point cloud models\n pc_models = np.load('./dataset/{}_point_cloud_models.npy'.format(obj))[\n :number_of_object_per_category]\n # load point cloud grasp affordance\n pc_affordance = np.load('./dataset/{}_point_cloud_grasp_affordance.npy'.format(obj))[\n :number_of_object_per_category]\n\n # visualization\n for i, m in enumerate(pc_models):\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n list_model_x, list_model_y, list_model_z = [], [], []\n list_affordance_x, list_affordance_y, list_affordance_z = [], [], []\n\n for x in range(32):\n for y in range(32):\n for z in range(32):\n if pc_affordance[i, x, y, z] == 1:\n list_affordance_x.append(x)\n list_affordance_y.append(y)\n list_affordance_z.append(z)\n elif m[x, y, z] == 
1:\n list_model_x.append(x)\n list_model_y.append(y)\n list_model_z.append(z)\n\n ax.scatter(list_model_x, list_model_y, list_model_z, c='#0c457d')\n ax.scatter(list_affordance_x, list_affordance_y, list_affordance_z, c='#e8702a', alpha=0.35)\n ax.set_xlim(0, 32)\n ax.set_ylim(0, 32)\n ax.set_zlim(0, 32)\n plt.show()\n\n except FileNotFoundError:\n print('Some point cloud npy files are not found.')\n continue", "def load_cityscapes(self, dataset_dir, subset):\n self.class_labels = {\n 'unlabeled':0,\n 'ego vehicle':1, \n 'rectification border':2,\n 'out of roi':3, \n 'static':4, \n 'dynamic':5, \n 'ground':6, \n 'road':7, \n 'sidewalk':8, \n 'parking':9, \n 'rail track':10, \n 'building':11, \n 'wall':12, \n 'fence':13, \n 'guard rail':14, \n 'bridge':15, \n 'tunnel':16, \n 'pole':17, \n 'polegroup':18, \n 'traffic light':19, \n 'traffic sign':20, \n 'vegetation':21, \n 'terrain':22, \n 'sky':23, \n 'person':24, \n 'rider':25, \n 'car':26, \n 'truck':27, \n 'bus':28, \n 'caravan':29, \n 'trailer':30, \n 'train':31, \n 'motorcycle':32, \n 'bicycle':33, \n 'license plate':34}\n \n annotation_dir = dataset_dir + 'gtFine_trainvaltest/' + subset + '_all.json'\n self.image_info = json.load(open(annotation_dir, 'r'))\n \n # Add classes\n for i in range(len(self.class_labels)):\n self.add_class(\"cityscape\", i, list(self.class_labels.keys())[i])", "def showEntireDataset(wl_listG, wl_listV, tsvd_graphlet_vectors, kpca_graphlet_gram, tsvd_shortestpath_vectors,\n kpca_shortestpath_gram, classes):\n for i in range(1, 8):\n if (i == 6):\n data_tsvd = tsvd_graphlet_vectors\n data_kpca = kpca_graphlet_gram\n elif (i == 7):\n data_tsvd = tsvd_shortestpath_vectors\n data_kpca = kpca_shortestpath_gram\n else:\n data_tsvd = wl_listV[i - 1]\n data_kpca = wl_listG[i - 1]\n fig = plt.figure(figsize=(15, 15))\n if (i == 6):\n fig.suptitle('Graphlet', fontsize=25)\n elif (i == 7):\n fig.suptitle('Shortest Path', fontsize=25)\n else:\n fig.suptitle(f'Weisfeiler-Lehman {i}', fontsize=25)\n ax1 = fig.add_subplot(221)\n ax2 = fig.add_subplot(222)\n ax3 = fig.add_subplot(223, projection='3d')\n ax4 = fig.add_subplot(224, projection='3d')\n ax1.title.set_text('2D TruncatedSVD')\n ax2.title.set_text('2D KernelPCA')\n ax3.title.set_text('3D TruncatedSVD')\n ax4.title.set_text('3D KernelPCA')\n ax1.scatter(data_tsvd[:, 0], data_tsvd[:, 1], c=classes)\n ax2.scatter(data_kpca[:, 0], data_kpca[:, 1], c=classes)\n ax3.scatter3D(data_tsvd[:, 0], data_tsvd[:, 1], data_tsvd[:, 2], c=classes)\n ax4.scatter3D(data_kpca[:, 0], data_kpca[:, 1], data_kpca[:, 2], c=classes)\n plt.show()\n print(\"________________________________________________________________________________________\")\n print()", "def run_3D_predictions(self, min_size=5000):\n cases = self.test_loader.dataset.im_ids\n assert len(cases) == len(self.test_loader)\n for (test_batch, case) in tqdm(zip(self.test_loader, cases), total=len(cases)):\n test_x = torch.squeeze(test_batch[0], dim=0)\n if self.pseudo_3D:\n pred, _, act, _ = self.model.predict_3D_pseudo3D_2Dconv(test_x,\n **self.pred_3D_params)\n else:\n pred, _, act, _ = self.model.predict_3D(test_x,\n **self.pred_3D_params)\n assert len(pred.shape) == 3\n assert len(act.shape) == 4\n pred = remove_3D_connected_components(pred, min_size=min_size)\n pred = self.post_process_stage1(pred)\n self.save_pred(pred, act, case)\n case_raw = Path(case).name\n bbox_coord = self.create_bbox_stage1(pred, case_raw)\n self.bbox_coords[case_raw] = bbox_coord\n self.save_bbox_coords()", "def load_3d(path, rgb=True, roi=None, 
switchOrder=False):\n assert isinstance(path, str)\n assert isinstance(rgb, bool)\n if roi is not None:\n assert isinstance(roi, dict)\n\n fnames = []\n for f in glob(path + \"*.png\"):\n fnames.append(f)\n if len(fnames) == 0:\n for f in glob(path + \"*.jpg\"):\n fnames.append(f)\n if len(fnames) == 0:\n for f in glob(path + \"*.JPG\"):\n fnames.append(f)\n if len(fnames) == 0:\n for f in glob(path + \"*.tif\"):\n fnames.append(f)\n if len(fnames) == 0:\n for f in glob(path + \"*.TIF\"):\n fnames.append(f)\n if len(fnames) == 0:\n for f in glob(path + \"*.exr\"):\n fnames.append(f)\n if len(fnames) == 0:\n for f in glob(path + \"*.ppm\"):\n fnames.append(f)\n\n fnames.sort()\n\n for i in fnames:\n print(i)\n\n if switchOrder:\n fnames.reverse()\n\n sposx = 0\n eposx = 0\n sposy = 0\n eposy = 0\n\n im = vigra.readImage(fnames[0],order='C')\n # im = misc.imread(fnames[0])\n if len(im.shape) == 2:\n rgb = False\n\n if roi is not None:\n sposx = roi[\"pos\"][0]\n eposx = roi[\"pos\"][0] + roi[\"size\"][0]\n sposy = roi[\"pos\"][1]\n eposy = roi[\"pos\"][1] + roi[\"size\"][1]\n if rgb:\n lf = np.zeros((len(fnames), roi[\"size\"][0], roi[\"size\"][1], 3), dtype=np.float32)\n else:\n lf = np.zeros((len(fnames), roi[\"size\"][0], roi[\"size\"][1], 1), dtype=np.float32)\n\n else:\n if rgb:\n lf = np.zeros((len(fnames), im.shape[0], im.shape[1], 3), dtype=np.float32)\n else:\n lf = np.zeros((len(fnames), im.shape[0], im.shape[1], 1), dtype=np.float32)\n\n if roi is None:\n if rgb:\n if len(im.shape) == 3:\n lf[0, :, :, :] = im[:, :, 0:3]\n else:\n for c in range(3):\n lf[0, :, :, c] = im[:]\n else:\n if len(im.shape) == 3:\n lf[0, :, :, 0] = 0.3 * im[:, :, 0] + 0.59 * im[:, :, 1] + 0.11 * im[:, :, 2]\n else:\n lf[0, :, :, 0] == im[:]\n else:\n if rgb:\n if len(im.shape) == 3:\n lf[0, :, :, 0:3] = im[sposx:eposx, sposy:eposy, 0:3]\n else:\n for c in range(3):\n lf[0, :, :, c] = im[sposx:eposx, sposy:eposy]\n else:\n if len(im.shape) == 3:\n lf[0, :, :, 0] = 0.3 * im[sposx:eposx, sposy:eposy, 0] + 0.59 * im[sposx:eposx, sposy:eposy, 1] + 0.11 * im[sposx:eposx, sposy:eposy, 2]\n else:\n lf[0, :, :, 0] = im[sposx:eposx, sposy:eposy]\n\n for n in range(1, len(fnames)):\n # im = misc.imread(fnames[n])\n im = vigra.readImage(fnames[n], order='C')\n if rgb:\n if roi is None:\n if len(im.shape) == 3:\n lf[n, :, :, :] = im[:, :, 0:3]\n else:\n for c in range(3):\n lf[n, :, :, c] = im[:]\n else:\n if len(im.shape) == 3:\n lf[n, :, :, :] = im[sposx:eposx, sposy:eposy, 0:3]\n else:\n for c in range(3):\n lf[n, :, :, c] = im[sposx:eposx, sposy:eposy]\n else:\n if roi is None:\n if len(im.shape) == 3:\n lf[n, :, :, 0] = 0.3 * im[:, :, 0] + 0.59 * im[:, :, 1] + 0.11 * im[:, :, 2]\n else:\n lf[n, :, :, 0] = im[:]\n else:\n if len(im.shape) == 3:\n lf[n, :, :, 0] = 0.3 * im[sposx:eposx, sposy:eposy, 0] + 0.59 * im[sposx:eposx, sposy:eposy, 1] + 0.11 * im[sposx:eposx, sposy:eposy, 2]\n else:\n lf[n, :, :, 0] = im[sposx:eposx, sposy:eposy]\n\n amax = np.amax(lf)\n if amax >= 1:\n lf[:] /= 255\n\n return lf", "def _load_labels_3d(self, results):\n results[\"gt_labels_3d\"] = results[\"ann_info\"][\"gt_labels_3d\"]\n return results", "def test_load_selections3(self, selection):\n self.image_set.create_subset()\n selection.load_selections([SAMPLE_ROI])\n rows, cols = np.column_stack(self.roi_coords)\n for pixel in self.image_set._roi_data[rows, cols]:\n assert np.array_equal(\n pixel, [255.0, 0.0, 0.0, 255.]\n )\n for pixel in self.subset._roi_data[rows, cols]:\n assert np.array_equal(\n pixel, [0.0, 100.0, 0.0, 255.]\n 
)", "def loadPredictions(self):\n\n dir_path = os.path.dirname(os.path.realpath(__file__))\n message = 'Select folder'\n folderDialog = QtWidgets.QFileDialog(self, message, dir_path)\n folderDialog.setFileMode(QtWidgets.QFileDialog.Directory)\n folderDialog.setOption(QtWidgets.QFileDialog.DontUseNativeDialog, True)\n fileName = [] # Returns a list of the directory\n\n # Plot the window to select the csv file\n if folderDialog.exec_():\n fileName = folderDialog.selectedFiles()\n # Debug\n #fileName = ['/media/dimitris/TOSHIBA EXT/Image_Document_Classification/PMC-Dataset']\n print(fileName)\n if os.path.isdir(str(fileName[0])):\n self.loadFolder(str(fileName[0]))\n else:\n message = 'Only csv files'\n self.messageBox(message)\n return\n\n self.selectFigures()", "def __init__(self, opt):\n BaseDataset.__init__(self, opt)\n rgbdir = os.path.join(opt.dataroot, 'rgb_256')\n if opt.do_upsampling:\n rgbdir = os.path.join(opt.dataroot, 'rgb_512')\n uvdir = os.path.join(opt.dataroot, 'iuv')\n self.image_paths = sorted(make_dataset(rgbdir, opt.max_dataset_size))\n n_images = len(self.image_paths)\n layers = sorted(os.listdir(uvdir))\n layers = [l for l in layers if l.isdigit()]\n self.iuv_paths = []\n for l in layers:\n layer_iuv_paths = sorted(make_dataset(os.path.join(uvdir, l), n_images))\n if len(layer_iuv_paths) != n_images:\n print(f'UNEQUAL NUMBER OF IMAGES AND IUVs: {len(layer_iuv_paths)} and {n_images}')\n self.iuv_paths.append(layer_iuv_paths)\n\n # set up per-frame compositing order\n with open(os.path.join(opt.dataroot, 'metadata.json')) as f:\n metadata = json.load(f)\n if 'composite_order' in metadata:\n self.composite_order = metadata['composite_order']\n else:\n self.composite_order = [tuple(range(1, 1 + len(layers)))] * n_images\n\n if opt.use_homographies:\n self.init_homographies(os.path.join(opt.dataroot, 'homographies.txt'), n_images)", "def _load_layer_arrays(\n cls,\n f_obj,\n model,\n nlay,\n ext_unit_dict,\n transient,\n laycon,\n ikvflag,\n ikcflag,\n iwdflg,\n ):\n sf1 = [0] * nlay\n tran = [0] * nlay\n hy = [0] * nlay\n if nlay > 1:\n vcont = [0] * (nlay - 1)\n else:\n vcont = [0] * nlay\n sf2 = [0] * nlay\n wetdry = [0] * nlay\n kv = [0] * nlay # mfusg\n\n for layer in range(nlay):\n util2d_shape = get_util2d_shape_for_layer(model, layer=layer)\n\n # sf1\n if transient:\n if model.verbose:\n print(f\" loading sf1 layer {layer + 1:3d}...\")\n sf1[layer] = Util2d.load(\n f_obj,\n model,\n util2d_shape,\n np.float32,\n \"sf1\",\n ext_unit_dict,\n )\n\n # hy/tran, and kv/vcont\n if ikcflag == 0:\n (\n hy[layer],\n tran[layer],\n kv[layer],\n vcont_k,\n ) = cls._load_hy_tran_kv_vcont(\n f_obj,\n model,\n (layer, laycon[layer]),\n ext_unit_dict,\n ikvflag,\n )\n if layer < nlay - 1:\n vcont[layer] = vcont_k\n\n # sf2\n if transient and (laycon[layer] in [2, 3, 4]):\n if model.verbose:\n print(f\" loading sf2 layer {layer + 1:3d}...\")\n sf2[layer] = Util2d.load(\n f_obj,\n model,\n util2d_shape,\n np.float32,\n \"sf2\",\n ext_unit_dict,\n )\n\n # wetdry\n if (iwdflg != 0) and (laycon[layer] in [1, 3]):\n if model.verbose:\n print(f\" loading sf2 layer {layer + 1:3d}...\")\n wetdry[layer] = Util2d.load(\n f_obj,\n model,\n util2d_shape,\n np.float32,\n \"wetdry\",\n ext_unit_dict,\n )\n\n return sf1, tran, hy, vcont, sf2, wetdry, kv", "def viz(self,slices):\n #layers_copy = deepcopy(self.layers)\n self.layers_copy = self.layers\n imgs = [torch.zeros([1,3,self.N_in,self.N_in])]\n \n for layer in self.layers:\n if isinstance(layer,nn.Conv2d):\n layer2 = 
nn.Conv2d(3,3,layer.kernel_size,layer.stride,layer.padding)\n imgs.append(layer2(imgs[-1]))\n else:\n imgs.append(layer(imgs[-1]))\n \n assert(len(self.projs) == len(imgs)-1)\n for proj,img in zip(self.projs[::-1],imgs[::-1]):\n (x1,x2),(y1,y2) = slices\n img[0,:,x1:x2+1,y1:y2+1] = 255\n slices = proj(slices)\n (x1,x2),(y1,y2) = slices\n imgs[0][0,:,x1:x2+1,y1:y2+1] = 255\n \n dim = int(np.floor(np.sqrt(len(self.layers))))+1\n fig,axes = plt.subplots(dim,dim,figsize=(10,10))\n for i,img in enumerate(imgs):\n a,b = np.unravel_index(i,(dim,dim))\n axes[a,b].imshow((img[0].detach().permute(1,2,0).numpy()).astype(np.uint8))\n axes[a,b].set_title(str(i))", "def generate_training_data_3D():\n c11 = np.random.uniform(0.05, 1.50, 20)\n c12 = np.random.uniform(-1.50, 1.50, 20)\n c13 = np.random.uniform(-2.50, -0.05, 20)\n c21 = np.random.uniform(-1.50, -0.05, 20)\n c22 = np.random.uniform(-1.50, 1.50, 20)\n c23 = np.random.uniform(0.05, 2.50, 20)\n c1 = np.array([[i, j, k] for i, j, k in zip(c11, c12, c13)])\n c2 = np.array([[i, j, k] for i, j, k in zip(c21, c22, c23)])\n\n points = plt.figure()\n ax = points.add_subplot(111, projection='3d')\n ax.scatter(c1[:, 0], c1[:, 1], c1[:, 2], c='r', marker='^')\n ax.scatter(c2[:, 0], c2[:, 1], c2[:, 2], c='b', marker='*')\n plt.show()\n plt.close()\n\n return c1, c2", "def _preload_all_samples(self):\n if self.mode in ['train_noval', 'train_with_val']:\n\n self._images_train, self._labels_train = [], []\n desc = \"Loading train image pairs & flows\"\n with tqdm(total=len(self._img_trn_path), desc=desc, ascii=True, ncols=100) as pbar:\n for n, image_path in enumerate(self._img_trn_path):\n pbar.update(1)\n label_path = self._lbl_trn_path[n]\n image, label = self._load_sample(image_path, label_path)\n self._labels_train.append(label)\n self._images_train.append(image)\n\n if self.mode == 'train_with_val':\n self._images_val, self._labels_val = [], []\n desc = \"Loading val image pairs & flows\"\n with tqdm(total=len(self._img_val_path), desc=desc, ascii=True, ncols=100) as pbar:\n for n, image_path in enumerate(self._img_val_path):\n pbar.update(1)\n label_path = self._lbl_val_path[n]\n image, label = self._load_sample(image_path, label_path, preprocess=False)\n self._labels_val.append(label)\n self._images_val.append(image)\n\n if self.opts['tb_test_imgs'] is True:\n self._images_test = []\n desc = \"Loading test samples\"\n with tqdm(total=len(self._img_tst_path), desc=desc, ascii=True, ncols=100) as pbar:\n for image_path in self._img_tst_path:\n pbar.update(1)\n self._images_test.append(self._load_sample(image_path, preprocess=False))\n\n elif self.mode in ['val', 'val_notrain']:\n\n self._images_val, self._labels_val = [], []\n desc = \"Loading val image pairs & flows\"\n with tqdm(total=len(self._img_val_path), desc=desc, ascii=True, ncols=100) as pbar:\n for n, image_path in enumerate(self._img_val_path):\n pbar.update(1)\n label_path = self._lbl_val_path[n]\n image, label = self._load_sample(image_path, label_path, preprocess=False)\n self._labels_val.append(label)\n self._images_val.append(image)\n\n elif self.mode == 'test':\n self._images_test = []\n desc = \"Loading test samples\"\n with tqdm(total=len(self._img_tst_path), desc=desc, ascii=True, ncols=100) as pbar:\n for image_path in self._img_tst_path:\n pbar.update(1)\n self._images_test.append(self._load_sample(image_path, preprocess=False))", "def visualize_dense_layers(self, last_layer='fc', savefig_path=\"\", nof_times=3, class_number=20):\n\n layer_idx = [idx for idx, layer in 
enumerate(self.model.layers) if layer.name == last_layer][0]\n\n # Generate three different images of the same output index.\n vis_images = []\n\n # Print class number (20) * nof_times (3)\n # for idx in [20, 20, 20]:\n for idx in [class_number] * nof_times:\n img = visualize_activation(self.model, layer_idx, filter_indices=idx, max_iter=500)\n img = utils.draw_text(img, str(idx))\n vis_images.append(img)\n\n stitched = utils.stitch_images(vis_images)\n plt.axis('off')\n plt.imshow(stitched)\n plt.title(last_layer)\n plt.savefig(savefig_path)\n\n print('debug')", "def load_ismrmrd_ifft3d_reconstruction(filename):\n\n if not os.path.isfile(filename):\n print(\"%s is not a valid file\" % filename)\n raise SystemExit\n dset = ismrmrd.Dataset(filename, 'dataset', create_if_needed=False)\n\n #Read some fields from the XML header\n hdr = ismrmrd.xsd.CreateFromDocument(dset.read_xml_header())\n #get encoding and reconstruction information\n enc = hdr.encoding[0]\n # Matrix size\n eNx = enc.encodedSpace.matrixSize.x\n eNy = enc.encodedSpace.matrixSize.y\n eNz = enc.encodedSpace.matrixSize.z\n rNx = enc.reconSpace.matrixSize.x\n rNy = enc.reconSpace.matrixSize.y\n\n # Number of Slices, Reps, Contrasts, etc.\n #We have to wrap the following in a if/else because a valid xml header may\n #not have an entry for some of the parameters\n ncoils = hdr.acquisitionSystemInformation.receiverChannels\n if enc.encodingLimits.slice != None:\n nslices = enc.encodingLimits.slice.maximum + 1\n else:\n nslices = 1\n\n if enc.encodingLimits.repetition != None:\n nreps = enc.encodingLimits.repetition.maximum + 1\n else:\n nreps = 1\n\n if enc.encodingLimits.contrast != None:\n ncontrasts = enc.encodingLimits.contrast.maximum + 1\n else:\n ncontrasts = 1\n\n\n # Loop through the acquisitions looking for noise scans\n firstacq = 0\n for acqnum in range(dset.number_of_acquisitions()):\n acq = dset.read_acquisition(acqnum)\n\n # TODO: Currently ignoring noise scans\n if acq.isFlagSet(ismrmrd.ACQ_IS_NOISE_MEASUREMENT):\n print(\"Found noise scan at acq \", acqnum)\n continue\n else:\n firstacq = acqnum\n print(\"Imaging acquisition starts acq \", acqnum)\n break\n\n # Initialiaze a storage array\n all_data = np.zeros((nreps, ncontrasts, nslices, ncoils, eNz, eNy, rNx), dtype=np.complex64)\n\n # Loop through the rest of the acquisitions and stuff\n for acqnum in range(firstacq, dset.number_of_acquisitions()):\n acq = dset.read_acquisition(acqnum)\n head = acq.getHead()\n\n # TODO: this is where we would apply noise pre-whitening\n\n #padd if acquisition data is not complete (padding)\n if acq.data.shape[1]<eNx :\n x0=int((eNx - acq.data.shape[1]) / 2)\n zeros = np.zeros((acq.data.shape[0], x0))\n padded_acq_data = np.append(np.append(zeros, acq.data, axis=1), zeros, axis=1)\n acq.resize(eNx, acq.active_channels, acq.trajectory_dimensions)\n acq.data[:]=padded_acq_data\n\n # Remove oversampling if needed\n if eNx != rNx:\n #xline = transform.transform_kspace_to_image(acq.data, [1])\n xline = transform.transform_kspace_to_image(acq.data, dim=(1,), img_shape=(eNx,))\n x0 = int((eNx - rNx) / 2)\n x1 = int((eNx - rNx) / 2 + rNx)\n xline = xline[:, x0:x1]\n acq.resize(rNx, acq.active_channels, acq.trajectory_dimensions)\n acq.center_sample = int(rNx / 2)\n # need to use the [:] notation here to fill the data\n acq.data[:] = transform.transform_image_to_kspace(xline, dim=(1,), k_shape=(rNx,))\n\n # Stuff into the buffer\n rep = acq.idx.repetition\n contrast = acq.idx.contrast\n slice = acq.idx.slice\n y = 
acq.idx.kspace_encode_step_1\n z = acq.idx.kspace_encode_step_2\n all_data[rep, contrast, slice, :, z, y, :] = acq.data\n\n # Reconstruct images\n images = np.zeros((nreps, ncontrasts, nslices, eNz, rNy, rNx), dtype=np.float32)\n img_scaled = []\n for rep in range(nreps):\n for contrast in range(ncontrasts):\n for slice in range(nslices):\n # FFT\n if eNz > 1:\n # 3D\n im = transform.transform_kspace_to_image(all_data[rep, contrast, slice, :, :, :, :], [1, 2, 3])\n else:\n # 2D\n im = transform.transform_kspace_to_image(all_data[rep, contrast, slice, :, 0, :, :], [2, 3])\n\n if eNy != rNy:\n x0 = int((eNy - rNy) / 2)\n x1 = int((eNy - rNy) / 2 + rNy)\n im = im[:,:,x0:x1, :]\n\n # Sum of squares\n im = np.sqrt(np.sum(np.abs(im) ** 2, 0))\n\n # Stuff into the output\n if eNz > 1:\n # 3D\n images[rep, contrast, slice, :, :, :] = im\n else:\n # 2D\n images[rep, contrast, slice, 0, :, :] = im\n\n img_scaled.append(im)\n\n dset.close()\n\n return [head, hdr, img_scaled]", "def visualise_data_set(x_arr, y_arr):\n # Instantiate a PCA object for the sake of easy visualisation\n pca = PCA(n_components=3)\n\n # Fit and transform x to visualise inside a 3D feature space\n x_visualisation = pca.fit_transform(x_arr)\n\n figure = plt.figure()\n axis = Axes3D(figure)\n\n axis.scatter(x_visualisation[y_arr == 0, 0], x_visualisation[y_arr == 0, 1], x_visualisation[y_arr == 0, 2],\n label=\"Class #0\",\n edgecolor=almost_black, facecolor=palette[0], linewidth=0.3, marker=\"o\")\n axis.scatter(x_visualisation[y_arr == 1, 0], x_visualisation[y_arr == 1, 1], x_visualisation[y_arr == 1, 2],\n label=\"Class #1\",\n edgecolor=almost_black, facecolor=palette[2], linewidth=0.3, marker=\"^\")\n axis.set_title(\"PCA to 3 components\")\n\n plt.show()", "def init():\n ########################\n # OPTIONS\n ########################\n # Debugging tools\n global TIMER # displays time of every major step\n TIMER = True\n global MONITOR # displays monitoring infos\n MONITOR = False\n \n global directories\n directories = {'1Face': 'data/1Face/',\n '2Faces': 'data/2Faces/',\n '3Faces': 'data/3Faces/',\n 'test': 'data/test/'}\n \n # Opt. swicthes\n global maxfinder # to find the max dim. 
amongst the pictures\n maxfinder = False\n global ML_mode\n ML_mode = {'CNN_Train': False,\n 'CNN_Pred' : True,\n 'Sampler': True}\n \n # Global variables\n global num_pics\n num_pics = {'1Face': 0,\n '2Faces': 0,\n '3Faces': 0}\n global labels\n labels = {'1Face': 0,\n '2Faces': 1,\n '3Faces': 2}\n global num_data\n num_data = 0\n global splitsize # Fraction of data to build the training set\n splitsize = 0.7 \n global maxheight # Resize the pictures to a power of 2 for CNN (2^8 here)\n maxheight = 128\n global maxwidth\n maxwidth = 128\n global TreshEdge # Number of consecutive black pixels to define an edge\n TreshEdge = 2\n global TreshFace # Number of white pixels to define a face (or large edge)\n TreshFace = maxheight/16", "def _load_bboxes_3d(self, results):\n results[\"gt_bboxes_3d\"] = results[\"ann_info\"][\"gt_bboxes_3d\"]\n results[\"bbox3d_fields\"].append(\"gt_bboxes_3d\")\n return results", "def load_cloudset(self, idx: int):\n\n while idx < len(self.files1):\n file = self.files1[idx]\n slashs = [pos for pos, char in enumerate(file) if char == '/']\n filename = file[slashs[-1]:-4]\n print(\"Viewing: \" + filename)\n\n with open(file, 'rb') as f:\n content = pickle.load(f)\n\n hybrid_idx = content[0]\n hybrid_file = [file for file in self.files2 if 'cloud_{}'.format(hybrid_idx) in file]\n hybrid = basics.load_pkl(hybrid_file[0])\n\n local_bfs = content[1]\n sample = content[2]\n bfs_cloud = visualize.prepare_bfs(hybrid, local_bfs)\n\n hybrid_bfs = clouds.merge_clouds([hybrid, bfs_cloud])\n res = self.core_next(hybrid_bfs, sample, 'sample_h{}_i{}'.format(hybrid_idx, idx))\n\n if res is None:\n return\n else:\n idx += res", "def view(config_file):\n import open3d as o3d\n with open(config_file) as f:\n config = json.load(f)\n scenes = get_realsense_scenes(config['realsense_dir'])\n for scene in scenes:\n # if scene['scene_name'] != \"Scene_004\":\n # continue\n scene_data = get_data_from_scene(scene)\n logger.info(\"Visualizing - %s\", scene['scene_name'])\n pcd = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(scene_data['points3d']))\n o3d.visualization.draw_geometries_with_editing([pcd])\n pcd = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(scene_data['points3d_segmented']))\n o3d.visualization.draw_geometries([pcd])", "def run(path, f3_param=[[1, 0.01]], minArea=20, saveNumber=0):\n\tprint('=== path:', path)\n\t\n\t# load x/y/z voxel size (assumes .tif was saved with Fiji\n\txVoxel, yVoxel, zVoxel = readVoxelSize(path)\n\tprint(' xVoxel:', xVoxel, 'yVoxel:', yVoxel, 'zVoxel:', zVoxel)\n\t\n\t# load the data\n\treader = AICSImage(path) \n\tIMG = reader.data.astype(np.float32)\n\tprint(' IMG.shape:', IMG.shape)\n\n\tstructure_channel = 0\n\tstruct_img0 = IMG[0,structure_channel,:,:,:].copy()\n\n\t# give us a guess for our intensity_scaling_param parameters\n\t#from aicssegmentation.core.pre_processing_utils import suggest_normalization_param\n\t#suggest_normalization_param(struct_img0)\n\tlow_ratio, high_ratio = my_suggest_normalization_param(struct_img0)\n\n\t#intensity_scaling_param = [0.0, 22.5]\n\tintensity_scaling_param = [low_ratio, high_ratio]\n\tprint('*** intensity_normalization() intensity_scaling_param:', intensity_scaling_param)\n\t\n\t# intensity normalization\n\tprint('=== calling intensity_normalization()')\n\tstruct_img = intensity_normalization(struct_img0, scaling_param=intensity_scaling_param)\n\n\t# smoothing with edge preserving smoothing \n\tprint('=== calling edge_preserving_smoothing_3d()')\n\tstructure_img_smooth = 
edge_preserving_smoothing_3d(struct_img)\n\n\t#\n\t\"\"\"\n\tsee: notebooks/playground_filament3d.ipynb\n\n\tscale_x is set based on the estimated thickness of your target filaments.\n\t\tFor example, if visually the thickness of the filaments is usually 3~4 pixels,\n\t\tthen you may want to set scale_x as 1 or something near 1 (like 1.25).\n\t\tMultiple scales can be used, if you have filaments of very different thickness.\n\tcutoff_x is a threshold applied on the actual filter reponse to get the binary result.\n\t\tSmaller cutoff_x may yielf more filaments, especially detecting more dim ones and thicker segmentation,\n\t\twhile larger cutoff_x could be less permisive and yield less filaments and slimmer segmentation.\n\t\"\"\"\n\t#f3_param = [[1, 0.01]] # [scale_1, cutoff_1]\n\tprint('=== calling filament_3d_wrapper() f3_param:', f3_param)\n\tbw = filament_3d_wrapper(structure_img_smooth, f3_param)\n\t\t\n\t#\n\t#minArea = 20 # from recipe\n\tprint('=== calling remove_small_objects() minArea:', minArea)\n\tseg = remove_small_objects(bw>0, min_size=minArea, connectivity=1, in_place=False)\n\n\t#\n\t# save original file again (with saveNumber\n\tsaveNumberStr = ''\n\tif saveNumber>1:\n\t\tsaveNumberStr = '_' + str(saveNumber)\n\t\t\n\t#\n\t# save mask\n\tseg = seg >0\n\tout=seg.astype(np.uint8)\n\tout[out>0]=255\n\t\n\t# save _dvMask\n\tmaskPath = os.path.splitext(path)[0] + '_dvMask' + saveNumberStr + '.tif'\n\tprint('=== saving 3D mask [WILL FAIL IF FILE EXISTS] as maskPath:', maskPath)\n\ttry:\n\t\twriter = omeTifWriter.OmeTifWriter(maskPath)\n\t\twriter.save(out)\n\texcept(OSError) as e:\n\t\tprint(' error: file already exists, di dnot resave, maskPath:', maskPath)\n\t\t\n\t#\n\t# analyze skeleton, take a 3d mask and analyze as a 1-pixel skeleton\n\tretDict0, mySkeleton = myAnalyzeSkeleton(out=out, imagePath=path)\n\tretDict = OrderedDict()\n\tretDict['tifPath'] = path\n\tretDict['maskPath'] = maskPath\n\tretDict['tifFile'] = os.path.basename(path)\n\tretDict['xVoxel'] = xVoxel\n\tretDict['yVoxel'] = yVoxel\n\tretDict['zVoxel'] = zVoxel\n\t#\n\tretDict['params'] = OrderedDict()\n\tretDict['params']['saveNumber'] = saveNumber\n\tretDict['params']['intensity_scaling_param'] = intensity_scaling_param # calculated in my_suggest_normalization_param\n\tretDict['params']['f3_param'] = f3_param[0] # cludge, not sure where to put this. 
f3_param is a list of list but screws up my .csv output !!!\n\tretDict['params']['minArea'] = minArea\n\n\tretDict.update( retDict0 )\n\n\t# save 1-pixel skeleton: mySkeleton\n\t# save _dvSkel\n\tskelPath = os.path.splitext(path)[0] + '_dvSkel' + saveNumberStr + '.tif'\n\tprint('=== saving 3D skel [WILL FAIL IF FILE EXISTS] as maskPath:', skelPath)\n\ttry:\n\t\twriter = omeTifWriter.OmeTifWriter(skelPath)\n\t\twriter.save(mySkeleton)\n\texcept(OSError) as e:\n\t\tprint(' error: file already exists, di dnot resave, skelPath:', skelPath)\n\t\t\t\n\treturn retDict", "def loadPreviewDataforClassification(self):\n # parameters for data load from GUI\n self.loadDataModel.pathToDataSet = self.entryPath.get()\n self.loadDataModel.firstRowIsTitle = bool(self.checkVarRow.get())\n self.loadDataModel.firstColIsRowNbr = bool(self.checkVarCol.get())\n # if entry field is empty, set nbrOfCategories to 0\n self.loadDataModel.dataIsForTraining = False\n\n # Load data\n try:\n self.loadDataModel.loadData()\n print(\"LoadDataView: self.loadDataModel.data: \", self.loadDataModel.data)\n except FileNotFoundError:\n tk.messagebox.showerror(\"Error\", \" File not found.\")\n except ValueError:\n tk.messagebox.showerror(\"Error\", \"The number of categories entered is incorrect. Enter number > 0 and smaller\"\n \" the number of columns in the dataset.\")\n except:\n print(\"Load data failed because of something different than nbrOfCategories entered or file not found.\")\n else: # if data load worked do the following\n self.loadDataInformation.config(text=\"Data has been successfully loaded and stored.\", fg=\"green\")\n self.previewData()", "def load_select(self, selected):\n\t\tfor filename in selected:\n\t\t\tself.load(filename)\n\n\t\tif \"worddict\" in selected and \"classdict\" in selected:\n\t\t\tself.reverse_dicts()", "def setup_3D( self ):\r\n # ~ Modes and Flags ~\r\n # Use 'GL_DEPTH_TEST' to ensure that OpenGL maintains a sensible drawing order for polygons no matter the viewing angle\r\n glEnable( GL_DEPTH_TEST ) # Do these setup functions really have to be run every single frame? 
# TODO: Try moving these to the '__init__' , see what happens\r\n # glEnable( GL_CULL_FACE ) # Uncomment to preform backface culling # This might erase arrowheads if they are away-facing!\r\n # ~ View Frustum Setup ~\r\n glMatrixMode( GL_PROJECTION )\r\n glLoadIdentity()\r\n gluPerspective( 70 , self.width / float( self.height ) , 0.1 , 200 )\r\n # ~ View Direction Setup ~\r\n glMatrixMode( GL_MODELVIEW )\r\n glLoadIdentity()\r\n gluLookAt( *self.camera )", "def test_3d():\n dic, data = ng.bruker.read(os.path.join(DATA_DIR, \"bruker_3d\"))\n assert dic['FILE_SIZE'] == 91226112\n assert data.shape == (116, 128, 768)\n assert round(data[0, 0, 40].real, 2) == 18.0\n assert round(data[0, 0, 40].imag, 2) == -66.0\n assert round(data[5, 13, 91].real, 2) == 1138.0\n assert round(data[5, 13, 91].imag, 2) == 3482.0\n write_readback(dic, data)", "def _load_masks_3d(self, results):\n pts_instance_mask_path = results[\"ann_info\"][\"pts_instance_mask_path\"]\n\n if self.file_client is None:\n self.file_client = mmcv.FileClient(**self.file_client_args)\n try:\n mask_bytes = self.file_client.get(pts_instance_mask_path)\n pts_instance_mask = np.frombuffer(mask_bytes, dtype=np.int)\n except ConnectionError:\n mmcv.check_file_exist(pts_instance_mask_path)\n pts_instance_mask = np.fromfile(pts_instance_mask_path, dtype=np.long)\n\n results[\"pts_instance_mask\"] = pts_instance_mask\n results[\"pts_mask_fields\"].append(\"pts_instance_mask\")\n return results", "def main():\n # Load data and pre-process it\n path = \"data2.csv\"\n features, labels, df = load_data(path)\n\n # Learning rates - including our own\n learning_rates = [0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1, 5, 10, 0.75]\n\n # Num iterations\n num_iterations = [100 for i in range(9)] + [1000]\n\n # Keep track of all final weights for different learning rates\n lines = []\n\n # Compute weights for each learning rate\n for rate, num_iters in zip(learning_rates, num_iterations):\n\n # Get weights from gradient descent and add to weights list\n weights = gradient_descent(features, labels, rate, num_iters)\n lines.append([rate, num_iters] + weights)\n\n # Now write 'lines' to file\n with open('results2.csv', \"w\") as out_file:\n for line in lines:\n out_file.write(\"{}, {}, {}, {}, {} \\n\".format(line[0], line[1],\n line[2], line[3], line[4]))\n out_file.close()\n\n # Select which weights to use for plotting\n index = -1\n\n\n plot_db.visualize_3d(df, lin_reg_weights=lines[index][2:],\n feat1='norm_x1', feat2='norm_x2', labels='label',\n xlim=(-1, 1), ylim=(-1, 1), zlim=(0, 3),\n alpha=learning_rates[index], xlabel='age',\n ylabel='weight', zlabel='height',\n title='')", "def c3d(self):\n model = Sequential()\n # 1st layer group\n model.add(Conv3D(64, 3, 3, 3, activation='relu',\n border_mode='same', name='conv1',\n subsample=(1, 1, 1),\n input_shape=self.input_shape))\n model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2),\n border_mode='valid', name='pool1'))\n # 2nd layer group\n model.add(Conv3D(128, 3, 3, 3, activation='relu',\n border_mode='same', name='conv2',\n subsample=(1, 1, 1)))\n model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),\n border_mode='valid', name='pool2'))\n # 3rd layer group\n model.add(Conv3D(256, 3, 3, 3, activation='relu',\n border_mode='same', name='conv3a',\n subsample=(1, 1, 1)))\n model.add(Conv3D(256, 3, 3, 3, activation='relu',\n border_mode='same', name='conv3b',\n subsample=(1, 1, 1)))\n model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),\n border_mode='valid', name='pool3'))\n # 4th 
layer group\n model.add(Conv3D(512, 3, 3, 3, activation='relu',\n border_mode='same', name='conv4a',\n subsample=(1, 1, 1)))\n model.add(Conv3D(512, 3, 3, 3, activation='relu',\n border_mode='same', name='conv4b',\n subsample=(1, 1, 1)))\n model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),\n border_mode='valid', name='pool4'))\n\n # 5th layer group\n model.add(Conv3D(512, 3, 3, 3, activation='relu',\n border_mode='same', name='conv5a',\n subsample=(1, 1, 1)))\n model.add(Conv3D(512, 3, 3, 3, activation='relu',\n border_mode='same', name='conv5b',\n subsample=(1, 1, 1)))\n model.add(ZeroPadding3D(padding=(0, 1, 1)))\n # model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),\n # border_mode='valid', name='pool5', dim_ordering=\"tf\"))\n model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),\n border_mode='same', name='pool5', dim_ordering=\"tf\"))\n model.add(Flatten())\n\n # FC layers group\n model.add(Dense(4096, activation='relu', name='fc6'))\n model.add(Dropout(0.5))\n model.add(Dense(4096, activation='relu', name='fc7'))\n model.add(Dropout(0.5))\n model.add(Dense(self.nb_classes, activation='softmax'))\n for layer in model.layers:\n print(layer.output_shape)\n return model", "def load_params():\n file_name = filedialog.askopenfilename(\n filetypes=[(\"JSON\", \"*.json\")])\n if file_name:\n self.parent_class.classes[\"fractal\"].curve.load_from_file(\n file_name)\n self.parent_class.classes[\"fractal\"].curve.set_parent_parameters(\n )\n self.rules_frame_class.fill_entries_from_rules(\n self.parent_class.classes[\"fractal\"].rules)\n # fill the entries in rules input on load\n self.set_recursion_depth_entry(\n self.parent_class.classes[\"fractal\"].recursion_depth)\n self.set_base_length_entry(\n self.parent_class.classes[\"fractal\"].base_length)\n self.rules_frame_class.render_preview()", "def layer_show_3D(layers, width, accuracys, title, path):\n fig = plt.figure(dpi=120, figsize=(8, 6))\n ax = Axes3D(fig)\n fit = inp.interp2d(layers, width, accuracys)\n y_n = np.linspace(min(layers), max(layers), 5120)\n x_n = np.linspace(min(width), max(width), 5120)\n epoches_n = fit(y_n, x_n)\n surf = ax.plot_surface(y_n, x_n, epoches_n, cmap=cm.rainbow)\n # plt.title(title)\n ax.set_xlabel('layers number')\n ax.set_ylabel('kernel width')\n ax.set_zlabel('accuracy')\n fig.colorbar(surf, shrink=0.5, aspect=5)\n # plt.tight_layout()\n plt.savefig(path)", "def init_layers(self):\n\n # get caching layers activated\n caching_layers = G3WCachingLayer.objects.all()\n for caching_layer in caching_layers:\n self.add_layer(str(caching_layer), caching_layer)", "def __init__(self, file3dm=None):\n self._file3dm = file3dm or File3dm()\n\n # Loop over layers in the file3dm and add them as classattributes\n # with their full name\n for layer in self._file3dm.Layers:\n setattr(UmiLayers, layer.FullPath, layer)\n\n # Loop over predefined umi layers, add them (if they don't exist) and\n # set their color; default color is black (0,0,0,255) if not defined.\n for layer_name in UmiLayers._base_layers:\n layer = self.add_layer(layer_name)\n # Try Sets Layers as class attr\n layer.Color = UmiLayers._base_layers.get(layer.FullPath, (0, 0, 0, 255))[\n \"Color\"\n ]", "def test_visuThreeD1(self):\n\n visu_logic = slicer.modules.visuThreeDWidget.logic\n #visu_logic.set_user_table(self.user_table)\n #visu_logic.set_user_file('/work/maria5/EBDS_CIVILITY/DataShare/TestMatricesForVisualization/AAL78/PerNodeMetrics/Conte_EigenVectorCentrality_4Yr_AAL78Regions.csv')\n 
#visu_logic.set_user_file('/Users/Wieke/Documents/visuThreeD/neo-0042-4year_AvgSym_normFull.csv')\n # visu_logic.create_node_actors()\n # visu_logic.create_line_actors()\n # visu_logic.update()\n #visu_logic.set_node_range()", "def run(self):\r\n # Close any open models\r\n self.cleanUp()\r\n # Dynamically select file to preview\r\n self.selectTrial() \r\n # Add adjusted COM (RRA/CMC) model\r\n self.loadAdjustedModel()\r\n # Hide the markers from view\r\n self.hideModelMarkers()\r\n # Load CMC motion to model\r\n self.loadCMCMotion()", "def visualise_data_pca_3d_movie(self, component1, component2, component3, input_data=False):\n if input_data:\n self.__generate_input_data()\n pca_3d_movie(array(self.input_data), component1, component2, component3, self.class_indices, self.path,\n 'high_dimension_data', self.legend)\n else:\n self.__generate_output_data()\n pca_3d_movie(array(self.output_data), component1, component2, component3, self.class_indices, self.path,\n 'low_dimension_data', self.legend)", "def selected_dataset(train_ratio = 0.9, defect_ratio = 1, rescale = True, \\\r\n\tshuffle = True, set_defect_ratio = True, data_type = 'type1', used_pickle = True):\r\n\tpkl_file_root = 'selected_dataset'\r\n\tif used_pickle:\r\n\t\ttry:\r\n\t\t\timages = load_from_pickle('train_images', pkl_file_root)\r\n\t\t\tlabels = load_from_pickle('train_labels', pkl_file_root)\r\n\t\t\tindex = load_from_pickle('specific_indices', pkl_file_root)\r\n\t\t\tnd_img = load_from_pickle('not_defect_image_for_test', pkl_file_root)\r\n\t\texcept:\r\n\t\t\tprint('pickle file not exist, data prosessing..')\r\n\t\t\timages, labels, index, nd_img = selected_dataset(train_ratio, defect_ratio, rescale, shuffle, set_defect_ratio, data_type, False)\r\n\r\n\telse:\r\n\t\ttype1_img, type1_label, type2_img, type2_label = load_data_all()\r\n\t\tif data_type == 'type1':\r\n\t\t\ttype1_imgs, type1_labels = type1_makeup(type1_img, type1_label, v1 = 1000, v2 = 160, masking = True)\r\n\t\t\ttype1 = split_train_test(type1_imgs, type1_labels, train_ratio = train_ratio, shuffle = shuffle)\r\n\t\t\t# again\r\n\t\t\ttrain_img, train_label = type1_makeup(type1['train_img'], type1['train_label'], v1 = 100, masking = False)\r\n\t\t\ttest_img, test_label = type1_makeup(type1['test_img'], type1['test_label'], v1 = 100, masking = False)\r\n\t\t\t\r\n\t\telif data_type == 'type2':\r\n\t\t\ttype2_imgs, type2_labels = type2_makeup(type2_img, type2_label, v1 = 1250, v2 = 55, masking = True)\r\n\t\t\ttype2 = split_train_test(type2_imgs, type2_labels, train_ratio = train_ratio, shuffle = shuffle)\r\n\r\n\t\t\ttrain_img, train_label = type2_makeup(type2['train_img'], type2['train_label'], v1 = 100, masking = False)\r\n\t\t\ttest_img, test_label = type2_makeup(type2['test_img'], type2['test_label'], v1 = 100, masking = False)\r\n\r\n\t\telse:\r\n\t\t\traise ValueError('..')\r\n\r\n\t\t# find not defect surface in sub-set\r\n\t\tnd_img, nd_lb = find_contain_target(test_img, test_label, target = 'background')\r\n\r\n\t\tif set_defect_ratio:\r\n\t\t\t# if only_target was false, (background : defect)ratio set to same ratio or func's argument ratio.\r\n\t\t\tdefect_img, defect_label = find_contain_target(train_img, train_label, 'defect')\r\n\t\t\tn_defect = len(defect_img)\r\n\t\t\tuse_n_image = int(n_defect * defect_ratio)\r\n\t\t\t#print(' # of contain target, ', n_defect) \r\n\t\t\tback_img, back_label = find_contain_target(train_img, train_label, 'background')\r\n\t\t\tn_back_images = len(back_img)\r\n\r\n\t\t\tif n_back_images < (n_defect + 
use_n_image):\r\n\t\t\t\traise ValueError('ratio error')\r\n\r\n\t\t\trandom_index = np.random.choice((len(back_img)-use_n_image), 1)[0]\r\n\t\t\tback_img = back_img[random_index:random_index+use_n_image]\r\n\t\t\tback_label = back_label[random_index:random_index+use_n_image]\r\n\r\n\t\t\timages = np.concatenate((defect_img, back_img), axis = 0)\r\n\t\t\tlabels = np.concatenate((defect_label, back_label), axis = 0)\r\n\t\telse:\r\n\t\t\timages = train_img\r\n\t\t\tlabels = train_label\r\n\r\n\t\tconcat_images = np.concatenate((images, nd_img), axis = 0)\r\n\t\tconcat_labels = np.concatenate((labels, nd_lb), axis = 0)\r\n\r\n\t\timages, labels, index = shuffle_with_sameindex_img_label(concat_images, concat_labels, memorial_n = nd_img.shape[0]) \r\n\r\n\t\tif rescale:\r\n\t\t\timages = images / 255\r\n\t\t\tnd_img = nd_img / 255\r\n\t\timages = np.expand_dims(images, axis = 3)\r\n\t\tlabels = np.expand_dims(labels, axis = 3)\r\n\t\tnd_img = np.expand_dims(nd_img, axis = 3)\r\n\t\tprint(' >> images set shape : ', images.shape)\r\n\r\n\t\tarray_save_to_pickle(images, 'train_images', pkl_file_root)\r\n\t\tarray_save_to_pickle(labels, 'train_labels', pkl_file_root)\r\n\t\tarray_save_to_pickle(index, 'specific_indices', pkl_file_root)\r\n\t\tarray_save_to_pickle(nd_img, 'not_defect_image_for_test', pkl_file_root)\r\n\t\tnd_img = np.squeeze(nd_img, axis = -1)\r\n\t\tfor i in range(len(nd_img)):\r\n\t\t\tnd_img_name = 'nd_img' + str(i)\r\n\t\t\timg_save(nd_img[i], nd_img_name, rescale = True, mode = 'L', root = 'GAN_TEST_SAMPLE')\r\n\r\n\treturn images, labels, index, nd_img", "def load_slices(self, dataset_dir, n_images, n_patches, channels = [\"base\"]):\n \n # add classes to be trained on\n \n self.add_class(\"slices\", 1, \"tissue\")\n self.add_class(\"slices\", 2, \"mag\")\n \n # collect image list and initialize counter\n \n image_list = os.listdir(dataset_dir)\n image_counter = 0\n patch_counter = 0\n \n # cycle over images and save patches to database.\n \n for i in range(n_images):\n \n image_path = os.path.join(dataset_dir,image_list[i])\n patch_list = os.listdir(image_path)\n \n print(f\"processing: image {i}\") \n \n for j in range(n_patches):\n \n patch_path = os.path.join(image_path, patch_list[j])\n \n patch_image_path = os.path.join(patch_path,\"images\")\n \n file_list = os.listdir(patch_image_path)\n \n image_file_path = os.path.join(patch_image_path,file_list[0])\n \n image = skimage.io.imread(image_file_path)\n \n height, width = image.shape\n \n self.add_image(\n \"slices\",\n image_id = patch_counter,\n path = patch_path,\n width = width, height = height,\n channels = channels,\n )\n patch_counter += 1", "def get_data_loaders(cap_files, visual_feats, vocab, bow2vec, batch_size=100, num_workers=2, n_caption=2, video2frames=None, video2frames_target=None, visual_feats_target=None, caption_file_target=None, multi_flag=0):\n if video2frames_target!=None and visual_feats_target!=None:\n if multi_flag == 0:\n dset = {'train': Dataset4DualEncoding(cap_files['train'], visual_feats['train'], bow2vec, vocab, video2frames=video2frames['train'], video2frames_target=video2frames_target['train'], visual_feat_target=visual_feats_target['train'], caption_file_target=caption_file_target),\n 'val': Dataset4DualEncoding(cap_files['val'], visual_feats['val'], bow2vec, vocab, n_caption, video2frames=video2frames['val']),\n 'test': Dataset4DualEncoding(cap_files['test'], visual_feats['test'], bow2vec, vocab, n_caption, video2frames=video2frames['test'])}\n else:\n dset = {'train': 
Dataset4DualEncoding(cap_files['train'], visual_feats['train'], bow2vec, vocab, video2frames=video2frames['train'], video2frames_target=video2frames_target['train'], visual_feat_target=visual_feats_target['train'], caption_file_target=caption_file_target, visual_feat_source2=visual_feats['train2'], video2frames_source2=video2frames['train2'], caption_file_source2=cap_files['train2']),\n 'val': Dataset4DualEncoding(cap_files['val'], visual_feats['val'], bow2vec, vocab, n_caption, video2frames=video2frames['val']),\n 'test': Dataset4DualEncoding(cap_files['test'], visual_feats['test'], bow2vec, vocab, n_caption, video2frames=video2frames['test'])}\n\n\n\n else:\n dset = {'train': Dataset4DualEncoding(cap_files['train'], visual_feats['train'], bow2vec, vocab, video2frames=video2frames['train']),\n 'val': Dataset4DualEncoding(cap_files['val'], visual_feats['val'], bow2vec, vocab, n_caption, video2frames=video2frames['val']),\n 'test': Dataset4DualEncoding(cap_files['test'], visual_feats['test'], bow2vec, vocab, n_caption, video2frames=video2frames['test'])}\n\n data_loaders = {x: torch.utils.data.DataLoader(dataset=dset[x],\n batch_size=batch_size,\n shuffle=(x=='train'),\n pin_memory=True,\n num_workers=num_workers,\n collate_fn=collate_frame_gru_fn)\n for x in ['train', 'val', 'test']}\n return data_loaders", "def load_defects(self, val_dir):\n \n img_list_1 = os.listdir(val_dir+'/'+'1')\n img_list_2 = os.listdir(val_dir+'/'+'2')\n img_list_3 = os.listdir(val_dir+'/'+'3')\n img_list_4 = os.listdir(val_dir+'/'+'4')\n\n\n\n img_list_1 = self.make_imgs_list(val_dir + '/' + '1', img_list_1)\n img_list_2 = self.make_imgs_list(val_dir + '/' + '2', img_list_2)\n img_list_3 = self.make_imgs_list(val_dir + '/' + '3', img_list_3)\n img_list_4 = self.make_imgs_list(val_dir + '/' + '4', img_list_4)\n\n\n img_list_1 = self.load_imgsLabels(img_list_1)\n img_list_2 = self.load_imgsLabels(img_list_2)\n img_list_3 = self.load_imgsLabels(img_list_3)\n img_list_4 = self.load_imgsLabels(img_list_4)\n\n\n img_list_1 = self.features_to_np_array(img_list_1)\n img_list_2 = self.features_to_np_array(img_list_2)\n img_list_3 = self.features_to_np_array(img_list_3)\n img_list_4 = self.features_to_np_array(img_list_4)\n\n lbl_list_1 = img_list_1.shape[0]*[1]\n lbl_list_2 = img_list_2.shape[0]*[2]\n lbl_list_3 = img_list_3.shape[0]*[3]\n lbl_list_4 = img_list_4.shape[0]*[4]\n\n\n imgs = np.concatenate((img_list_1, img_list_2, img_list_3, img_list_4))\n lbls = lbl_list_1 + lbl_list_2 + lbl_list_3 + lbl_list_4\n\n\n lbls = np.array(lbls)\n \n lbls = lbls - 1\n \n lbls = to_categorical(lbls)\n \n return imgs, lbls", "def load_resource():\n resource_file = 'curves/' + self.combo_box[\"class\"].get() + '.json'\n self.classes[\"fractal\"].curve.load_from_resource(\n resource_file)\n self.classes[\"fractal\"].curve.set_parent_parameters()\n self.classes[\"parameters\"].rules_frame_class.fill_entries_from_rules(\n self.classes[\"fractal\"].rules)\n self.classes[\"parameters\"].set_base_length_entry(\n self.classes[\"fractal\"].base_length)\n self.classes[\"parameters\"].rules_frame_class.render_preview()", "def _load_metadata(self):\n\n cub_dir = self.root / \"CUB_200_2011\"\n images_list: Dict[int, List] = OrderedDict()\n\n with open(str(cub_dir / \"train_test_split.txt\")) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=\" \")\n for row in csv_reader:\n img_id = int(row[0])\n is_train_instance = int(row[1]) == 1\n if is_train_instance == self.train:\n images_list[img_id] = []\n\n with open(str(cub_dir / 
\"images.txt\")) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=\" \")\n for row in csv_reader:\n img_id = int(row[0])\n if img_id in images_list:\n images_list[img_id].append(row[1])\n\n with open(str(cub_dir / \"image_class_labels.txt\")) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=\" \")\n for row in csv_reader:\n img_id = int(row[0])\n if img_id in images_list:\n # CUB starts counting classes from 1 ...\n images_list[img_id].append(int(row[1]) - 1)\n\n with open(str(cub_dir / \"bounding_boxes.txt\")) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=\" \")\n for row in csv_reader:\n img_id = int(row[0])\n if img_id in images_list:\n box_cub = [int(float(x)) for x in row[1:]]\n box_avl = [box_cub[1], box_cub[0], box_cub[3], box_cub[2]]\n # PathsDataset accepts (top, left, height, width)\n images_list[img_id].append(box_avl)\n\n images_tuples = []\n for _, img_tuple in images_list.items():\n images_tuples.append(tuple(img_tuple))\n self._images = images_tuples # type: ignore\n\n # Integrity check\n for row_check in self._images:\n filepath = self.root / CUB200.images_folder / row_check[0]\n if not filepath.is_file():\n if self.verbose:\n print(\"[CUB200] Error checking integrity of:\", filepath)\n return False\n\n return True", "def load_pretrained_layers(self):\n # Current state of base\n state_dict = self.state_dict()\n param_names = list(state_dict.keys())\n\n # Pretrained VGG base\n pretrained_state_dict = torchvision.models.vgg16(pretrained=True).state_dict()\n pretrained_param_names = list(pretrained_state_dict.keys())\n\n # Transfer conv. parameters from pretrained model to current model\n for i, param in enumerate(param_names[:-4]): # excluding conv6 and conv7 parameters\n state_dict[param] = pretrained_state_dict[pretrained_param_names[i]]\n\n # Convert fc6, fc7 to convolutional layers, and subsample (by decimation) to sizes of conv6 and conv7\n # fc6\n conv_fc6_weight = pretrained_state_dict['classifier.0.weight'].view(4096, 512, 7, 7) # (4096, 512, 7, 7)\n conv_fc6_bias = pretrained_state_dict['classifier.0.bias'] # (4096)\n state_dict['conv6.weight'] = decimate(conv_fc6_weight, m=[4, None, 3, 3]) # (1024, 512, 3, 3)\n state_dict['conv6.bias'] = decimate(conv_fc6_bias, m=[4]) # (1024)\n # fc7\n conv_fc7_weight = pretrained_state_dict['classifier.3.weight'].view(4096, 4096, 1, 1) # (4096, 4096, 1, 1)\n conv_fc7_bias = pretrained_state_dict['classifier.3.bias'] # (4096)\n state_dict['conv7.weight'] = decimate(conv_fc7_weight, m=[4, 4, None, None]) # (1024, 1024, 1, 1)\n state_dict['conv7.bias'] = decimate(conv_fc7_bias, m=[4]) # (1024)\n\n # Note: an FC layer of size (K) operating on a flattened version (C*H*W) of a 2D image of size (C, H, W)...\n # ...is equivalent to a convolutional layer with kernel size (H, W), input channels C, output channels K...\n # ...operating on the 2D image of size (C, H, W) without padding\n\n self.load_state_dict(state_dict)\n\n print(\"\\nLoaded base model.\\n\")", "def show_current_pair_by_3d_slice(iS,iT):\n import matplotlib.pyplot as plt\n import easyreg.viewers as viewers\n fig, ax = plt.subplots(2,3)\n plt.setp(plt.gcf(), 'facecolor', 'white')\n plt.style.use('bmh')\n\n ivsx = viewers.ImageViewer3D_Sliced(ax[0][0], iS, 0, 'source X', True)\n ivsy = viewers.ImageViewer3D_Sliced(ax[0][1], iS, 1, 'source Y', True)\n ivsz = viewers.ImageViewer3D_Sliced(ax[0][2], iS, 2, 'source Z', True)\n\n ivtx = viewers.ImageViewer3D_Sliced(ax[1][0], iT, 0, 'target X', True)\n ivty = viewers.ImageViewer3D_Sliced(ax[1][1], iT, 
1, 'target Y', True)\n ivtz = viewers.ImageViewer3D_Sliced(ax[1][2], iT, 2, 'target Z', True)\n\n\n feh = viewers.FigureEventHandler(fig)\n feh.add_axes_event('button_press_event', ax[0][0], ivsx.on_mouse_press, ivsx.get_synchronize, ivsx.set_synchronize)\n feh.add_axes_event('button_press_event', ax[0][1], ivsy.on_mouse_press, ivsy.get_synchronize, ivsy.set_synchronize)\n feh.add_axes_event('button_press_event', ax[0][2], ivsz.on_mouse_press, ivsz.get_synchronize, ivsz.set_synchronize)\n\n feh.add_axes_event('button_press_event', ax[1][0], ivtx.on_mouse_press, ivtx.get_synchronize, ivtx.set_synchronize)\n feh.add_axes_event('button_press_event', ax[1][1], ivty.on_mouse_press, ivty.get_synchronize, ivty.set_synchronize)\n feh.add_axes_event('button_press_event', ax[1][2], ivtz.on_mouse_press, ivtz.get_synchronize, ivtz.set_synchronize)\n\n feh.synchronize([ax[0][0], ax[1][0]])\n feh.synchronize([ax[0][1], ax[1][1]])\n feh.synchronize([ax[0][2], ax[1][2]])", "def force_load(self):\n for selection in self.selections.normal_values():\n selection.force_load()", "def load_experiment(file_name: str):\n exp = Experiment2P()\n # initialize the lazy-load objects with empty lists\n exp.tail_data = []\n exp.replaced_tail_frames = []\n exp.laser_data = []\n exp.all_c = []\n exp.all_dff = []\n exp.func_stacks = []\n with h5py.File(file_name, 'r') as dfile:\n exp.version = dfile[\"version\"][()] # in future allows for version specific loading\n try:\n if exp.version == b\"unstable\" or exp.version == \"unstable\":\n warnings.warn(\"Experiment file was created with development version of analysis code. Trying to \"\n \"load as version 1\")\n elif int(exp.version) > 2:\n raise IOError(f\"File version {exp.version} is larger than highest recognized version '2'\")\n except ValueError:\n raise IOError(f\"File version {exp.version} not recognized\")\n # load general experiment data\n n_planes = dfile[\"n_planes\"][()] # inferrred property of class but used here for loading plane data\n exp.experiment_name = dfile[\"experiment_name\"][()]\n exp.original_path = dfile[\"original_path\"][()]\n exp.scope_name = dfile[\"scope_name\"][()]\n exp.comment = dfile[\"comment\"][()]\n exp.tail_frame_rate = dfile[\"tail_frame_rate\"][()]\n # load singular parameter dictionary\n exp.info_data = exp._load_dictionary(\"info_data\", dfile)\n # load tail-data modification flag if this is version 2\n if int(exp.version) > 1:\n exp.tail_data_augmented = dfile[\"tail_data_augmented\"][()]\n # load per-plane data\n for i in range(n_planes):\n plane_group = dfile[str(i)]\n exp.scanner_data.append(exp._load_dictionary(\"scanner_data\", plane_group))\n exp.tail_data.append(plane_group[\"tail_data\"][()])\n exp.projections.append(plane_group[\"projection\"][()])\n if \"func_stack\" in plane_group:\n exp.func_stacks.append(plane_group[\"func_stack\"][()])\n if \"anat_projection\" in plane_group: # test if this experiment was dual-channel\n exp.anat_projections.append(plane_group[\"anat_projection\"][()])\n if \"tail_data\" in plane_group: # test if this experiment had tail data (for all planes)\n exp.tail_data.append(plane_group[\"tail_data\"][()])\n exp.bout_data.append(plane_group[\"bout_data\"][()])\n exp.tail_frame_times.append(plane_group[\"tail_frame_time\"][()])\n if int(exp.version) > 1 and \"replaced_tail_frames\" in plane_group:\n exp.replaced_tail_frames.append(plane_group[\"replaced_tail_frames\"][()])\n if \"laser_data\" in plane_group: # test if this experiment had laser data\n 
exp.laser_data.append(plane_group[\"laser_data\"][()])\n exp.all_c.append(plane_group[\"C\"][()])\n exp.all_dff.append(plane_group[\"dff\"][()])\n exp.all_centroids.append(plane_group[\"centroids\"][()])\n exp.all_sizes.append(plane_group[\"sizes\"][()])\n exp.all_spatial.append(plane_group[\"spatial\"][()])\n ps = plane_group[\"mcorr_dict\"][()]\n exp.mcorr_dicts.append(json.loads(ps))\n ps = plane_group[\"cnmf_extract_dict\"][()]\n exp.cnmf_extract_dicts.append(json.loads(ps))\n ps = plane_group[\"cnmf_val_dict\"][()]\n exp.cnmf_val_dicts.append(json.loads(ps))\n exp.populated = True\n return exp", "def get_3d_valid(self, jnts=14):\n\n to_select, to_sort = dataset_indices(self.dataset_name, jnts)\n\n return self._data_valid['3d'][:, to_select, :][:, to_sort, :]", "def compute(self):\n self.find_n()\n\n # call hotspot field plots\n for scenario in self.scenarios:\n fields_dict = {}\n ancestor_files = []\n for filename in io.get_all_ancestor_files(self.cfg,\n pattern='hotspot_*.nc'):\n key = os.path.basename(os.path.dirname(filename))\n splitname = os.path.basename(filename).split(\"_\")\n if key.split(\"_\")[-1] == scenario:\n fields_dict[(\n f\"{splitname[-1].split('.nc')[0]}_\"\n f\"{splitname[1]}_{key}\")] = iris.load_cube(filename)\n ancestor_files.append(filename)\n fields_dict[\"scenario\"] = scenario\n fields_dict[\"ancestors\"] = ancestor_files\n self.hotspot_fields_plot(fields_dict)\n\n # call scatter plots\n for season in self.seasons:\n timeseries_dict = {\"large_scale\": {}, \"regional\": {}}\n for region, value in timeseries_dict.items():\n for filename in io.get_all_ancestor_files(\n self.cfg,\n pattern=f'rolling_mean_{region}_{season}.nc'):\n value[os.path.basename(os.path.dirname(filename))] = (\n iris.load_cube(filename))\n value[os.path.basename(\n os.path.dirname(filename))] = (filename)\n for var_combination in self.var_combinations:\n self.timeseries_scatter_plot(deepcopy(timeseries_dict), season,\n var_combination)", "def load_data(self):\n print('Loading {} dataset'.format(self.split))\n data_split_path = os.path.join(self.root_dir, 'splits', '{}.csv'.format(self.split))\n with open(data_split_path,'r') as f:\n reader = csv.reader(f, delimiter=',')\n data_classes = {}\n for i,row in enumerate(reader):\n if i==0:\n continue\n data_classes[row[1]] = 1\n data_classes = data_classes.keys()\n print(data_classes)\n\n n_classes = len(data_classes)\n print('n_classes:{}, n_label:{}, n_unlabel:{}'.format(n_classes,self.n_label,self.n_unlabel))\n dataset_l = np.zeros([n_classes, self.n_label, self.im_height, self.im_width, self.channels], dtype=np.float32)\n if self.n_unlabel>0:\n dataset_u = np.zeros([n_classes, self.n_unlabel, self.im_height, self.im_width, self.channels], dtype=np.float32)\n else:\n dataset_u = []\n\n for i, cls in enumerate(data_classes):\n im_dir = os.path.join(self.root_dir, 'data/{}/'.format(self.split), cls)\n im_files = sorted(glob.glob(os.path.join(im_dir, '*.jpg')))\n np.random.RandomState(self.seed).shuffle(im_files) # fix the seed to keep label,unlabel fixed\n for j, im_file in enumerate(im_files):\n im = np.array(Image.open(im_file).resize((self.im_width, self.im_height)), \n np.float32, copy=False)\n if j<self.n_label:\n dataset_l[i, j] = im\n else:\n dataset_u[i,j-self.n_label] = im\n print('labeled data:', np.shape(dataset_l))\n print('unlabeled data:', np.shape(dataset_u))\n \n self.dataset_l = dataset_l\n self.dataset_u = dataset_u\n self.n_classes = n_classes", "def load_classes(self):\n super().load_classes()\n from scipy.io import 
loadmat\n mat_path = os.path.join(self.input_data_path, \"index_ade20k.mat\")\n object_names = loadmat(mat_path)['index']['objectnames'][0][0][0]\n self.ADE20K_CLASSES = {}\n self.ADE20K_CLASSES_reverse = {}\n for i in range(len(object_names)):\n self.ADE20K_CLASSES[object_names[i][0]] = i+1\n self.ADE20K_CLASSES_reverse[i+1] = object_names[i][0]\n if i == 3:\n logger.debug(f\"ADE20K_CLASSES: {self.ADE20K_CLASSES}\")\n logger.debug(f\"ADE20K_CLASSES_reverse: {self.ADE20K_CLASSES_reverse}\")", "def load_data():\n\n print('Loading and Visualizing Data ...')\n\n file_name = path.join(getcwd(), 'ex3', 'src', 'data', 'ex3data1')\n data = scipy.io.loadmat(file_name)\n\n # training data stored in arrays X, y\n # y should be a row vector of labels\n return data['X'], data['y'].T[0]", "def visualize_3d(grbdir,x, y, z, t, thetax, thetay, name):\n # Set ax.azim and ax.elev to ra, dec\n global runconf\n\n from mpl_toolkits.mplot3d import Axes3D\n fig = plt.figure()\n plt.suptitle(r\"Visualisation of {name} in 3d:$\\theta_x$={tx:0.1f},$\\theta_y$={ty:0.1f}\".format(name=name, tx=thetax, ty=thetay))\n # Z\n ax = plt.subplot(2, 2, 1, projection='3d')\n plot_xyzt(grbdir,ax, x, y, z, t)\n ax.azim = z.ra.deg\n ax.elev = z.dec.deg\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_zticklabels([])\n add_satellite(ax, x, y, z)\n ax.set_title(\"View from CZTI pointing (z)\")\n\n # Transient\n ax = plt.subplot(2, 2, 2, projection='3d')\n plot_xyzt(grbdir,ax, x, y, z, t)\n ax.azim = t.ra.deg\n ax.elev = t.dec.deg\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_zticklabels([])\n add_satellite(ax, x, y, z)\n ax.set_title(\"View from nominal \\n transient direction\")\n\n # X\n ax = plt.subplot(2, 2, 3, projection='3d')\n plot_xyzt(grbdir,ax, x, y, z, t)\n ax.azim = x.ra.deg\n ax.elev = x.dec.deg\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_zticklabels([])\n add_satellite(ax, x, y, z)\n ax.set_title(\"View from CZTI X axis\")\n\n # Z\n ax = plt.subplot(2, 2, 4, projection='3d')\n plot_xyzt(grbdir,ax, x, y, z, t)\n ax.azim = y.ra.deg\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_zticklabels([])\n add_satellite(ax, x, y, z)\n ax.set_title(\"View from CZTI Y axis\")\n\n return", "def but_load_net(self):\n a = tk.filedialog.askopenfilename(filetypes = [('NN file',['*.csv','*.pt'])])\n self.nn_obj=load_nn(a)", "def load_class_avg(self, mrcs, factor):\n \n global shape\n \n projection_2D = {}\n extract_2D = {}\n \n if len(factor) == 0: # Empty entry, set factor 1\n factor = 1\n\n with mrcfile.open(mrcs) as mrc:\n for i, data in enumerate(mrc.data):\n projection_2D[i] = data\n mrc.close()\n \n shape = transform.rotate(projection_2D[0].copy(), 45, resize=True).shape[0]\n\n for k, avg in projection_2D.items():\n if factor == 1:\n extract_2D[k] = extract_class_avg(avg)\n else:\n scaled_img = transform.rescale(\n avg, \n scale=(1/float(factor)), \n anti_aliasing=True, \n multichannel=False, # Add to supress warning\n mode='constant' # Add to supress warning\n ) \n extract_2D[k] = extract_class_avg(scaled_img)\n\n return projection_2D, extract_2D", "def plot_3d(results_list): \n x_range = range(len(results_list[0]))\n fig = plt.figure()\n axe = Axes3D(fig)\n\n for idx, result in enumerate(results_list):\n axe.plot(x_range, result, idx)\n plt.show()", "def loadSuitModelsAndAnims(level, flag = 0):\n # print \"print loading level %d suits...\" % level\n \n for key in ModelDict.keys():\n # load/unload the models\n # All the mods are in 3.5 now, except the suita and B headsd which are in 
4\n model, phase = ModelDict[key]\n headModel, headPhase = ModelDict[key]\n if flag:\n loader.loadModelNode(\"phase_3.5\" + model + \"mod\")\n loader.loadModelNode(\"phase_\" + str(headPhase) + headModel + \"heads\")\n else:\n loader.unloadModel(\"phase_3.5\" + model + \"mod\")\n loader.unloadModel(\"phase_\" + str(headPhase) + headModel + \"heads\")", "def depthFaceSelect(self, triangleSelected,depth, materials):\t\n\t\tlayersR = list()\n\t\tlayersG = list()\n\t\tlayersB = list()\n\t\t\n\t\tlayerMaterial = list()\n\t\tself.volumeComposition = list()\n\t\tfor l in range(len(materials)):\n\t\t\tlayerMaterial.append(list())\n\t\t\tself.volumeComposition.append(list())\n\t\t\n\t\tvolumeGeneral = list()\n\t\tself.boolLayers = []\n\t\t\n\t\tfor i in self.slicePoints:\n\t\t\tboolResult2 = self.voxel_slice(i, self.vertices, self.triangles, self.res, self.llc, self.sliceProto, 2)\n\t\t\tprint boolResult2.shape\n\t\t\ttupleResultR = numpy.zeros(boolResult2.shape, dtype=uint8)\n\t\t\ttupleResultG = numpy.zeros(boolResult2.shape, dtype=uint8)\n\t\t\ttupleResultB = numpy.zeros(boolResult2.shape, dtype=uint8)\n\t\t\ttupleMaterial = list()\n\t\t\tfor l in range(len(materials)):\n\t\t\t\ttupleMaterial.append(numpy.zeros(boolResult2.shape, dtype=float))\n\t\t\t#tupleMaterial = numpy.zeros(boolResult2.shape, dtype=f)\n\t\t\t#lines=self.findSelectedContour(self.vertices,triangleSelected,i ,numpy.array([0,0,1]))\n\t\t\t#boolResult1 = self.findVoxelOfSelectedContour(i, lines, self.res, self.llc, self.sliceProto, depth)\n\t\t\tj = numpy.nditer(boolResult2, flags=['multi_index'], op_flags=['readwrite'])\n\n\t\t\twhile not j.finished:\t\n\t\t\t\tprint type(j.multi_index)\n\t\t\t\tprint j.multi_index\n\t\t\t\tif j[0] == True:\n\t\t\t\t\ttupleResultB[j.multi_index] = materials[0][0]\n\t\t\t\t\ttupleResultG[j.multi_index] = materials[0][1]\n\t\t\t\t\ttupleResultR[j.multi_index] = materials[0][2]\n\t\t\t\t\ttupleMaterial[0][j.multi_index] = 1.0 \n\t\t\t\telse:\n\t\t\t\t\ttupleResultB[j.multi_index] = 0\n\t\t\t\t\ttupleResultG[j.multi_index] = 0\n\t\t\t\t\ttupleResultR[j.multi_index] = 0\n\t\t\t\tj.iternext()\t\t\t\t\n\t\t\t\t\t\n\t\t\tfor k in range(len(triangleSelected)):\n\t\t\t\tboolResult1 = self.findVoxelOfSelectedContour(i, self.vertices, triangleSelected[k], self.res, self.llc, self.sliceProto, depth[k])\n\t\t\t\tboolResult = numpy.logical_and(boolResult1, boolResult2)\n\t\t\t\tprint boolResult.shape\n\n\t\t\t\tj = numpy.nditer(boolResult2, flags=['multi_index'], op_flags=['readwrite'])\n\t\t\t\twhile not j.finished:\n\t\t\t\t\tif j[0] == True:\n\t\t\t\t\t\tif boolResult[j.multi_index] == True:\n\t\t\t\t\t\t\ttupleResultB[j.multi_index] = materials[k + 1][0]\n\t\t\t\t\t\t\ttupleResultG[j.multi_index] = materials[k + 1][1]\n\t\t\t\t\t\t\ttupleResultR[j.multi_index] = materials[k + 1][2]\n\t\t\t\t\t\t\ttupleMaterial[k + 1][j.multi_index] = 1.0 \n\t\t\t\t\t\t\ttupleMaterial[0][j.multi_index] = 0.0\n\t\t\t\t\t\t#else:\n\t\t\t\t\t\t#\ttupleResultB[j.multi_index] = 255\n\t\t\t\t\t\t#\ttupleResultG[j.multi_index] = 0\n\t\t\t\t\t\t#\ttupleResultR[j.multi_index] = 0\t\t\t\t\t\n\t\t\t\t\tj.iternext()\n\t\t\tself.boolLayers.append(boolResult2)\n\t\t\tlayersR.append(tupleResultR)\n\t\t\tlayersG.append(tupleResultG)\n\t\t\tlayersB.append(tupleResultB)\n\t\t\tfor l in range(len(materials)):\n\t\t\t\tlayerMaterial[l].append(tupleMaterial[l])\n\t\t\t\t\n\t\tprint \"i got here\"\n\t\tself.volumeR=numpy.array(layersR) # create the 3d volume\n\t\tself.volumeG=numpy.array(layersG) 
\n\t\tself.volumeB=numpy.array(layersB)\n\t\t\n\t\tfor l in range(len(materials)):\n\t\t\tself.volumeComposition[l] = numpy.array(layerMaterial[l])\n\t\tvolumeGeneral.append(self.volumeR)\n\t\tvolumeGeneral.append(self.volumeG)\n\t\tvolumeGeneral.append(self.volumeB)\n\t\t\n\t\treturn volumeGeneral", "def _inception_v3(*args, **kwargs):\n try:\n version = tuple(map(int, torchvision.__version__.split('.')[:2]))\n except ValueError:\n version = 0,\n if version >= (0, 6):\n kwargs['init_weights'] = False\n return torchvision.models.inception_v3(*args, **kwargs)", "def __init__(self, filepath='multidrcl', suffix='DRCL', extension='.IMG', lblext='.LBL', force_read=True, unit='s', feature='sh', eye='L', do_print=True, initdatadir=None, initdata=None, readintuple=None):\n\n Dataset.__init__(self, None, \"mastcam\")\n\n if readintuple != None:\n (self.data, self.fullimages, self.segmentation, self.labels, self.xlabel, self.ylabel, self.xvals, self.rgbdict, self.lblext) = readintuple[0:9]\n if initdata != None:\n self.initdata = initdata\n if self.initfilename != None:\n self.initfilename = initarchive\n else:\n self.initfilename = 'param'\n return\n \n if do_print: print(filepath)\n \n if filepath == '388':\n filepath = '/proj/imbue/data/msl-mastcam/sol388/'\n \n if filepath == 'multidrcl':\n filepath = '/proj/imbue/data/msl-mastcam/multispectral_drcl/'\n \n self.filepath = filepath\n self.xlabel = 'TBD'\n self.ylabel = 'TBD'\n \n #dirname = filepath[:-1]\n #subsetname = dirname.split('/')[-1]\n subsetname = os.path.basename(filepath)\n self.name += \"-\" + subsetname\n if len(suffix) > 0:\n self.name += \"-\" + eye + '-' + suffix + '-' + unit + '-' + feature\n if do_print: print(\"Dataset name: \" + self.name)\n \n self.data = []\n self.cadence = []\n \n self.unit = unit\n self.feature = feature\n self.eye = eye\n\n self.rgbdict = {}\n self.extension = extension\n self.lblext = lblext\n self.suffix = suffix\n \n self.archive = os.path.join(filepath,\n subsetname + eye + \"_\" + suffix + '_' + unit + '_' + feature + \".pkl\")\n\n if initdata != None:\n self.initdata = initdata\n if self.initfilename != None:\n self.initfilename = initarchive\n else:\n self.initfilename = 'param'\n elif initdatadir != None:\n print(\"Reading in initialization data...\")\n #initsubsetname = initdatadir[:-1].split('/')[-1]\n initsubsetname = os.path.basename(initdatadir)\n initarchive = os.path.join(initdatadir,\n initsubsetname + eye + \"_\" + suffix + '_' + unit + '_' + feature + \".pkl\")\n if os.path.exists(initarchive):\n with open(initarchive, 'r') as f:\n self.initdata = pickle.load(f)[0]\n self.initfilename = initarchive\n print(\"...done!\")\n print(\"initdata.shape:\", self.initdata.shape)\n else:\n print(\"...initialization data does not exist!\")\n print(\"Desired pickle was: %s\" % initarchive)\n \n # Determine if we need to preprocess the data\n if (not os.path.exists(self.archive)) or force_read:\n self.read_mastcam_dir(filepath, suffix, unit, feature, extension, lblext, eye)\n else:\n if do_print: print(\"Found pickle at \" + self.archive)\n \n self.readin()", "def visualize_scan(self):\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.scatter(self.p1_points[:, 0], self.p1_points[:, 1], self.p1_points[:, 2], c='r')\n ax.scatter(self.p2_points[:, 0], self.p2_points[:, 1], self.p2_points[:, 2], c='g')\n ax.scatter(self.p3_points[:, 0], self.p3_points[:, 1], self.p3_points[:, 2], c='b')\n ax.scatter(self.p4_points[:, 0], self.p4_points[:, 1], self.p4_points[:, 2])\n\n 
ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.set_zlabel('z')\n plt.show()", "def __init__(self, image_dir, instances_json, stuff_json=None,\n stuff_only=True, image_size=(64, 64), mask_size=32, normalize_images=True, max_samples=None,\n include_relationships=True, min_object_size=0.02, min_objects=3, max_objects=8,\n include_other=False, instance_whitelist=None, stuff_whitelist=None, learned_transitivity=False,\n include_dummies=True, use_transitivity=False, use_converse=False, learned_symmetry=False,\n learned_converse=False):\n super(CocoSceneGraphDataset, self).__init__()\n self.use_converse = use_converse\n self.learned_transitivity = learned_transitivity\n self.learned_symmetry = learned_symmetry\n self.learned_converse = learned_converse\n self.include_dummies = include_dummies\n self.image_dir = image_dir\n # self.mask_size = image_size[0]\n self.mask_size = mask_size\n self.masks = True\n if self.mask_size == 0:\n self.masks = False\n self.mask_size = 32\n\n self.max_samples = max_samples\n self.normalize_images = normalize_images\n self.include_relationships = include_relationships\n self.set_image_size(image_size)\n self.use_transitivity = use_transitivity\n\n with open(instances_json, 'r') as f:\n instances_data = json.load(f)\n\n with open(stuff_json, 'r') as f:\n stuff_data = json.load(f)\n\n self.image_ids = []\n self.image_id_to_filename = {}\n self.image_id_to_size = {}\n for image_data in instances_data['images']:\n image_id = image_data['id']\n filename = image_data['file_name']\n width = image_data['width']\n height = image_data['height']\n self.image_ids.append(image_id)\n self.image_id_to_filename[image_id] = filename\n self.image_id_to_size[image_id] = (width, height)\n\n self.vocab = {\n 'object_name_to_idx': {},\n 'pred_name_to_idx': {},\n }\n object_idx_to_name = {}\n all_instance_categories = []\n for category_data in instances_data['categories']:\n category_id = category_data['id']\n category_name = category_data['name']\n all_instance_categories.append(category_name)\n object_idx_to_name[category_id] = category_name\n self.vocab['object_name_to_idx'][category_name] = category_id\n all_stuff_categories = []\n\n for category_data in stuff_data['categories']:\n category_name = category_data['name']\n category_id = category_data['id']\n all_stuff_categories.append(category_name)\n object_idx_to_name[category_id] = category_name\n self.vocab['object_name_to_idx'][category_name] = category_id\n\n if instance_whitelist is None:\n instance_whitelist = all_instance_categories\n if stuff_whitelist is None:\n stuff_whitelist = all_stuff_categories\n category_whitelist = set(instance_whitelist) | set(stuff_whitelist)\n\n # Add object data from instances\n self.image_id_to_objects = defaultdict(list)\n for object_data in instances_data['annotations']:\n image_id = object_data['image_id']\n _, _, w, h = object_data['bbox']\n W, H = self.image_id_to_size[image_id]\n box_area = (w * h) / (W * H)\n box_ok = box_area > min_object_size\n object_name = object_idx_to_name[object_data['category_id']]\n category_ok = object_name in category_whitelist\n other_ok = object_name != 'other' or include_other\n if box_ok and category_ok and other_ok:\n self.image_id_to_objects[image_id].append(object_data)\n\n # Add object data from stuff\n image_ids_with_stuff = set()\n for object_data in stuff_data['annotations']:\n image_id = object_data['image_id']\n image_ids_with_stuff.add(image_id)\n _, _, w, h = object_data['bbox']\n W, H = self.image_id_to_size[image_id]\n box_area = (w * h) / (W * 
H)\n box_ok = box_area > min_object_size\n object_name = object_idx_to_name[object_data['category_id']]\n category_ok = object_name in category_whitelist\n other_ok = object_name != 'other' or include_other\n if box_ok and category_ok and other_ok:\n self.image_id_to_objects[image_id].append(object_data)\n\n new_image_ids = []\n for image_id in self.image_ids:\n if image_id in image_ids_with_stuff:\n new_image_ids.append(image_id)\n self.image_ids = new_image_ids\n\n all_image_ids = set(self.image_id_to_filename.keys())\n image_ids_to_remove = all_image_ids - image_ids_with_stuff\n for image_id in image_ids_to_remove:\n self.image_id_to_filename.pop(image_id, None)\n self.image_id_to_size.pop(image_id, None)\n self.image_id_to_objects.pop(image_id, None)\n\n # COCO category labels start at 1, so use 0 for __image__\n self.vocab['object_name_to_idx']['__image__'] = 0\n\n # Build object_idx_to_name\n name_to_idx = self.vocab['object_name_to_idx']\n assert len(name_to_idx) == len(set(name_to_idx.values()))\n max_object_idx = max(name_to_idx.values())\n idx_to_name = ['NONE'] * (1 + max_object_idx)\n for name, idx in self.vocab['object_name_to_idx'].items():\n idx_to_name[idx] = name\n self.vocab['object_idx_to_name'] = idx_to_name\n\n # Prune images that have too few or too many objects\n new_image_ids = []\n total_objs = 0\n for image_id in self.image_ids:\n num_objs = len(self.image_id_to_objects[image_id])\n total_objs += num_objs\n if min_objects <= num_objs <= max_objects:\n new_image_ids.append(image_id)\n self.image_ids = new_image_ids\n\n self.register_augmented_relations()\n\n self.vocab[\"attributes\"] = {}\n self.vocab[\"attributes\"]['objects'] = self.vocab['object_name_to_idx']\n self.vocab[\"reverse_attributes\"] = {}\n for attr in self.vocab[\"attributes\"].keys():\n self.vocab[\"reverse_attributes\"][attr] = {v: k for k, v in self.vocab[\"attributes\"][attr].items()}", "def train():\n\n # Load camera parameters\n rcams = cameras.load_cameras()\n\n # Load 3d data and 2d projections\n full_train_set_3d, full_test_set_3d, data_mean_3d, data_std_3d, dim_to_ignore_3d, dim_to_use_3d =\\\n data_utils.read_3d_data( FLAGS.camera_frame, rcams, FLAGS.origin_bc, FLAGS.augment_data,\n FLAGS.procrustes, FLAGS.lowpass )\n \n # Read stacked hourglass 2D predictions\n full_train_set_2d, full_test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = \\\n data_utils.read_2d_predictions( FLAGS.origin_bc, FLAGS.augment_data )\n \n print(\"\\n[+] done reading and normalizing data\")\n # Getting the number of training and test subjects\n tr_subj = 0\n for v in full_train_set_3d.values():\n tr_subj += v.shape[0]\n te_subj = 0\n for v in full_test_set_3d.values():\n te_subj += v.shape[0]\n print(\"{0} training subjects, {1} test subjects\".format(tr_subj, te_subj))\n print(dim_to_use_2d)\n print(dim_to_use_3d)\n # Un-normalizing data for visualizations\n unNorm_ftrs2d = data_utils.unNormalize_dic(full_train_set_2d, data_mean_2d, data_std_2d, dim_to_use_2d)\n unNorm_ftrs3d = data_utils.unNormalize_dic(full_train_set_3d, data_mean_3d, data_std_3d, dim_to_use_3d)\n unNorm_ftes3d = data_utils.unNormalize_dic(full_test_set_3d, data_mean_3d, data_std_3d, dim_to_use_3d)\n # Visualize the data\n viz.visualize_train_sample(unNorm_ftrs2d, unNorm_ftrs3d, FLAGS.camera_frame)\n viz.visualize_files_oneatatime(unNorm_ftrs3d, unNorm_ftes3d)\n\n # Getting only the dimensions to use (get rid of body coxas, other limb, antennas, abdomen\n train_set_3d, train_set_2d, test_set_3d, test_set_2d = {}, {}, {}, 
{}\n for k in full_train_set_3d:\n (f, c) = k\n train_set_3d[k] = full_train_set_3d[k][:, dim_to_use_3d]\n train_set_2d[(f, data_utils.CAMERA_TO_USE)] =\\\n full_train_set_2d[(f, data_utils.CAMERA_TO_USE)][:, dim_to_use_2d]\n for k in full_test_set_3d:\n (f, c) = k\n test_set_3d[k] = full_test_set_3d[k][:, dim_to_use_3d]\n test_set_2d[(f, data_utils.CAMERA_TO_USE)] =\\\n full_test_set_2d[(f, data_utils.CAMERA_TO_USE)][:, dim_to_use_2d]\n \n print(\"3D data mean:\")\n print(data_mean_3d)\n print(\"3D data std:\")\n print(data_std_3d)\n\n print(\"2D data mean:\")\n print(data_mean_2d)\n print(\"2D data std:\")\n print(data_std_2d)\n \n input(\"Press Enter to continue...\")\n\n # Avoid using the GPU if requested\n device_count = {\"GPU\": 0} if FLAGS.use_cpu else {\"GPU\": 1}\n with tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(\n device_count=device_count,\n allow_soft_placement=True )) as sess:\n\n # === Create the model ===\n print(\"[*] creating %d bi-layers of %d units.\" % (FLAGS.num_layers, FLAGS.linear_size))\n model = create_model( sess, FLAGS.batch_size )\n model.train_writer.add_graph( sess.graph )\n print(\"[+] model created\")\n \n #=== This is the training loop ===\n step_time, loss, val_loss = 0.0, 0.0, 0.0\n current_step = 0 if FLAGS.load <= 0 else FLAGS.load + 1\n previous_losses = []\n\n step_time, loss = 0, 0\n current_epoch = 0\n log_every_n_batches = 100\n losses, errors, joint_errors = [], [], []\n for _ in range( FLAGS.epochs ):\n current_epoch = current_epoch + 1\n\n # === Load training batches for one epoch ===\n encoder_inputs, decoder_outputs =\\\n model.get_all_batches( train_set_2d, train_set_3d, FLAGS.camera_frame, training=True )\n nbatches = len( encoder_inputs )\n print(\"[*] there are {0} train batches\".format( nbatches ))\n start_time, loss = time.time(), 0.\n # === Loop through all the training batches ===\n for i in range( nbatches ):\n\n if (i+1) % log_every_n_batches == 0:\n # Print progress every log_every_n_batches batches\n print(\"Working on epoch {0}, batch {1} / {2}...\".format( current_epoch, i+1, nbatches),end=\"\" )\n\n enc_in, dec_out = encoder_inputs[i], decoder_outputs[i]\n step_loss, loss_summary, lr_summary, _ =\\\n model.step( sess, enc_in, dec_out, FLAGS.dropout, isTraining=True )\n\n if (i+1) % log_every_n_batches == 0:\n # Log and print progress every log_every_n_batches batchespixels = pixels / pixels[2,:]\n model.train_writer.add_summary( loss_summary, current_step )\n model.train_writer.add_summary( lr_summary, current_step )\n step_time = (time.time() - start_time)\n start_time = time.time()\n print(\"done in {0:.2f} ms\".format( 1000*step_time / log_every_n_batches ) )\n\n loss += step_loss\n current_step += 1\n # === end looping through training batches ===\n\n loss = loss / nbatches\n losses.append(loss)\n print(\"=============================\\n\"\n \"Global step: %d\\n\"\n \"Learning rate: %.2e\\n\"\n \"Train loss avg: %.4f\\n\"\n \"=============================\" % (model.global_step.eval(),\n model.learning_rate.eval(), loss) )\n # === End training for an epoch ===\n\n # === Testing after this epoch ===\n isTraining = False\n \n n_joints = len(data_utils.DIMENSIONS_TO_USE)\n if FLAGS.origin_bc:\n n_joints -= len(data_utils.ROOT_POSITIONS)\n\n encoder_inputs, decoder_outputs =\\\n model.get_all_batches( test_set_2d, test_set_3d, FLAGS.camera_frame, training=False)\n\n total_err, coordwise_err, joint_err, step_time, loss = evaluate_batches( sess, model,\n data_mean_3d, data_std_3d, dim_to_use_3d, dim_to_ignore_3d,\n 
data_mean_2d, data_std_2d, dim_to_use_2d, dim_to_ignore_2d,\n current_step, encoder_inputs, decoder_outputs, current_epoch )\n\n print(\"=============================\\n\"\n \"Step-time (ms): %.4f\\n\"\n \"Val loss avg: %.4f\\n\"\n \"Val error avg (mm): %.2f (%.2f, %.2f, %.2f)\\n\"\n \"=============================\" % ( 1000*step_time, loss, total_err,\n coordwise_err[0], coordwise_err[1], coordwise_err[2] ))\n\n for i in range(n_joints):\n # 6 spaces, right-aligned, 5 decimal places\n print(\"Error in joint {0:02d} (mm): {1:>5.2f}\".format(i+1, joint_err[i]))\n print(\"=============================\")\n errors.append(coordwise_err)\n joint_errors.append(joint_err)\n # Log the error to tensorboard\n summaries = sess.run( model.err_mm_summary, {model.err_mm: total_err} )\n model.test_writer.add_summary( summaries, current_step )\n\n # Save the model\n print( \"Saving the model... \", end=\"\" )\n start_time = time.time()\n model.saver.save(sess, os.path.join(train_dir, 'checkpoint'), global_step=current_step )\n print( \"done in {0:.2f} ms\".format(1000*(time.time() - start_time)) )\n\n # Reset global time and loss\n step_time, loss = 0, 0\n\n sys.stdout.flush()\n # Save losses for future plots\n def print_list_tofile(l, filename):\n with open(filename, 'wb') as f:\n pickle.dump(l, f)\n print_list_tofile(losses, train_dir+\"/losses.pkl\")\n print_list_tofile(errors, train_dir+\"/errors.pkl\")\n print_list_tofile(joint_errors, train_dir+\"/joint_errors.pkl\")", "def loadData(self):\n batch_size = 256\n \n #if self.conv_sg == True:\n # batch_size = 1 \n \n download = True\n root = self.root + self.dataset\n if self.dataset == \"MNIST\": \n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])\n trainset = torchvision.datasets.MNIST(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.MNIST(root, train=False, download=download, transform=transform)\n \n if self.dataset == \"CIFAR10\":\n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465,), (0.2023, 0.1994, 0.2010,))])\n trainset = torchvision.datasets.CIFAR10(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.CIFAR10(root, train=False, download=download, transform=transform)\n \n if self.dataset == \"CIFAR100\":\n transform = transforms.Compose([transforms.ToTensor()])\n trainset = torchvision.datasets.CIFAR100(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.CIFAR100(root, train=False, download=download, transform=transform)\n \n \n trainloader = torch.utils.data.DataLoader(trainset, batch_size = batch_size,\n shuffle=False, num_workers=0, pin_memory = False)\n \n testloader = torch.utils.data.DataLoader(testset, batch_size= batch_size,\n shuffle=False, num_workers=2, pin_memory = False)\n \n return trainloader, testloader", "def test_init(self, epochs):\n i = -1\n for p in self.P:\n for subband in self.SUBBANDS:\n i += 1\n\n # --- load model ----\n pref = self.model_dir + \"/\" + self.name % (subband, p)\n model = copy.deepcopy(self.model)\n model.model.load_weights(pref + \"_epochs_%d\" % epochs[i])\n self.NET.append(model)\n # --- end load model ----\n\n # --- load permutation ----\n self.permutation.append(\n np.load(self.model_dir + \"/permutation_\" + self.name %\n (subband, p) + \".npy\"))\n # --- end load permutation ----", "def load_pretrained_layers(self):\n # Current state of base\n state_dict = self.state_dict()\n 
param_names = list(state_dict.keys())\n\n # VGG base with pretrained weights\n pretrained_state_dict = torchvision.models.vgg16(pretrained=True).state_dict()\n pretrained_param_names = list(pretrained_state_dict.keys())\n\n # Copy pretrained weights to our current VGG model base\n for i, param in enumerate(param_names[:-4]): # excluding conv6 and conv7 parameters\n state_dict[param] = pretrained_state_dict[pretrained_param_names[i]]\n\n # Convert fc6, fc7 to convolutional layers, and subsample (by decimation) to sizes of conv6 and conv7\n # fc6\n conv_fc6_weight = pretrained_state_dict['classifier.0.weight'].view(4096, 512, 7, 7) # (4096, 512, 7, 7)\n conv_fc6_bias = pretrained_state_dict['classifier.0.bias'] # (4096)\n state_dict['conv6.weight'] = decimate(conv_fc6_weight, m=[4, None, 3, 3]) # (1024, 512, 3, 3)\n state_dict['conv6.bias'] = decimate(conv_fc6_bias, m=[4]) # (1024)\n # fc7\n conv_fc7_weight = pretrained_state_dict['classifier.3.weight'].view(4096, 4096, 1, 1) # (4096, 4096, 1, 1)\n conv_fc7_bias = pretrained_state_dict['classifier.3.bias'] # (4096)\n state_dict['conv7.weight'] = decimate(conv_fc7_weight, m=[4, 4, None, None]) # (1024, 1024, 1, 1)\n state_dict['conv7.bias'] = decimate(conv_fc7_bias, m=[4]) # (1024)\n\n self.load_state_dict(state_dict)\n\n print(\"\\nLoaded base model.\\n\")", "def load_pascal(data_dir, split='train'):\n # Wrote this function\n # idx = 0\n # if idx >20:\n # idx+=1\n # break\n \"\"\"\n print(\"Begin Load Images ------------------------------------\")\n images = []\n # images_dict -> key: img_file_idx, value: rgb image ndarray (256*256*3)\n images_dict = {}\n # count\n for infile in glob.glob(\"./VOCdevkit/VOC2007/JPEGImages/*.jpg\"):\n # reshape the images to 256*256*3\n file, ext = os.path.splitext(infile)\n file_idx = file[-6:]\n\n try:\n im = Image.open(infile)\n resized_img = im.resize((256, 256), Image.ANTIALIAS)\n resized_arr = np.array(resized_img)\n images_dict[file_idx] = resized_arr.astype(np.float32)\n except IOError:\n print(\"Error\")\n\n save_obj(images_dict,\"images_dict\")\n \"\"\"\n # label_mat: 2d array, each annotation file is one label_col, multiple label_col mean multiple annotation files\n label_mat = []\n weight_mat = []\n image_mat = []\n\n images_dict = load_obj(\"images_dict\")\n print(\"Return Load Images ------------------------------------\")\n\n # for filename in os.listdir(\"./VOCdevkit/VOC2007/ImageSets/Main/\"):\n for filename in enumerate(CLASS_NAMES):\n\n with open(\"./VOCdevkit/VOC2007/ImageSets/Main/\"+filename[1] +\"_\"+split+\".txt\") as fp:\n print(fp)\n image_mat = []\n label_col = []\n weight_col = []\n line = fp.readline()\n cnt = 1\n while line:\n\n label_idx = line.strip()[:-3]\n try:\n # print(\"Line {}: {}\".format(label_idx, type(label_idx)))\n # Be aware!! 
'000005 ' is different from '000005', there is a space in the first string!!!\n # label_idx = '000005 ' label_idx[:-1]='000005'\n image_mat.append(images_dict[label_idx])\n except IOError:\n print(\"Error Line {}: {}\".format(label_idx, type(label_idx)))\n\n label_flag = int(line.strip()[-2:])\n\n if label_flag is 0 or label_flag is -1:\n label_col.append(np.int32(0))\n else:\n label_col.append(np.int32(1))\n\n if label_flag is 1 or label_flag is -1:\n weight_col.append(np.int32(1))\n else:\n weight_col.append(np.int32(0))\n\n line = fp.readline()\n cnt += 1\n np_label_col = np.asarray(label_col)\n label_mat.append(np_label_col)\n # print(np.shape(label_mat))\n np_weight_col = np.asarray(weight_col)\n weight_mat.append(np_weight_col)\n\n # print('image_mat {}: label_mat {}'.format(np.shape(image_mat), np.shape(label_mat)))\n np_image_mat = np.asarray(image_mat)\n np_label_mat = np.asarray(label_mat)\n np_weight_mat = np.asarray(weight_mat)\n # print('np_image_mat {}: np_label_mat {}'.format(np.shape(np_image_mat), np.shape(np_label_mat)))\n np_trans_label_mat = np_label_mat.transpose()\n np_trans_weight_mat = np_weight_mat.transpose()\n # print(np.shape(np_label_mat))\n # print(np.shape(np_weight_mat))\n print('np_trans_label_mat {}: np_trans_weight_mat {}'.format(np.shape(np_trans_label_mat), np.shape(np_trans_weight_mat)))\n print(\"Return Load Weights and Labels ------------------------------------\")\n return np_image_mat, np_trans_label_mat, np_trans_weight_mat", "def visualise_two_data_sets(x_arr, y_arr, x_arr_two, y_arr_two):\n # Instantiate a PCA object for the sake of easy visualisation\n pca = PCA(n_components=3)\n\n # Fit and transform x to visualise inside a 3D feature space\n x_visualisation = pca.fit_transform(x_arr)\n\n figure = plt.figure()\n axis = Axes3D(figure)\n\n axis.scatter(x_visualisation[y_arr == 0, 0], x_visualisation[y_arr == 0, 1], x_visualisation[y_arr == 0, 2],\n label=\"Class #0\",\n edgecolor=almost_black, facecolor=palette[0], linewidth=0.3, marker=\"o\")\n axis.scatter(x_visualisation[y_arr == 1, 0], x_visualisation[y_arr == 1, 1], x_visualisation[y_arr == 1, 2],\n label=\"Class #1\",\n edgecolor=almost_black, facecolor=palette[2], linewidth=0.3, marker=\"^\")\n axis.set_title(\"PCA to 3 components - data-set 1\")\n\n x_visualisation_two = pca.transform(x_arr_two)\n figure_two = plt.figure()\n axis_two = Axes3D(figure_two)\n axis_two.scatter(x_visualisation_two[y_arr_two == 0, 0], x_visualisation_two[y_arr_two == 0, 1],\n x_visualisation_two[y_arr_two == 0, 2],\n label=\"Class #0\", edgecolor=almost_black,\n facecolor=palette[0], linewidth=0.3, marker=\"o\")\n axis_two.scatter(x_visualisation_two[y_arr_two == 1, 0], x_visualisation_two[y_arr_two == 1, 1],\n x_visualisation_two[y_arr_two == 1, 2],\n label=\"Class #1\", edgecolor=almost_black,\n facecolor=palette[2], linewidth=0.3, marker=\"^\")\n axis_two.set_title(\"PCA to 3 components - data-set 2\")\n\n plt.show()", "def __c3dSeg(self, bg, seg, tgPng, scale, opacity):\n for axes in ['x', 'y', 'z']:\n cmd = 'c3d ' + bg + ' -scale ' + scale + ' ' + seg + ' '\n cmd += '-foreach -slice ' + axes + ' 50% -endfor '\n cmd += '-oli ' + os.path.join(self.toadDir, \"templates/lookup_tables/\") + 'FreeSurferColorLUT_ItkSnap.txt ' + opacity + ' -type uchar -omc ' + axes + '.png'\n self.launchCommand(cmd)\n cmd = 'pngappend x.png + y.png + z.png ' + tgPng\n self.launchCommand(cmd)\n cmd = 'rm x.png y.png z.png'\n self.launchCommand(cmd)", "def plot_config_3d(view, trace, marker_names):\n\n if view == \"3D Plot\":\n 
with st.expander(\"3D Plot Configuration\", expanded=True):\n col_plot_type, col_grid_res, col_fill, col_interp = st.columns(4)\n col_col_type, col_choice, col_preview, col_overlay = st.columns(4)\n trace[\"Chart_Type\"] = col_plot_type.selectbox(\"Plot Type\", [\"Contour\",\"3D Scatter\",\"Surface\",\"Heatmap\"], key = \"Chart_Type\")\n col_col_type.selectbox('Color Map Type', ['Sequential','Diverging'], key=\"Color_Set_Type\")\n\n if st.session_state[\"Color_Set_Type\"] == 'Sequential':\n color_map = list(sequential_color_dict().keys())\n else:\n color_map = list(diverging_color_dict().keys())\n\n color_set = col_choice.selectbox(\"Color Map\", color_map) \n if st.session_state[\"Color_Set_Type\"] == 'Sequential':\n st.session_state['Color_Palette'] = sequential_color_dict().get(color_set)\n else:\n st.session_state['Color_Palette'] = diverging_color_dict().get(color_set)\n\n colormap_preview = plot_color_set(st.session_state['Color_Palette'], color_set, view)\n col_preview.image(colormap_preview, use_column_width = True)\n\n if trace[\"Chart_Type\"] != '3D Scatter':\n trace[\"Grid_Res\"] = col_grid_res.number_input(\"Grid Resolution\", min_value=0.0, max_value=100000.0, value=50.0, step=0.5, key=\"Grid_Res\")\n trace[\"Fill_Value\"] = col_fill.selectbox(\"Fill Value\", [\"nan\",0], help=\"fill missing data with the selected value\", key = \"Fill_Value\")\n trace[\"Interp_Method\"] = col_interp.selectbox(\"Interpolation Method\", [\"linear\",\"nearest\",\"cubic\"], key = \"Interp_Method\")\n\n else:\n trace[\"Fill_Value\"] = None\n trace[\"Interp_Method\"] = None\n trace[\"Grid_Res\"] = None\n \n st.session_state[\"Overlay\"] = col_overlay.checkbox(\"Overlay Original Data\", help=\"Display scatter of original data overlayed on chart\")\n \n if st.session_state[\"Overlay\"] == True:\n st.subheader(\"Overlay\")\n col_overlay_alpha, col_overlay_marker, col_overlay_color = st.columns(3)\n overlay_alpha = col_overlay_alpha.slider(\"Opacity\",value=0.5,min_value=0.0, max_value=1.0, step=0.01, key = \"Overlay_Alpha\")\n overlay_marker = col_overlay_marker.selectbox(\"Style\", marker_names, help=\"https://plotly.com/python/marker-style/\", key = \"Overlay Marker\")\n overlay_color = col_overlay_color.color_picker('Pick a color ', '#000000', key = \"Overlay Color\")\n else:\n overlay_alpha = None\n overlay_marker = None\n overlay_color = None\n else:\n trace[\"Chart_Type\"] = None\n st.session_state['Color_Palette'] = None\n trace[\"Fill_Value\"] = None\n trace[\"Interp_Method\"] = None\n trace[\"Grid_Res\"] = None\n\n\n\n return trace[\"Chart_Type\"], trace[\"Fill_Value\"], trace[\"Interp_Method\"], trace[\"Grid_Res\"], st.session_state['Color_Palette'], st.session_state[\"Overlay\"], overlay_alpha, overlay_marker, overlay_color", "def load_data(self):\n sets = ['train', 'val']\n images = []\n labels = []\n self.labels_dic = {}\n file = open(self.path + 'wnids.txt')\n train_labels = file.read().split()\n if self.train:\n for fn in range(self.num_classes):\n f = train_labels[fn]\n for i in os.listdir(self.path + 'train/' + f + '/images/'):\n images.append(Image.open(self.path + 'train/' + f + '/images/' + i))\n labels.append(f)\n #image label n link to folder names of TinyImageNet\n self.labels_dic[f] = fn\n\n else:\n for fn in range(self.num_classes):\n f = train_labels[fn]\n self.labels_dic[f] = fn\n file_val = open(self.path + 'val/val_annotations.txt')\n val_labels = file_val.read().split('\\n')\n for im in val_labels:\n im_data = im.split(\"\t\")[:2]\n if len(im_data) < 2:\n continue\n if 
im_data[1] in self.labels_dic:\n images.append(Image.open(self.path + 'val/images/' + im_data[0]))\n labels.append(im_data[1])\n\n self.images = images\n self.labels = labels", "def setup(self):\n in_dataset, out_dataset = self.get_datasets()\n \n out_dataset[0].create_dataset(in_dataset[0])\n\n in_pData, out_pData = self.get_plugin_datasets()\n\n in_pData[0].plugin_data_setup( '',)\n out_pData[0].plugin_data_setup( 'PROJECTION','multiple')", "def initSets(self):\n data_frame = pd.read_csv(self.train_file, header=None)\n data_frame = data_frame.drop(columns=self.drop_cols)\n features = data_frame.iloc[:, :-1].values\n labels = data_frame.iloc[:, -1].values\n if self.test_file is None:\n self.train_feat, self.test_feat, self.train_labl, self.test_labl = train_test_split(features, labels, test_size=self.test_size)\n else:\n data_frame = pd.read_csv(self.test_file, header=None)\n data_frame = data_frame.drop(columns=self.drop_cols)\n self.train_feat, _, self.train_labl, _ = train_test_split(features, labels, test_size=self.test_size)\n features = data_frame.iloc[:, :-1].values\n labels = data_frame.iloc[:, -1].values\n _, self.test_feat, _, self.test_labl = train_test_split(features, labels, test_size=self.test_size)\n # kfold = KFold(n_splits=3)\n # self.train_index, self.test_index = kfold.split(features,labels)", "def three_sample_images():\n samples = samples_path()\n _truck = np.array(Image.open(os.path.join(samples, \"truck.png\")))\n _deer = np.array(Image.open(os.path.join(samples, \"deer.png\")))\n _frog = np.array(Image.open(os.path.join(samples, \"frog.png\")))\n truck = transforms.ToTensor()(_truck)\n deer = transforms.ToTensor()(_deer)\n frog = transforms.ToTensor()(_frog)\n return torch.stack([truck, deer, frog])", "def DataLoader(data_place):\n # Nd = []\n # Np = []\n # Nz = []\n # channel_num = []\n # images = []\n # id_labels = []\n # pose_labels = []\n\n # mycase\n # Nz = 50\n # channel_num = 3\n # images = np.load('{}/images.npy'.format(data_place))\n # id_labels = np.load('{}/ids.npy'.format(data_place))\n # pose_labels = np.load('{}/yaws.npy'.format(data_place))\n #\n # Np = int(pose_labels.max() + 1)\n # Nd = int(id_labels.max() + 1)\n #\n # return [images, id_labels, pose_labels, Nd, Np, Nz, channel_num]\n\n # mycase MultiPIE\n Nz = 50\n channel_num = 3\n image_attributes_df = pd.read_csv(data_place)\n\n Nd = int(np.max(image_attributes_df['Id'])+1)\n Np = int(np.max(image_attributes_df['pose'])+1)\n Ni = int(np.max(image_attributes_df['illum'])+1)\n\n return [image_attributes_df, Nd, Np, Ni, Nz, channel_num]", "def menu_load_scene(self):\n file_name = QtGui.QFileDialog().getOpenFileName(self, \"Choose Scene File\", get_data_path(), \"*.pkl\")\n with open(file_name, \"rb\") as f:\n self.scene = pickle.load(f)", "def my_training_set(DATASET_PATH, scenes=['level_1', 'level_2', 'level_3'], is_rgb = False):\n my_scenes = scenes\n\n# if is_rgb == True:\n# data_path = 'image_02/data'\n# else:\n# data_path = 'image_00/data'\n\n clips = []\n for scene in my_scenes:\n scene_path = join(DATASET_PATH, scene)\n for s in sorted(listdir(scene_path)):\n if isdir(join(scene_path, s)):\n img_folder = join(scene_path, s)\n all_frames = []\n # loop over all the images in the folder (0.png,1.png,..,199.png)\n dir_path = listdir(img_folder)\n dir_path = sorted(dir_path, key=lambda name: int(name[0:-4]))\n for i in dir_path:\n if str(join(img_folder, i))[-3:] == \"png\":\n img_path = join(img_folder, i)\n all_frames.append(img_path)\n # get the 10-frames sequences from the list of images 
after applying data augmentation\n for stride in range(1, 2):\n clips.extend(get_clips_by_stride(stride=stride, frames_list=all_frames, sequence_size=11))\n return clips", "def fig_3():\n epoch = 3\n N = 60000\n Nr = N\n K = 32\n n_iter = 256\n Nstar = 16\n data = 'dr10'\n factor = 100.\n features = ['psf_mag', 'model_colors', 'psf_minus_model']\n filters = ['r', 'ug gr ri iz', 'ugriz']\n message = 'pm_mc_pmm_r_all_all'\n model = 'xdmodel_%s_%d_%d_%d_%d_%s.pkl' % (data, Nr, K, n_iter, Nstar,\n message)\n model = os.environ['xddata'] + model\n figname = os.environ['xdplots'] + 'fig3.png'\n error_rates(epoch, model, features, filters, figname, idx=-3, N=10000)", "def loading_data(source_path_name, dataset_path, attentive, not_attentive, image_count, train_rate, dimension,\n next_instance, root):\n\n # dictionary to store the four destination path\n dest_path = {}\n for s in SETS:\n for d in SUB_DIRS:\n dest_path[f\"{s}_{d}\"] = os.path.join(os.path.join(dataset_path, s), d)\n\n train_img_count = math.ceil(int(image_count) * float(train_rate[0]) * 0.1)\n test_img_count = image_count - train_img_count\n\n def loading_faces(source_image_set_path, dest_image_set_path, source_image_set):\n \"\"\"\n This is function write data into destination directory.\n\n :param source_image_set_path: directory from where images are coming\n :param dest_image_set_path: directory we created to insert the valid images\n :param source_image_set: list of valid images\n \"\"\"\n dimensions_of_img = find_dimensions_not_attentive_imgs\n if 'attentive' in dest_image_set_path:\n dimensions_of_img = find_dimensions_attentive_imgs\n for image_name in source_image_set:\n\n # loading gray image\n gray_image = cv2.imread(source_image_set_path + \"/\" + image_name, 0)\n\n # find co-ordinates of faces in images\n y1, x2, y2, x1 = dimensions_of_img(*face_recognition.face_locations(gray_image)[0], np.shape(gray_image))\n\n # crop image and resize to particular dimension\n crop_img = gray_image[y1:y2, x1:x2]\n resize_crop_img = cv2.resize(crop_img, (int(dimension[0:3]), int(dimension[0:3])))\n\n # load images from source to destination directory\n cv2.imwrite(dest_image_set_path + \"/\" + image_name, resize_crop_img)\n\n # building progress bar\n next_instance.destroy()\n progress = ThemedTk(theme=\"aqua\")\n progress.title(\"Progress\")\n\n info_label = Label(progress, text=\"Building of Training set is on progress\", font=(\"Times New Roman\", 12, \"bold\"))\n info_label.pack(pady=10)\n progress_bar = Progressbar(progress, orient=HORIZONTAL, length=220, mode='determinate')\n progress_bar.pack(pady=20)\n\n progress_bar['value'] = 0\n progress.update()\n\n # create the dataset structure contain the training and testing set\n create_structure(dataset_path)\n\n # training of attentive images\n loading_faces(source_path_name[\"attentive\"], dest_path[\"train_set_attentive\"], attentive[:train_img_count])\n\n progress_bar['value'] = 25\n progress.update()\n\n # training of not attentive images\n loading_faces(source_path_name[\"not_attentive\"], dest_path[\"train_set_not_attentive\"],\n not_attentive[:train_img_count])\n\n progress_bar['value'] = 50\n info_label['text'] = 'Building of Testing set is on progress'\n progress.update()\n\n # testing of attentive images\n loading_faces(source_path_name[\"attentive\"], dest_path[\"test_set_attentive\"], attentive[-test_img_count:])\n\n progress_bar['value'] = 75\n progress.update()\n\n # testing of not attentive images\n loading_faces(source_path_name[\"not_attentive\"], 
dest_path[\"test_set_not_attentive\"],\n not_attentive[-test_img_count:])\n\n progress_bar['value'] = 100\n progress.update()\n info_label['text'] = 'Data Processing is completed'\n progress.destroy()\n root.deiconify()\n\n info = open(f\"{dataset_path}/dataset_info.txt\", \"a\")\n info.write(f\"source directory path - {source_path_name['attentive'].rsplit('//')[0]}\")\n info.write('\\n\\n######### dataset parameter ##########')\n info.write(f\"\\ndataset name - {dataset_path}\")\n info.write(f\"\\nimage count - {image_count}\")\n info.write(f\"\\ntrain rate - {train_rate}\")\n info.write(f\"\\ndimension - {dimension}\")\n\n info.close()\n\n messagebox.showinfo(\"info\", \"Data Processing is Completed\")", "def _figure_3():\n\n dataset_id = 3\n pkl_file = _pkl_file_path(dataset_id)\n with open(pkl_file, 'rb') as f:\n data = pickle.load(f)\n\n cdata = data[:, 33]\n seconds = np.arange(data.shape[0]) * 1. / 250\n\n plt.xlim(right=seconds[-1])\n plt.plot(seconds, cdata, color='black', linestyle=':')\n plt.ticklabel_format(useOffset=False)\n plt.xlabel('Second')\n plt.ylabel('Microstrain')\n plt.savefig('Figure3.png', dpi=300)\n plt.gcf().clear()", "def load(self):\r\n self.create_effect_classes()\r\n\r\n self._add_resource_descriptions_to_pools(self.create_external_resources())\r\n self._add_resource_descriptions_to_pools(self.create_resources())\r\n\r\n for meta, resource in resources.textures.load_pool():\r\n self._textures[meta.label] = resource\r\n\r\n for meta, resource in resources.programs.load_pool():\r\n self._programs[meta.label] = resource\r\n\r\n for meta, resource in resources.scenes.load_pool():\r\n self._scenes[meta.label] = resource\r\n\r\n for meta, resource in resources.data.load_pool():\r\n self._data[meta.label] = resource\r\n\r\n self.create_effect_instances()\r\n self.post_load()", "def setup(self):\n in_dataset, out_dataset = self.get_datasets()\n \n out_dataset[0].create_dataset(in_dataset[0])\n\n in_pData, out_pData = self.get_plugin_datasets()\n\n in_pData[0].plugin_data_setup( 'PROJECTION',multiple)\n out_pData[0].plugin_data_setup( 'PROJECTION',multiple)", "def __init__(self, cfg, dataset_dir, list_flag, transforms, training):\n self.cfg = cfg.copy()\n self.dataset_dir = dataset_dir\n self.list_flag = list_flag\n self.transforms = transforms\n self.img_list_all = []\n self.training = training\n\n # Apollo 3d init\n Setting = namedtuple('Setting', ['image_name', 'data_dir'])\n setting = Setting([], self.dataset_dir)\n self.dataset = car_models.ApolloScape(setting)\n self._data_config = self.dataset.get_3d_car_config()\n self.car_id2name = car_models.car_id2name\n self.car_models = self.load_car_models()\n self.intrinsic_mat = self.get_intrinsic_mat()\n self.unique_car_models = np.array([2, 6, 7, 8, 9, 12, 14, 16, 18, 19, 20, 23, 25, 27, 28, 31, 32, 35, 37,\n 40, 43, 46, 47, 48, 50, 51, 54, 56, 60, 61, 66, 70, 71, 76])\n self.unique_car_names = [self.car_id2name[x].name for x in self.unique_car_models]\n # For evaluation use\n self.category_to_id_map = {'car': 1}\n self.eval_class = [1]\n self.eval_cat = {'car'}\n self.classes = ['__background__'] + [c for c in self.eval_cat]\n self.masker = Masker(threshold=0.5, padding=1)", "def _init_layers(self):\n self._init_predictor()\n if self.use_edge_fusion:\n self._init_edge_module()", "def visualise_data_pca_3d(self, component1, component2, component3, input_data=False):\n if input_data:\n self.__generate_input_data()\n pca_3d(array(self.input_data), component1, component2, component3, self.class_indices, self.path,\n 
'high_dimension_data', self.legend)\n else:\n self.__generate_output_data()\n pca_3d(array(self.output_data), component1, component2, component3, self.class_indices, self.path,\n 'low_dimension_data', self.legend)", "def load_dataset(self, subset):\n assert subset in ('train', 'val')\n\n # Add classes\n for id, name in self.class_mapper.items():\n self.add_class('nodule', id, name)\n\n # Add images\n self.df = self.df_all[self.df_all['subset'] == subset]\n\n image_ids = set()\n for row in self.df.itertuples():\n image_id = (row.seriesuid, row.coordZ)\n path = os.path.join(cur_dir, 'data', 'train', '{}_{}.npy'.format(row.seriesuid, row.coordZ))\n if image_id in image_ids:\n continue\n self.add_image(\"nodule\", image_id=image_id, path=path)\n image_ids.add(image_id)", "def load_cnns(self):\n self.cnn1 = cnn_utils.CNN()\n self.cnn1.load_state_dict(torch.load(f'{self.model_dir}/model1.pt'))\n self.cnn1.eval()\n self.cnn2 = cnn_utils.CNN()\n self.cnn2.load_state_dict(torch.load(f'{self.model_dir}/model2.pt'))\n self.cnn2.eval()", "def show(data_set, number_points: int):\n print(f'info: Showing {number_points} as maximum.')\n sub_set_points = np.random.choice(range(data_set.shape[0]), size=min(data_set.shape[0], number_points))\n x = data_set[sub_set_points, 0]\n y = data_set[sub_set_points, 1]\n z = data_set[sub_set_points, 2]\n\n fig = plt.figure(figsize=(8, 8))\n ax = mplot3d.Axes3D(fig)\n ax.set_title('NMSLIB index 3D representation', fontsize=20)\n ax.scatter(xs=x, ys=y, zs=z)\n plt.show()", "def load_subsampled_clouds(self, subsampling_parameter):\n\n if 0 < subsampling_parameter <= 0.01:\n raise ValueError('subsampling_parameter too low (should be over 1 cm')\n\n # Create path for files\n tree_path = join(self.path, 'input_{:.3f}'.format(subsampling_parameter))\n if not exists(tree_path):\n makedirs(tree_path)\n\n # List of training files\n self.train_files = np.sort([join(self.train_path, f) for f in listdir(self.train_path) if f[-4:] == '.ply'])\n\n # Add test files\n self.test_files = np.sort([join(self.test_path, f) for f in listdir(self.test_path) if f[-4:] == '.ply'])\n\n if self.debug:\n self.train_files = self.train_files[-101:]\n self.test_files = self.test_files[:10]\n\n files = np.hstack((self.train_files, self.test_files))\n # Initiate containers\n self.input_trees = {'training': [], 'validation': [], 'test': []}\n self.input_colors = {'training': [], 'validation': [], 'test': []}\n self.input_vert_inds = {'training': [], 'validation': [], 'test': []}\n self.input_labels = {'training': [], 'validation': []}\n\n # Advanced display\n N = len(files)\n progress_n = 30\n fmt_str = '[{:<' + str(progress_n) + '}] {:5.1f}%'\n print('\\nPreparing KDTree for all scenes, subsampled at {:.3f}'.format(subsampling_parameter))\n\n for i, file_path in enumerate(files):\n\n # get cloud name and split\n cloud_name = file_path.split('/')[-1][:-4]\n cloud_folder = file_path.split('/')[-2]\n if 'train' in cloud_folder:\n if cloud_name in self.validation_clouds:\n self.all_splits += [1]\n cloud_split = 'validation'\n else:\n self.all_splits += [0]\n cloud_split = 'training'\n else:\n cloud_split = 'test'\n\n if (cloud_split != 'test' and self.load_test) or (cloud_split == 'test' and not self.load_test):\n continue\n\n # Name of the input files\n KDTree_file = join(tree_path, '{:s}_KDTree.pkl'.format(cloud_name))\n sub_ply_file = join(tree_path, '{:s}.ply'.format(cloud_name))\n\n # Check if inputs have already been computed\n if isfile(KDTree_file):\n\n # read ply with data\n data = 
read_ply(sub_ply_file)\n sub_colors = np.vstack((data['red'], data['green'], data['blue'])).T\n sub_vert_inds = data['vert_ind']\n if cloud_split == 'test':\n sub_labels = None\n else:\n sub_labels = data['class']\n\n # Read pkl with search tree\n with open(KDTree_file, 'rb') as f:\n search_tree = pickle.load(f)\n\n else:\n # Read ply file\n data = read_ply(file_path)\n points = np.vstack((data['x'], data['y'], data['z'])).T\n colors = np.vstack((data['red'], data['green'], data['blue'])).T\n if cloud_split == 'test':\n int_features = data['vert_ind']\n else:\n int_features = np.vstack((data['vert_ind'], data['class'])).T\n\n # Subsample cloud\n sub_points, sub_colors, sub_int_features = grid_subsampling(points,\n features=colors,\n labels=int_features,\n sampleDl=subsampling_parameter)\n\n # Rescale float color and squeeze label\n sub_colors = sub_colors / 255\n if cloud_split == 'test':\n sub_vert_inds = np.squeeze(sub_int_features)\n sub_labels = None\n else:\n sub_vert_inds = sub_int_features[:, 0]\n sub_labels = sub_int_features[:, 1]\n\n # Get chosen neighborhoods\n search_tree = KDTree(sub_points, leaf_size=50)\n\n # Save KDTree\n with open(KDTree_file, 'wb') as f:\n pickle.dump(search_tree, f)\n\n # Save ply\n if cloud_split == 'test':\n write_ply(sub_ply_file,\n [sub_points, sub_colors, sub_vert_inds],\n ['x', 'y', 'z', 'red', 'green', 'blue', 'vert_ind'])\n else:\n write_ply(sub_ply_file,\n [sub_points, sub_colors, sub_labels, sub_vert_inds],\n ['x', 'y', 'z', 'red', 'green', 'blue', 'class', 'vert_ind'])\n\n # Fill data containers\n self.input_trees[cloud_split] += [search_tree]\n self.input_colors[cloud_split] += [sub_colors]\n self.input_vert_inds[cloud_split] += [sub_vert_inds]\n if cloud_split in ['training', 'validation']:\n self.input_labels[cloud_split] += [sub_labels]\n\n print('', end='\\r')\n print(fmt_str.format('#' * ((i * progress_n) // N), 100 * i / N), end='', flush=True)\n\n # Get number of clouds\n self.num_training = len(self.input_trees['training'])\n self.num_validation = len(self.input_trees['validation'])\n self.num_test = len(self.input_trees['test'])\n\n # Get validation and test reprojection indices\n self.validation_proj = []\n self.validation_labels = []\n self.test_proj = []\n self.test_labels = []\n i_val = 0\n i_test = 0\n\n # Advanced display\n N = self.num_validation + self.num_test\n print('', end='\\r')\n print(fmt_str.format('#' * progress_n, 100), flush=True)\n print('\\nPreparing reprojection indices for validation and test')\n\n for i, file_path in enumerate(files):\n\n # get cloud name and split\n cloud_name = file_path.split('/')[-1][:-4]\n cloud_folder = file_path.split('/')[-2]\n\n # Validation projection and labels\n if (not self.load_test) and 'train' in cloud_folder and cloud_name in self.validation_clouds:\n proj_file = join(tree_path, '{:s}_proj.pkl'.format(cloud_name))\n if isfile(proj_file):\n with open(proj_file, 'rb') as f:\n proj_inds, labels = pickle.load(f)\n else:\n # Get original mesh\n mesh_path = file_path.split('/')\n mesh_path[-2] = 'training_meshes'\n mesh_path = '/'.join(mesh_path)\n vertex_data, faces = read_ply(mesh_path[:-4] + '_mesh.ply', triangular_mesh=True)\n vertices = np.vstack((vertex_data['x'], vertex_data['y'], vertex_data['z'])).T\n labels = vertex_data['class']\n\n # Compute projection inds\n proj_inds = np.squeeze(self.input_trees['validation'][i_val].query(vertices, return_distance=False))\n proj_inds = proj_inds.astype(np.int32)\n\n # Save\n with open(proj_file, 'wb') as f:\n pickle.dump([proj_inds, 
labels], f)\n\n self.validation_proj += [proj_inds]\n self.validation_labels += [labels]\n i_val += 1\n\n # Test projection\n if self.load_test and 'test' in cloud_folder:\n proj_file = join(tree_path, '{:s}_proj.pkl'.format(cloud_name))\n if isfile(proj_file):\n with open(proj_file, 'rb') as f:\n proj_inds, labels = pickle.load(f)\n else:\n # Get original mesh\n mesh_path = file_path.split('/')\n mesh_path[-2] = 'test_meshes'\n mesh_path = '/'.join(mesh_path)\n vertex_data, faces = read_ply(mesh_path[:-4] + '_mesh.ply', triangular_mesh=True)\n vertices = np.vstack((vertex_data['x'], vertex_data['y'], vertex_data['z'])).T\n labels = np.zeros(vertices.shape[0], dtype=np.int32)\n\n # Compute projection inds\n proj_inds = np.squeeze(self.input_trees['test'][i_test].query(vertices, return_distance=False))\n proj_inds = proj_inds.astype(np.int32)\n\n with open(proj_file, 'wb') as f:\n pickle.dump([proj_inds, labels], f)\n\n self.test_proj += [proj_inds]\n self.test_labels += [labels]\n i_test += 1\n\n print('', end='\\r')\n\n\n print('\\n')\n\n return", "def main():\n\n # first figure: betas for each predictor\n fig, axes = plt.subplots(figsize=(8, 18), nrows=3)\n\n image_paths = {\n \"3T\": (\n f\"{PATHS['figures_misc']}/figure_3_images/\"\n f\"C1051_20161006_childVSall_depth_1.png\"\n ),\n \"7T\": (\n f\"{PATHS['figures_misc']}/figure_3_images/\"\n f\"C1051_20160212_childVSall_depth_1_sim2pt4.png\"\n ),\n \"7T_noise\": (\n f\"{PATHS['figures_misc']}/figure_3_images/\"\n f\"C1051_20160212_childVSall_depth_1_sim2pt4_noise.png\"\n ),\n }\n\n for ax, (_, image_path) in zip(axes, image_paths.items()):\n assert os.path.isfile(image_path)\n img = imread(image_path)\n img[np.where(np.sum(img, axis=2) == 0.0)] = 1.0\n\n ax.imshow(img[200:-100, 50:-50, :])\n ax.axis(\"off\")\n\n savefig(f\"{PATHS['figures']}/figure_3ac.png\")\n plt.close(fig)", "def c_3D(G, nombre):\n print(\"Pasando grafo a formato tridimensional...\")\n aDir = os.getcwd()\n d = dict(G.degree)\n \n Nodes = list(G.nodes)\n N = len(Nodes)\n Edges = pasar_na_num(G)\n\n Grafo = ig.Graph(Edges, directed=True)\n layt = Grafo.layout('kk', dim=3)\n Xn = [layt[k][0] for k in range(N)] # x-coordinates of nodes\n Yn = [layt[k][1] for k in range(N)] # y-coordinates\n Zn = [layt[k][2] for k in range(N)] # z-coordinates\n Xe = []\n Ye = []\n Ze = []\n\n for e in Edges:\n Xe += [layt[e[0]][0], layt[e[1]][0], None] # x-coordinates of edge ends\n Ye += [layt[e[0]][1], layt[e[1]][1], None]\n Ze += [layt[e[0]][2], layt[e[1]][2], None]\n\n trace1 = go.Scatter3d(x=Xe,\n y=Ye,\n z=Ze,\n mode='lines',\n line=go.scatter3d.Line(\n color=\"black\",\n colorscale=\"Blues\",\n width=3\n ),\n hoverinfo='none'\n )\n\n trace2 = go.Scatter3d(x=Xn,\n y=Yn,\n z=Zn,\n mode='markers',\n name='notes and chords',\n marker=dict(symbol='circle',\n size=6,\n color=list(d.values()),\n colorscale='Greens',\n line=dict(color='rgb(50,50,50)', width=0.5)\n ),\n text=Nodes,\n hoverinfo=\"text\"\n )\n\n axis = dict(showbackground=False,\n showline=False,\n zeroline=False,\n showgrid=False,\n showticklabels=False,\n title=''\n )\n\n layout = go.Layout(\n title=\"Grafo de la partitura {0}\".format(nombre),\n paper_bgcolor='rgba(0,0,0,0)',\n plot_bgcolor='rgba(0,0,0,0)',\n width=1000,\n height=1000,\n showlegend=False,\n scene=dict(\n xaxis=dict(axis),\n yaxis=dict(axis),\n zaxis=dict(axis),\n ),\n margin=dict(\n t=100\n ),\n hovermode='closest',\n )\n\n data = [trace1, trace2]\n figure = go.Figure(data=data, layout=layout)\n 
figure.write_html(\"{0}/3DGraphs/3D_Graph_{1}.html\".format(aDir,nombre))\n print(\"Listo.\")\n figure.show()", "def display_dataset(path, save, dset='sum'):\n # List datasets\n files_surf = os.listdir(path[0])\n files_surf.sort()\n files_deep = os.listdir(path[1])\n files_deep.sort()\n files_calc = os.listdir(path[2])\n files_calc.sort()\n\n # Corrected names\n files = os.listdir(r'Y:\\3DHistoData\\Subvolumes_2mm')\n files.sort()\n\n k = 0\n # Loop for displaying images\n for fsurf, fdeep, fcalc in zip(files_surf, files_deep, files_calc):\n # Load images\n im_surf = loadh5(path[0], fsurf, dset)\n im_deep = loadh5(path[1], fdeep, dset)\n im_calc = loadh5(path[2], fcalc, dset)\n # Create figure\n fig = plt.figure(dpi=300)\n ax1 = fig.add_subplot(131)\n ax1.imshow(im_surf, cmap='gray')\n plt.title(fsurf + ', Surface')\n ax2 = fig.add_subplot(132)\n ax2.imshow(im_deep, cmap='gray')\n plt.title('Deep')\n ax3 = fig.add_subplot(133)\n ax3.imshow(im_calc, cmap='gray')\n plt.title('Calcified')\n if save is not None:\n while files[k] == 'Images' or files[k] == 'MeanStd':\n k += 1\n\n # Save figure\n if not os.path.exists(save):\n os.makedirs(save, exist_ok=True)\n plt.tight_layout()\n fig.savefig(os.path.join(save, files[k]), bbox_inches=\"tight\", transparent=True)\n plt.close()\n\n # Save h5\n if not os.path.exists(save + '\\\\MeanStd\\\\'):\n os.makedirs(save + '\\\\MeanStd\\\\', exist_ok=True)\n\n h5 = h5py.File(save + \"\\\\MeanStd\\\\\" + files[k] + '.h5', 'w')\n h5.create_dataset('surf', data=im_surf)\n h5.create_dataset('deep', data=im_deep)\n h5.create_dataset('calc', data=im_calc)\n h5.close()\n else:\n plt.show()\n k += 1" ]
[ "0.64956695", "0.6306034", "0.60686326", "0.59560525", "0.58952993", "0.57739794", "0.56377673", "0.5542612", "0.5534125", "0.5517615", "0.5504378", "0.5468626", "0.54292387", "0.5424756", "0.5412685", "0.5403881", "0.5403201", "0.5379721", "0.5374727", "0.5370032", "0.53621066", "0.53575253", "0.5333057", "0.5281881", "0.5262017", "0.52594256", "0.5258809", "0.5246744", "0.5239302", "0.52308345", "0.5182771", "0.51710546", "0.51696086", "0.5165819", "0.5163041", "0.5138865", "0.5137607", "0.51199436", "0.51196796", "0.51138145", "0.5112942", "0.51069057", "0.5089593", "0.5079355", "0.50734895", "0.50548494", "0.50504476", "0.5048201", "0.5029298", "0.50262576", "0.5024661", "0.5021029", "0.50165784", "0.5005346", "0.4994048", "0.49905306", "0.4987621", "0.49862856", "0.49837047", "0.4980642", "0.497271", "0.49704495", "0.49695098", "0.4963418", "0.49616045", "0.4941879", "0.49413025", "0.49409273", "0.4939731", "0.49299562", "0.4927572", "0.49240032", "0.49228948", "0.4916131", "0.49106225", "0.4910429", "0.49071276", "0.48942804", "0.4894136", "0.4891686", "0.48873895", "0.48841906", "0.48822054", "0.4880064", "0.48791727", "0.48787642", "0.4877593", "0.48768222", "0.48737684", "0.48699275", "0.48694938", "0.48679036", "0.48632652", "0.48612025", "0.48610234", "0.48596355", "0.4854044", "0.48535264", "0.48490217", "0.48386052" ]
0.50833565
43
Format function for Matplotlib formatter.
def _formatFreq(self, value, pos):\n inv = 999\n if value:\n inv = 1/value\n return "1/%0.2f" % inv
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def float_format(self):\n ...", "def asformat(self, format):", "def format(self, *args, **kwargs) -> String:\n pass", "def add_formatter(self, fmt):\n if fmt and not isfunction(fmt):\n raise TypeError(\"custom format function must be a type of function\")\n\n if fmt and fmt.__code__.co_argcount < 2:\n raise TypeError(\"custom format function requires at least 2 arguments\")\n\n self.formatter = fmt", "def get_formatter(self, **kwargs):\n config = dict([\n (attr, getattr(self, attr))\n for attr in [\n \"include_sign\",\n \"group_with_commas\",\n \"num_decimal_places\",\n ]\n ])\n config.update(kwargs)\n return \"\".join([\n \"{\",\n config.get(\"field_name\", \"\"),\n \":\",\n \"+\" if config[\"include_sign\"] else \"\",\n \",\" if config[\"group_with_commas\"] else \"\",\n \".\", str(config[\"num_decimal_places\"]), \"f\",\n \"}\",\n ])", "def format(self) -> str:", "def formatted(self) -> str:\r\n ...", "def format(self):\n ...", "def __format__(self, *args, **kwargs): # real signature unknown\r\n pass", "def format_value(self, value: float) -> str:\r\n ...", "def format_value(self, value: float) -> str:\r\n ...", "def format(self, *args, **kwargs):\n raise NotImplementedError()", "def ScalingFormatter(scaling=1, fmtstr='{x:g}'):\n # Try to find string scaling as attributes in `typhon.constants`.\n if isinstance(scaling, str):\n scaling = getattr(constants, scaling)\n\n @FuncFormatter\n def formatter(x, pos):\n return fmtstr.format(x=x / scaling)\n\n return formatter", "def __format__(self, format_spec: str = \"\") -> str:\n if not format_spec:\n format_spec = \".4G\"\n array_string = np.array2string(\n self.data_in_display_units,\n formatter={\"float_kind\": lambda x: format(x, format_spec)},\n )\n return f\"{array_string} {self.display_unit}\"", "def __format__(self, formatstr):\n return self.df.__format__(formatstr)", "def format(self, *args, **kwargs):\n return self._format(args, kwargs)", "def __format__(self, formatstr):\n if formatstr.strip() == '': # Defualt behaviour mirrors self.__str__()\n formatstr = '+.3f'\n\n string = \\\n \"{:\" + formatstr +\"} \" + \\\n \"{:\" + formatstr +\"}i \" + \\\n \"{:\" + formatstr +\"}j \" + \\\n \"{:\" + formatstr +\"}k\"\n return string.format(self.q[0], self.q[1], self.q[2], self.q[3])", "def func_plot_fmt_map():\n # line_styles = ['o-', 'x-', '*-', '-_', 'D-', 'h-', '+-', 's-', 'v-', \n # ',-', '1-']\n func_tuples = get_func_tuples()\n M = {k:v for (k, _, v) in func_tuples}\n return M", "def format(self, *args: str, **kwargs: str) -> ANSI:\n return ANSI(FORMATTER.vformat(self.value, args, kwargs))", "def oldformat(self, ticks, numlabels=None, char_width=None):\n labels = []\n if len(ticks) == 0:\n return []\n\n d = abs(ticks[-1] - ticks[0])\n for x in ticks:\n if abs(x)<1e4 and x==int(x):\n labels.append('%d' % x)\n continue\n\n if d < 1e-2: fmt = '%1.3e'\n elif d < 1e-1: fmt = '%1.3f'\n elif d > 1e5: fmt = '%1.1e'\n elif d > 10 : fmt = '%1.1f'\n elif d > 1 : fmt = '%1.2f'\n else: fmt = '%1.3f'\n s = fmt % x\n tup = s.split('e')\n if len(tup)==2:\n mantissa = tup[0].rstrip('0').rstrip('.')\n sign = tup[1][0].replace('+', '')\n exponent = tup[1][1:].lstrip('0')\n if sign or exponent:\n s = '%se%s%s' %(mantissa, sign, exponent)\n else:\n s = mantissa\n else:\n s = s.rstrip('0').rstrip('.')\n labels.append(s)\n return labels", "def __call__(self, x: FloatArrayLike) -> Sequence[str]:\n if self.style == \"new\":\n return [self.fmt.format(val) for val in x]\n elif self.style == \"old\":\n return [self.fmt % val for val in x]\n else:\n raise 
ValueError(\"style should be either 'new' or 'old'\")", "def __mdformat(self, dat):\n\t\tif type(dat) is types.FloatType:\n\t\t\treturn round(dat, 3)\n\t\telse:\n\t\t\treturn dat", "def get_format(self):\n pass", "def __makeFormatString(self):\n self.__formatString = \"\"\n for f in self.__columns:\n self.__formatString += \"%(\"+ f + \")-\" + str(self.__widths[f]) + \\\n \"s \"", "def __format__(self, format_spec):\n return self.as_tensor().__format__(format_spec)", "def sformatf(cls, msg, *args):\n #formats = {\"%t\": \"%d\", \"%0t\": \"%0d\"}\n #for s in formats:\n # msg = msg.replace(s, formats[s])\n #return sformatf(msg, *args)\n # TODO substitute old types %s/%d etc with {}\n #new_msg = cls.STR_RE.sub(r'{:\\1}', msg)\n #print(\"new_msg is \" + new_msg)\n for s in cls.formats:\n if s == \"%h\" or s == \"%0h\":\n msg = msg.replace(s, \"{:X}\")\n else:\n msg = msg.replace(s, \"{}\")\n return msg.format(*args)", "def __format__(self, format_specification=''):\n return super().__format__(format_specification=format_specification)", "def __format__(self, fmt):\n if not isinstance(fmt, str):\n raise TypeError(\"must be str, not %s\" % type(fmt).__name__)\n if len(fmt) != 0:\n return self.strftime(fmt)\n return str(self)", "def format( self ) :\n\n return( self.__format )", "def newFormatter(*args, **kw):\n originalResult = originalFormatter(*args, **kw)\n if all():\n originalResult += ' %r' % all()\n return originalResult", "def default_formatter(self, data):\n return data", "def formatter(text):\n repl_map = {\n \"degC\": \"$^o$C\",\n \"K\": \"$^o$C\",\n \"month-1\": \"month$^{{-1}}$\",\n \"day-1\": \"day$^{{-1}}$\",\n \"d-1\": \"day$^{{-1}}$\",\n \"decade-1\": \"decade$^{{-1}}$\",\n \"year-1\": \"year$^{{-1}}$\",\n \"rcp85\": \"RCP8.5\",\n \"rcp45\": \"RCP4.5\",\n \"rcp26\": \"RCP2.6\",\n \"RCP85\": \"RCP8.5\",\n \"RCP45\": \"RCP4.5\",\n \"RCP26\": \"RCP2.6\",\n \"cmip5-85\": \"RCP8.5\",\n \"cmip5-60\": \"RCP6.0\",\n \"cmip5-45\": \"RCP4.5\",\n \"cmip5-26\": \"RCP2.6\",\n \"ssp585\": \"SSP5-8.5\",\n \"ssp245\": \"SSP2-4.5\",\n \"ssp126\": \"SSP1-2.6\",\n \"SSP585\": \"SSP5-8.5\",\n \"SSP245\": \"SSP2-4.5\",\n \"SSP126\": \"SSP1-2.6\",\n \"cmip6-85\": \"SSP5-8.5\",\n \"cmip6-70\": \"SSP3-7.0\",\n \"cmip6-60\": \"SSP4-6.0\",\n \"cmip6-34\": \"SSP4-3.4\",\n \"cmip6-45\": \"SSP2-4.5\",\n \"cmip6-26\": \"SSP1-2.6\",\n \"cmip6-19\": \"SSP1-1.9\",\n \"1\": \"%\",\n \"era5\": \"ERA5\",\n \"gpcc025x025_v8\": \"GPCC\",\n \"cru\": \"CRU\",\n \"jra55\": \"JRA55\",\n \"HIGHRESMIP\": \"HighResMIP\",\n \" \": \"\",\n }\n for key, val in repl_map.items():\n if key in text:\n text = text.replace(key, val)\n break\n return text", "def x_formatter_cb( self, ax ):\n ax.set_xlim( xmin=self.begin_num,xmax=self.end_num )\n dl = common.PrettyDateLocator()\n df = common.PrettyDateFormatter( dl )\n ax.xaxis.set_major_locator( dl )\n ax.xaxis.set_major_formatter( df )\n ax.xaxis.set_clip_on(False)\n sf = common.PrettyScalarFormatter( )\n ax.yaxis.set_major_formatter( sf )\n labels = ax.get_xticklabels()", "def test_format(self):\n MAX_LINES_val = pprint.MAX_LINES()\n pprint.MAX_LINES.set(7)\n c1 = Column(name='a', data=np.arange(2000), dtype=float,\n format='%6.2f')\n assert str(c1) == (' a \\n-------\\n 0.00\\n'\n ' 1.00\\n ...\\n1998.00\\n1999.00')\n pprint.MAX_LINES.set(MAX_LINES_val)", "def get_format(self):\n format = QtGui.QTextCharFormat()\n\n # Set foreground color\n if self.foreground_color is not None:\n color = self.color_map[self.foreground_color][self.intensity]\n 
format.setForeground(QtGui.QColor(color))\n\n # Set background color\n if self.background_color is not None:\n color = self.color_map[self.background_color][self.intensity]\n format.setBackground(QtGui.QColor(color))\n\n # Set font weight/style options\n if self.bold:\n format.setFontWeight(QtGui.QFont.Bold)\n else:\n format.setFontWeight(QtGui.QFont.Normal)\n format.setFontItalic(self.italic)\n format.setFontUnderline(self.underline)\n\n return format", "def fmt_point(point):\n assert len(point) == 2\n return f\"({point[0]},{point[1]})\"", "def __repr__(self) -> str:\r\n\r\n saida = \"Format: \"\r\n x = self.getformat()\r\n for _ in range(len(x)):\r\n saida = f\"{saida}{x[_]}\"\r\n if _ < len(x)-1:\r\n saida += \", \"\r\n saida += \"\\n\"\r\n return saida", "def formatter(question: dict):\n fmt = question.get('possibilities', {}).get('format')\n if fmt == 'date':\n return pd.to_datetime\n elif fmt == 'num':\n return lambda x: x\n else:\n raise ValueError(f\"Question format {fmt} unknown\")", "def set_format(self, column_label, formatter):\n assert callable(formatter), 'formatter must be a function'\n self._formats[column_label] = formatter\n return self", "def tickLabelFormatter(ticks, style):\n \n from hep.draw.latex import scientificNotation\n\n # Compute the absolute size of the largest tick.\n largest = max(map(abs, ticks))\n if largest == 0:\n # All zero.\n return lambda v: \"none\"\n\n # Find the scale factor.\n range = max(ticks) - min(ticks)\n scale = min(largest, range)\n if scale == 0:\n scale = 1\n else:\n scale = log10(scale)\n if scale > 5:\n # Format using scientific notation.\n return lambda v: \"$\" + scientificNotation(v, 2) + \"$\"\n elif scale > 2:\n # Format as an integer.\n return lambda v: \"%d\" % v\n elif scale > -3:\n # Format as a floating-point number.\n return lambda v: \"%.*f\" % (int(2.5 - scale), v)\n else:\n # Format using scientific notation.\n return lambda v: \"$\" + scientificNotation(v, 2) + \"$\"", "def latex_formatter(self, counts, prefix):\n prefix = prefix.replace('_', ' ')\n output = r'{}'.format(prefix)\n for count in counts:\n output += r' & \\num[group-separator={{,}}]{{{}}}'.format(int(count))\n output += r' & \\num[group-separator={{,}}]{{{}}} \\\\'.format(int(counts.sum()))\n output += '\\n'\n return output", "def format(self, data):", "def _format(self):\n min_value = self.replacements.get(str(self.min), str(self.min))\n max_value = self.replacements.get(str(self.max), str(self.max))\n l_brace = '(' if min_value.find('inf') != -1 else '['\n r_brace = ')' if max_value.find('inf') != -1 else ']'\n\n return '{l_brace}{min_value}, {max_value}{r_brace}'.format(\n l_brace=l_brace, r_brace=r_brace,\n min_value=min_value, max_value=max_value)", "def set_formatter(self, formatter):\n self.format = formatter", "def format(self, *args: object, **kwargs: object) -> HTML:\n return HTML(FORMATTER.vformat(self.value, args, kwargs))", "def point_format(self) -> PointFormat:\n return self.points.point_format", "def myformat(table):\n m = 0\n table = sorted(table, key=itemgetter(0))\n for t in table:\n t = str(t)\n if len(t[0]) > m:\n m = len(t[0])\n m += 10\n fstr = \"{0:}\" + m*\" \" + \"{1:}\"\n s = \"\"\n for x in table:\n try:\n a = float(x[0])\n b = float(x[1])\n s += \"{0:.5f}{1:{width}}\".format(a, b, width=m) + \"\\n\"\n except IndexError:\n pass\n return s\n \"\"\"\n out = \"\"\n for pair in table:\n out += str(pair[0]) + 5*\" \" + str(pair[1]) + \"\\n\"\n return out\"\"\"", "def format(self, value):\r\n metric = {\r\n \"degree\": u'\\N{DEGREE 
SIGN}',\r\n \"percent\": u'%',\r\n \"meter\": u'm',\r\n \"klux\": u' L',\r\n \"none\": ''\r\n }[self.unit]\r\n if self.unit == \"percent\":\r\n value *= 100.0\r\n return u\"{:3.1f}{}\".format(value, metric)", "def format(self):\n return self._format", "def update_format(self, record):\n prefix = \"\\u001b[\"\n color = f\"{prefix}{self.color_map[record.levelno]}m\"\n bold = f\"{prefix}1m\"\n gray = f\"{prefix}1m{prefix}30m\"\n reset = f\"{prefix}0m\"\n self._style._fmt = (\n f\"%(asctime)s\"\n f\" {gray}│{reset} {color}%(levelname)-8s{reset} {gray}│{reset} \"\n )\n if hasattr(record, \"function\"):\n self._style._fmt += (\n f\"{gray}%(indent)s{reset}\"\n f\"{bold}%(function)s{reset}{gray}:{reset}\"\n \" %(message)s\"\n )\n else:\n self._style._fmt += \"%(indent)s%(message)s\"", "def format(self, ticks, numlabels=None, char_width=None, fill_ratio=0.3):\n return map(str, map(int, ticks))", "def _render_thing(self, thing):\n function = \"{:}\".format\n if (type(thing) in self.fmatdict):\n function = self.fmatdict[type(thing)]\n return function(thing).strip()", "def get_formatter(self, group):\n return getattr(self, \"format_\" + group + \"_standings\")", "def __str__(self):\n return self.fmt.format(*self.args, **self.kwargs)", "def _format_point(self, point):\n return (point + self.draw_offset).intify()", "def _format(val, valtype, floatfmt, intfmt, missingval=\"\", has_invisible=True): # noqa\n if val is None:\n return missingval\n\n if valtype is str:\n return f\"{val}\"\n elif valtype is int:\n return format(val, intfmt)\n elif valtype is bytes:\n try:\n return str(val, \"ascii\")\n except (TypeError, UnicodeDecodeError):\n return str(val)\n elif valtype is float:\n is_a_colored_number = has_invisible and isinstance(val, (str, bytes))\n if is_a_colored_number:\n raw_val = _strip_ansi(val)\n formatted_val = format(float(raw_val), floatfmt)\n return val.replace(raw_val, formatted_val)\n else:\n return format(float(val), floatfmt)\n else:\n return f\"{val}\"", "def _make_formatter(*args, **kwargs):\n # pylint: disable = no-else-return\n\n assert not(args and kwargs)\n\n if args:\n # tuples are given for the whole command string but applied per token.\n # We need to supply only the tuples which are needed for the current\n # token.\n args = list(args[::-1])\n pcents = _re.compile(r'%[^%]').findall\n\n def formatter(value):\n \"\"\" Tuple formatter \"\"\"\n count = len(pcents(value))\n torepl = []\n while len(torepl) < count:\n torepl.append(args.pop())\n return value % tuple(torepl)\n return formatter\n\n elif kwargs:\n return lambda x: x % kwargs\n\n return lambda x: x", "def xFormat(self, formatFnc, label=None, options=None, isPyData=False):\r\n return self", "def set_formatter_string(config: dict):\n formatter_str = \"%(levelname)s %(name)s\"\n\n if config.get(\"formatter\"):\n return config[\"formatter\"]\n\n if config.get(\"extended\"):\n formatter_str += \".%(funcName)s():\"\n\n if config.get(\"timestamp\"):\n formatter_str = \"%(asctime)s \" + formatter_str\n\n formatter_str += \" %(message)s\"\n\n return formatter_str", "def format(self, message):", "def format(self) -> str:\n return self._format", "def format(self) -> str:\n return self._format", "def _apply_eng_formatter(ax, which):\n if which == \"x\":\n axis = ax.xaxis\n current_min, current_max = ax.get_xlim()\n elif which == \"y\":\n axis = ax.yaxis\n current_min, current_max = ax.get_ylim()\n else:\n raise ValueError(f\"Can only format x-axis or y-axis, not {which}\")\n\n if current_max > 4000 or current_min < -1000:\n 
axis.set_major_formatter(EngFormatter())", "def format_data(self)->float: \n try:\n formatted = chr(self.data[0])\n for i in range(1, len(self.data)): \n formatted = formatted + (chr(self.data[i])) \n return str(formatted)\n except ValueError as e:\n print(e)\n except TypeError as e:\n print(e)", "def format(self) -> str:\n return pulumi.get(self, \"format\")", "def str_fmt(x):\n if isinstance(x, (list, tuple, np.ndarray)):\n return [str_fmt(x) for x in x]\n if x <= 0.1:\n return f'${x:.2f}$'\n return f'${x:.1f}$' if x <= 1 else f'${int(x)}$'", "def format(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"format\")", "def parser_formatter(format_class, **kwargs):\n try:\n return lambda prog: format_class(prog, **kwargs)\n except TypeError:\n return format_class", "def parser_formatter(format_class, **kwargs):\n try:\n return lambda prog: format_class(prog, **kwargs)\n except TypeError:\n return format_class", "def reformat(ctx):\n pass", "def _formatter_func(self, tup):\n formatter_funcs = [level._formatter_func for level in self.levels]\n return tuple(func(val) for func, val in zip(formatter_funcs, tup))", "def __format__(self, format_spec):\n # Reject anything that isn't an s\n if format_spec[-1] != 's':\n raise ValueError('{} format specifier not understood for this object',\n format_spec[:-1])\n # Output in this example will be (<a>,<b>,<c>)\n raw = \"(\" + \",\".join([str(self.a), str(self.b), str(self.c)]) + \")\"\n # Honor the format language by using the inbuilt string format\n # Since we know the original format_spec ends in an 's'\n # we can take advantage of the str.format method with a\n # string argument we constructed above\n return \"{r:{f}}\".format( r=raw, f=format_spec )", "def engFormat(f):\n if f == 0.0:\n value = 0.0\n exponent = 0\n else:\n exponent = math.log10(-f if f < 0 else f)\n if exponent < 0:\n exponent = -int(math.ceil(-exponent))\n else:\n exponent = int(math.floor(exponent))\n for i in range(3):\n if (exponent % 3) == 0:\n break\n exponent = exponent - 1\n value = f * 10 ** -exponent\n # Choose a format to maintain the number of useful digits we print.\n if abs(value) < 10:\n fmt = \"%6.3f%s\"\n elif abs(value) < 100:\n fmt = \"%6.2f%s\"\n else:\n fmt = \"%6.1f%s\"\n\n return fmt % (value, (\"\" if exponent == 0 else \"e%d\" % exponent))", "def formatter(formatter_name):\n\n def _formatter_decorator(f):\n def _formatter_wrapper(*wrapper_args, **wrapper_kwargs):\n ctx = wrapper_args[1]\n if not ctx.json and formatter_name in _formatter_functions:\n ctx.format_function = _formatter_functions[formatter_name]\n return f(*wrapper_args, **wrapper_kwargs)\n\n return _formatter_wrapper\n\n return _formatter_decorator", "def format(\n self, *args,\n r0=None, theta0=None, thetadir=None,\n thetamin=None, thetamax=None, thetalim=None,\n rmin=None, rmax=None, rlim=None,\n rlabelpos=None, rscale=None, rborder=None,\n thetalocator=None, rlocator=None, thetalines=None, rlines=None,\n thetaformatter=None, rformatter=None,\n thetalabels=None, rlabels=None,\n thetalocator_kw=None, rlocator_kw=None,\n thetaformatter_kw=None, rformatter_kw=None,\n **kwargs\n ):\n rc_kw, rc_mode, kwargs = _parse_format(**kwargs)\n with rc.context(rc_kw, mode=rc_mode):\n # Not mutable default args\n thetalocator_kw = thetalocator_kw or {}\n thetaformatter_kw = thetaformatter_kw or {}\n rlocator_kw = rlocator_kw or {}\n rformatter_kw = rformatter_kw or {}\n # Flexible input\n if rlim is not None:\n if rmin is not None or rmax is not None:\n _warn_proplot(\n f'Conflicting keyword args 
rmin={rmin}, rmax={rmax}, '\n f'and rlim={rlim}. Using \"rlim\".'\n )\n rmin, rmax = rlim\n if thetalim is not None:\n if thetamin is not None or thetamax is not None:\n _warn_proplot(\n f'Conflicting keyword args thetamin={thetamin}, '\n f'thetamax={thetamax}, and thetalim={thetalim}. '\n f'Using \"thetalim\".'\n )\n thetamin, thetamax = thetalim\n thetalocator = _notNone(\n thetalines, thetalocator, None,\n names=('thetalines', 'thetalocator'))\n thetaformatter = _notNone(\n thetalabels, thetaformatter, None,\n names=('thetalabels', 'thetaformatter'))\n rlocator = _notNone(rlines, rlocator, None,\n names=('rlines', 'rlocator'))\n rformatter = _notNone(rlabels, rformatter,\n None, names=('rlabels', 'rformatter'))\n\n # Special radius settings\n if r0 is not None:\n self.set_rorigin(r0)\n if rlabelpos is not None:\n self.set_rlabel_position(rlabelpos)\n if rscale is not None:\n self.set_rscale(rscale)\n if rborder is not None:\n self.spines['polar'].set_visible(bool(rborder))\n # Special azimuth settings\n if theta0 is not None:\n self.set_theta_zero_location(theta0)\n if thetadir is not None:\n self.set_theta_direction(thetadir)\n\n # Iterate\n for (\n x, r, axis,\n min_, max_,\n locator, formatter,\n locator_kw, formatter_kw,\n ) in zip(\n ('x', 'y'), ('theta', 'r'), (self.xaxis, self.yaxis),\n (thetamin, rmin), (thetamax, rmax),\n (thetalocator, rlocator), (thetaformatter, rformatter),\n (thetalocator_kw, rlocator_kw),\n (thetaformatter_kw, rformatter_kw)\n ):\n # Axis limits\n # Try to use public API where possible\n if min_ is not None:\n getattr(self, 'set_' + r + 'min')(min_)\n else:\n min_ = getattr(self, 'get_' + r + 'min')()\n if max_ is not None:\n getattr(self, 'set_' + r + 'max')(max_)\n else:\n max_ = getattr(self, 'get_' + r + 'max')()\n\n # Spine settings\n kw = rc.fill({\n 'linewidth': 'axes.linewidth',\n 'color': 'axes.edgecolor',\n }, context=True)\n sides = ('inner', 'polar') if r == 'r' else ('start', 'end')\n spines = [self.spines[s] for s in sides]\n for spine, side in zip(spines, sides):\n spine.update(kw)\n\n # Grid and grid label settings\n # NOTE: Not sure if polar lines inherit tick or grid props\n kw = rc.fill({\n 'color': x + 'tick.color',\n 'labelcolor': 'tick.labelcolor', # new props\n 'labelsize': 'tick.labelsize',\n 'grid_color': 'grid.color',\n 'grid_alpha': 'grid.alpha',\n 'grid_linewidth': 'grid.linewidth',\n 'grid_linestyle': 'grid.linestyle',\n }, context=True)\n axis.set_tick_params(which='both', **kw)\n # Label settings that can't be controlled with set_tick_params\n kw = rc.fill({\n 'fontfamily': 'font.family',\n 'weight': 'tick.labelweight'\n }, context=True)\n for t in axis.get_ticklabels():\n t.update(kw)\n\n # Tick locator, which in this case applies to gridlines\n # NOTE: Must convert theta locator input to radians, then back\n # to degrees.\n if locator is not None:\n if r == 'theta' and (\n not isinstance(locator, (str, mticker.Locator))):\n # real axis limts are rad\n locator = np.deg2rad(locator)\n locator = axistools.Locator(locator, **locator_kw)\n locator.set_axis(axis) # this is what set_locator does\n grids = np.array(locator())\n if r == 'r':\n grids = grids[(grids >= min_) & (grids <= max_)]\n self.set_rgrids(grids)\n else:\n grids = np.rad2deg(grids)\n grids = grids[(grids >= min_) & (grids <= max_)]\n if grids[-1] == min_ + 360: # exclusive if 360 degrees\n grids = grids[:-1]\n self.set_thetagrids(grids)\n # Tick formatter and toggling\n if formatter is not None:\n formatter = axistools.Formatter(formatter, **formatter_kw)\n 
axis.set_major_formatter(formatter)\n\n # Parent method\n super().format(*args, **kwargs)", "def format(self):\n return self.getparam(\"FORMAT\")", "def format(self):\n return self.getparam(\"FORMAT\")", "def TEXT(number, format_type):\n raise NotImplementedError()", "def format_line(cls, reponame, package_counts, percent, repomd):\n return '{0} {1} {2} {3}'.format(reponame, package_counts, percent, repomd)", "def format(self):\n groups = [g + \".\" for g in self.groups]\n params = [\";\" + p.format() for p in self.params]\n groups_name_params = \"\".join(groups) + self.name + \"\".join(params)\n return groups_name_params + \":\" + self.format_value() + CRLF", "def set_sig_figs(n):\n u.default_format = '.' + str(n) + 'g'\n pd.options.display.float_format = ('{:,.' + str(n) + '}').format", "def number_formatter(number, pos=None):\n magnitude = 0\n while abs(number) >= 1000:\n magnitude += 1\n number /= 1000.0\n return '%.1f%s' % (number, ['', 'K', 'M', 'B', 'T', 'Q'][magnitude])", "def _translate_fmts(self):\n fmt_info = []\n fmt_append = fmt_info.append\n \n isvalid = self._is_valid_fmt\n typlist = self._typlist\n isstrvar = self._isstrvar\n default_fmts = self._default_fmts\n \n for i, fmt in enumerate(self._fmtlist):\n fmt = fmt.strip()\n \n iscalendar = (fmt[1] == 't' or fmt[1:3] == '-t')\n \n if iscalendar or not isvalid(fmt):\n if isstrvar(i):\n wid = min(typlist[i], 10)\n fmt_append(('s', \"{{:>{}s}}\".format(wid), wid))\n continue\n else:\n fmt = default_fmts[typlist[i]]\n \n last_char = fmt[-1]\n if last_char == 's': # string\n m = STR_FMT_RE.match(fmt)\n align, _, wid = m.group(1), m.group(2), m.group(3)\n new_align = (\"<\" if align == \"-\" \n else \"^\" if align == \"~\" else \">\")\n new = \"\".join((\"{:\", new_align, wid, \"s}\"))\n fmt_append(('s', new, int(wid)))\n elif last_char == 'H' or last_char == 'L': # binary\n fmt_append((last_char, fmt, int(fmt[1:-1])))\n elif last_char == 'x': # hexadecimal\n fmt_append(('x', fmt, 21))\n elif last_char in {'f', 'g', 'e', 'c'}: # numeric\n m = NUM_FMT_RE.match(fmt)\n align, _, wid, delim, prec, type, com = (m.group(1), m.group(2), \n m.group(3), m.group(4),\n m.group(5), m.group(6),\n m.group(7))\n aln = \"<\" if align == \"-\" else \">\"\n sep = \",\" if com is not None else \"\"\n if type == \"g\" and int(prec) == 0:\n new = \"\".join((\"{:\", aln, wid, sep, type, \"}\"))\n else:\n new = \"\".join((\"{:\", aln, wid, sep, \".\", prec, type, \"}\"))\n fmt_append((type, new, int(wid), delim, com))\n \n return fmt_info", "def get_formatter(style):\n if style == 'authoryear':\n return AuthorYearFormatter\n return AuthorYearFormatter", "def format(self, record):\n msg = logging.Formatter.format(self, record)\n label, color = self.label(record)\n if self.strip:\n return \"{:10s}{}\".format(label, sub(\"\\033\\\\[[0-9]+m\", \"\", msg, 0))\n else:\n return \"\\033[1;{}m{:10s}\\033[0m{}\".format(color, label, msg)", "def __format__(self, format_spec):\n # This calls the compiled regex stored on ANSIString's class to analyze the format spec.\n # It returns a dictionary.\n format_data = self.re_format.match(format_spec).groupdict()\n clean = self.clean()\n base_output = ANSIString(self.raw())\n align = format_data.get(\"align\", \"<\")\n fill = format_data.get(\"fill\", \" \")\n\n # Need to coerce width into an integer. 
We can be certain that it's numeric thanks to regex.\n width = format_data.get(\"width\", None)\n if width is None:\n width = len(clean)\n else:\n width = int(width)\n\n if align == \"<\":\n base_output = self.ljust(width, fill)\n elif align == \">\":\n base_output = self.rjust(width, fill)\n elif align == \"^\":\n base_output = self.center(width, fill)\n elif align == \"=\":\n pass\n\n # Return the raw string with ANSI markup, ready to be displayed.\n return base_output.raw()", "def _fmt(x, pos):\n a, b = '{:.2e}'.format(x).split('e')\n b = int(b)\n return r'${} \\times 10^{{{}}}$'.format(a, b)", "def format(self) -> Optional[pulumi.Input['FlowLogFormatParametersArgs']]:\n return pulumi.get(self, \"format\")", "def __init__(self, fmt, datefmt=None):\n logging.Formatter.__init__(self, fmt, datefmt)", "def __gen_fmt_str__(self, fmt):\n return '=' + (self.num_pts_recv * (fmt + ' '))", "def getFormatString(self):\n return ['S', 'P', 'w']", "def normalize_format(fmt):\n # Remove shape '()' at the forefront which is equivalent to an scalar\n if fmt[:2] == '()':\n fmt = fmt[2:]\n # Accept 'S' as a synonym of 'a'\n if fmt.find('S') >= 0:\n fmt = fmt.replace('S', 'a')\n return fmt", "def reformat():\n toolkit.reformat()", "def __repr__(self: GtinFormat) -> str:\n return f\"GtinFormat.{self.name}\"", "def _format (color, style=''):\n _format = QtGui.QTextCharFormat()\n if color != '':\n _format.setForeground(getattr(QtCore.Qt, color))\n if 'bold' in style:\n _format.setFontWeight(QtGui.QFont.Bold)\n if 'italic' in style:\n _format.setFontItalic(True)\n return _format", "def formatter(in_tuple):\n length = len(in_tuple)\n form_string = (\"the {} numbers are: \" + \", \".join([\"{}\"]*length)).format(length, *in_tuple)\n return form_string.format(in_tuple)", "def strfdate(self, fmt):\n pattern = r'%({})'.format(reduce(lambda x, y: '{}|{}'.format(x, y), FORMAT_MAP.keys()))\n for f in re.findall(pattern, fmt):\n fmt = fmt.replace('%{}'.format(f), FORMAT_MAP[f](self))\n return fmt", "def formatter(t: tuple):\n s = 'The {} numbers are: ' + '{}, '*(len(t)-1) + '{}'\n return s.format(len(t),*t)", "def __call__(self, x, pos=0):\r\n ind = int(np.round(x))\r\n if ind >= len(self.values) or ind < 0:\r\n return ''\r\n return '%3.1f [%d]' % (self.values[ind], ind)", "def __str__(self) -> str:\n return (\n f'[{self.x:g} {self.y:g} {self.z:g} '\n f'{self.offset:g}] {self.scale:g}'\n )", "def format_data(self, data):" ]
[ "0.6947464", "0.67461205", "0.65912247", "0.64709014", "0.64154315", "0.635182", "0.63386416", "0.63360775", "0.6247381", "0.62060654", "0.62060654", "0.6194569", "0.61453986", "0.6124619", "0.60353225", "0.60240173", "0.6023951", "0.60086197", "0.5962071", "0.5960708", "0.59420264", "0.59004515", "0.5899012", "0.5893569", "0.5889922", "0.58749235", "0.58674693", "0.5862834", "0.58400184", "0.58064485", "0.57950264", "0.5771977", "0.57640696", "0.57474816", "0.5740803", "0.5737269", "0.5722641", "0.5720287", "0.5703715", "0.56966007", "0.56965625", "0.5684961", "0.5674914", "0.5662088", "0.5656199", "0.5651374", "0.56325084", "0.56267214", "0.56243795", "0.56098366", "0.56018806", "0.55981845", "0.5595527", "0.5587158", "0.5570837", "0.55579436", "0.5557612", "0.5555771", "0.55488807", "0.5542995", "0.55287033", "0.55287033", "0.5519359", "0.55170995", "0.5507848", "0.5507739", "0.5504616", "0.54821205", "0.54821205", "0.54782915", "0.54659605", "0.5445396", "0.5435647", "0.54311264", "0.5428595", "0.5425652", "0.5425652", "0.54176956", "0.5404917", "0.53895545", "0.5367838", "0.5366894", "0.5354114", "0.5350499", "0.53500533", "0.5347649", "0.5345529", "0.53426176", "0.53393656", "0.53379047", "0.53303844", "0.532869", "0.5317921", "0.53107905", "0.53017175", "0.52981514", "0.529721", "0.52964205", "0.5280113", "0.5279462", "0.5278893" ]
0.0
-1
Build or update a Ticker's metrics using a Quotecast object. Only the metrics which can be converted to float are supported, but that should be enough to handle all the real use cases.
def build_ticker_from_quotecast( quotecast: Quotecast, references: Dict[int, List[str]] = None, ticker: Ticker = None, ) -> Ticker: if references is None: references = dict() if ticker is None: ticker = Ticker() # SETUP PRODUCTS & METRICS message_array = json.loads(quotecast.json_data) for message in message_array: if message["m"] == "un": reference = message["v"][0] value = message["v"][1] product, metric = references[reference] ticker.products[product].metrics[metric] = value elif message["m"] == "us": reference = message["v"][0] value = message["v"][1] product, metric = references[reference] if value[4] == "-": date = datetime.datetime.strptime( value, "%Y-%m-%d", ) value = datetime.datetime.timestamp(date) ticker.products[product].metrics[metric] = value elif value[2] == ":": time = datetime.time.fromisoformat(value) value = time.hour * 3600 + time.minute * 60 + time.second ticker.products[product].metrics[metric] = value else: # NOT CONVERTIBLE TO FLOAT raise RuntimeWarning( "Unsupported string metric : " f"{metric} = {message}" ) elif message["m"] == "a_req": references[message["v"][1]] = message["v"][0].rsplit( sep=".", maxsplit=1, ) elif message["m"] == "a_rel": delete_list = [] for reference in references: if ".".join(references[reference]) == message["v"][0]: delete_list.append(reference) for reference in delete_list: del references[reference] elif message["m"] == "h": pass elif message["m"] == "ue": pass elif message["m"] == "d": raise AttributeError(f"Subscription rejected : {message}") else: raise AttributeError(f"Unknown metric : {message}") # SETUP PRODUCT LIST ticker.product_list.extend(ticker.products) # SETUP METADATA ticker.metadata.MergeFrom(quotecast.metadata) return ticker
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update(self):\n self.data.update()\n stats = self.data.stats\n ticker = self.data.ticker\n\n if self.type == \"exchangerate\":\n self._attr_state = ticker[self._currency].p15min\n self._attr_unit_of_measurement = self._currency\n elif self.type == \"trade_volume_btc\":\n self._attr_state = f\"{stats.trade_volume_btc:.1f}\"\n elif self.type == \"miners_revenue_usd\":\n self._attr_state = f\"{stats.miners_revenue_usd:.0f}\"\n elif self.type == \"btc_mined\":\n self._attr_state = str(stats.btc_mined * 0.00000001)\n elif self.type == \"trade_volume_usd\":\n self._attr_state = f\"{stats.trade_volume_usd:.1f}\"\n elif self.type == \"difficulty\":\n self._attr_state = f\"{stats.difficulty:.0f}\"\n elif self.type == \"minutes_between_blocks\":\n self._attr_state = f\"{stats.minutes_between_blocks:.2f}\"\n elif self.type == \"number_of_transactions\":\n self._attr_state = str(stats.number_of_transactions)\n elif self.type == \"hash_rate\":\n self._attr_state = f\"{stats.hash_rate * 0.000001:.1f}\"\n elif self.type == \"timestamp\":\n self._attr_state = stats.timestamp\n elif self.type == \"mined_blocks\":\n self._attr_state = str(stats.mined_blocks)\n elif self.type == \"blocks_size\":\n self._attr_state = f\"{stats.blocks_size:.1f}\"\n elif self.type == \"total_fees_btc\":\n self._attr_state = f\"{stats.total_fees_btc * 0.00000001:.2f}\"\n elif self.type == \"total_btc_sent\":\n self._attr_state = f\"{stats.total_btc_sent * 0.00000001:.2f}\"\n elif self.type == \"estimated_btc_sent\":\n self._attr_state = f\"{stats.estimated_btc_sent * 0.00000001:.2f}\"\n elif self.type == \"total_btc\":\n self._attr_state = f\"{stats.total_btc * 0.00000001:.2f}\"\n elif self.type == \"total_blocks\":\n self._attr_state = f\"{stats.total_blocks:.0f}\"\n elif self.type == \"next_retarget\":\n self._attr_state = f\"{stats.next_retarget:.2f}\"\n elif self.type == \"estimated_transaction_volume_usd\":\n self._attr_state = f\"{stats.estimated_transaction_volume_usd:.2f}\"\n elif self.type == \"miners_revenue_btc\":\n self._attr_state = f\"{stats.miners_revenue_btc * 0.00000001:.1f}\"\n elif self.type == \"market_price_usd\":\n self._attr_state = f\"{stats.market_price_usd:.2f}\"", "def set_metrics(self):", "def __init__(self):\n super(ASYMMETRIC, self).__init__(quant_type=Constants.QZ_ASYMMETRIC)", "def test_update_derived_metric(self):\n pass", "def compute_track_metrics(use_async=CELERY_ENABLED):\n\n # arbitrary field check to not update already loaded tracks\n for track in Track.objects.filter(duration__is_null=False):\n if use_async:\n async_set_metrics.delay(track)\n else:\n sleep(2)\n track.set_metrics()", "def map_to_ticker(self, raw_ticker: HitbtcRawTickerModel) -> HitbtcTickerModel:\n\n symbol = raw_ticker[\"symbol\"]\n low = Decimal(raw_ticker[\"low\"])\n high = Decimal(raw_ticker[\"high\"])\n volume = Decimal(raw_ticker[\"volume\"])\n volume_quote = Decimal(raw_ticker[\"volumeQuote\"])\n timestamp = raw_ticker[\"timestamp\"]\n raw_ask = raw_ticker[\"ask\"]\n ask = Decimal(raw_ask) if raw_ask is not None else raw_ask\n raw_bid = raw_ticker[\"bid\"]\n bid = Decimal(raw_bid) if raw_bid is not None else raw_bid\n raw_last = raw_ticker[\"last\"]\n last = Decimal(raw_last) if raw_last is not None else raw_last\n raw_open = raw_ticker[\"open\"]\n open_ = Decimal(raw_open) if raw_open is not None else raw_open\n\n ticker = HitbtcTickerModel(\n symbol=symbol,\n low=low,\n high=high,\n volume=volume,\n volume_quote=volume_quote,\n timestamp=timestamp,\n ask=ask,\n bid=bid,\n last=last,\n open=open_)\n\n return 
ticker", "def calculate_metrics_single_ticker_via_celery(tuple, tca_request, dummy_market):\n\n return tca_ticker_loader.calculate_metrics_single_ticker(tuple, tca_request, dummy_market)", "def to_metrics(self, res: Union[float, Dict[str, float]]) -> float:\n if isinstance(res, dict):\n return list(res.values())[0]\n if isinstance(res, (tuple, list)):\n return res[0]\n return res", "def metrics_builder(metrics_dict):\n df = pd.DataFrame(metrics_dict[\"combined_delay\"].mean(axis=0), columns=[\"Metrics\"])\n df.loc[\"Max_Actual_Delay\"] = metrics_dict[\"combined_delay\"][\"Actual_Delay\"].loc[metrics_dict[\"actual_max_index\"]]\n df.loc[\"Min_Actual_Delay\"] = metrics_dict[\"combined_delay\"][\"Actual_Delay\"].loc[metrics_dict[\"actual_min_index\"]]\n df.loc[\"Max_Predicted_Delay\"] = metrics_dict[\"combined_delay\"][\"Predicted_Delay\"].loc[\n metrics_dict[\"predicted_max_index\"]]\n df.loc[\"Min_Predicted_Delay\"] = metrics_dict[\"combined_delay\"][\"Predicted_Delay\"].loc[\n metrics_dict[\"predicted_min_index\"]]\n df.loc[\"Mean_Absolute_Error\"] = metrics_dict[\"MAE\"]\n df.loc[\"R2\"] = metrics_dict[\"R2\"]\n df.loc[\"Median_Absolute_Error\"] = metrics_dict[\"MEDAE\"]\n df.loc[\"Root_Mean_Squared_Error\"] = metrics_dict[\"RMSE\"]\n df.loc[\"Mean_Squared_Log_Error\"] = metrics_dict[\"MSLE\"]\n df = df.rename(index={\"Actual_Delay\": \"Actual_Delay_Mean\", \"Predicted_Delay\": \"Predicted_Delay_Mean\",\n \"Difference_In_Delay\": \"Difference_In_Delay_Mean\"})\n return df", "def _measurement_update(self):\n pass", "def create_metric(self) -> EvalMetric:\n pass", "async def update_trend_data(self, dt=None):\n for scale in valid_scales:\n await self.get_trend_data(scale, dt)", "def send_metrics(timestamp: Optional[float] = None) -> bool:\n\n def new_point(metric_name: str, result: float):\n series = monitoring_v3.types.TimeSeries()\n series.metric.type = f\"custom.googleapis.com/{metric_name}\"\n\n point = series.points.add()\n point.interval.end_time.seconds = now\n\n if isinstance(result, float):\n point.value.double_value = result\n else:\n point.value.int64_value = result\n return series\n\n now = int(time.time())\n prev_minute_tstamp = timestamp or (now - (now % 60) - 60)\n metrics_pattern = f\"{Monitoring.ACC_PREFIX}_{prev_minute_tstamp}_*\"\n monitoring_keys = redis_client.keys(metrics_pattern)\n all_series = []\n for metric_key in monitoring_keys:\n raw_value = redis_client.get(metric_key)\n values: List[str] = raw_value.split(\"|\") # type: ignore\n metric_name = values.pop(0) # metric name\n op = values.pop(0) # operation - SUM or AVG\n typ = values.pop(0) # INT or FLOAT\n if typ == \"INT\":\n result = sum(map(int, values))\n if op == \"AVG\":\n result = result // len(values)\n else:\n result = sum(map(float, values)) # type: ignore\n if op == \"AVG\":\n result = result / len(values) # type: ignore\n\n all_series.append(new_point(metric_name, result))\n if op == \"AVG\": # create count for AVG metric too\n all_series.append(new_point(f\"{metric_name}_COUNT\", len(values)))\n\n try:\n monitor_client.create_time_series(project_path, all_series)\n except InvalidArgument:\n logging.exception(\"mark_point failed\")\n return False\n else:\n return True", "def conform_input_data(rowdict):\n # rowdict['Value'] = float(rowdict['Value'])\n rowdict['TimeStamp'] = TS_to_date(rowdict['TimeStamp'][:19])\n for floatcolumn in ['LowPx','OpenPx','ClosePx','QuoteCount','HighPx','TradeCount']:\n if floatcolumn in rowdict:\n rowdict[floatcolumn] = float(rowdict[floatcolumn])\n return rowdict", "def 
_calculate_custom_data(self):\n if self.limit is not None:\n self.data['pct'] = self.usage * 100.0 / self.limit\n if self.units == 'hours':\n self.time = timedelta(hours=self.usage)\n self.data['name'] = self.id", "def update_tick(self, tick: TickData):\n new_minute = False\n self.last_price = tick.last_price\n self.open_interest = tick.open_interest\n self.volume = tick.volume\n\n # 更新均价线\n self.molecule = self.molecule + tick.last_price * tick.volume\n self.denominator = self.denominator + tick.volume\n try:\n self.average_price = self.molecule / self.denominator\n except ZeroDivisionError:\n self.average_price = tick.last_price\n\n if self.last_volume is None:\n self.last_volume = tick.volume\n if self.local_symbol is None:\n self.local_symbol = tick.local_symbol\n if not self.bar:\n new_minute = True\n elif self.bar.datetime.minute != tick.datetime.minute:\n self.bar.datetime = self.bar.datetime.replace(\n second=0, microsecond=0\n )\n self.bar.interval = 1\n event = Event(type=EVENT_BAR, data=self.bar)\n self.rpo.put(event)\n [self.update_bar(x, getattr(self, \"min_{}_bar\".format(x)), self.bar) for x in self.XMIN]\n new_minute = True\n if new_minute:\n if self.app.config.get(\"SHARED_FUNC\"):\n shared = SharedData(last_price=round(self.last_price, 2), datetime=tick.datetime,\n local_symbol=self.local_symbol,\n open_interest=self.open_interest, average_price=round(self.average_price, 2),\n volume=self.volume - self.last_volume, gateway_name=tick.gateway_name)\n event = Event(type=EVENT_SHARED, data=shared)\n self.rpo.put(event)\n self.last_volume = tick.volume\n\n self.bar = BarData(\n symbol=tick.symbol,\n exchange=tick.exchange,\n datetime=tick.datetime,\n gateway_name=tick.gateway_name,\n open_price=tick.last_price,\n high_price=tick.last_price,\n low_price=tick.last_price,\n close_price=tick.last_price,\n )\n else:\n self.bar.high_price = max(self.bar.high_price, tick.last_price)\n self.bar.low_price = min(self.bar.low_price, tick.last_price)\n self.bar.close_price = tick.last_price\n self.bar.datetime = tick.datetime\n\n if self.last_tick:\n volume_change = tick.volume - self.last_tick.volume\n self.bar.volume += max(volume_change, 0)\n self.last_tick = tick", "def _get_measurements_with_derived_metrics(self, measurements):\n\n now = time.time()\n\n def metrics_available(*names):\n return all(name in self._event_names and name in measurements\n and name in self._prev_measurements for name in names)\n\n def delta(*names):\n return [measurements[name] - self._prev_measurements[name] for name in names]\n\n # if specific pairs are available calculate derived metrics\n if self._prev_measurements is not None:\n time_delta = now - self._prev_ts\n\n if metrics_available(MetricName.INSTRUCTIONS, MetricName.CYCLES):\n inst_delta, cycles_delta = delta(MetricName.INSTRUCTIONS,\n MetricName.CYCLES)\n if cycles_delta > 0:\n measurements[DerivedMetricName.IPC] = float(inst_delta) / cycles_delta\n\n if time_delta > 0:\n measurements[DerivedMetricName.IPS] = float(inst_delta) / time_delta\n\n if metrics_available(MetricName.INSTRUCTIONS, MetricName.CACHE_MISSES):\n inst_delta, cache_misses_delta = delta(MetricName.INSTRUCTIONS,\n MetricName.CACHE_MISSES)\n if inst_delta > 0:\n measurements[DerivedMetricName.CACHE_MISSES_PER_KILO_INSTRUCTIONS] = \\\n float(cache_misses_delta) * 1000 / inst_delta\n\n if metrics_available(MetricName.CACHE_REFERENCES, MetricName.CACHE_MISSES):\n cache_ref_delta, cache_misses_delta = delta(MetricName.CACHE_REFERENCES,\n MetricName.CACHE_MISSES)\n if cache_ref_delta > 
0:\n cache_hits_count = cache_ref_delta - cache_misses_delta\n measurements[DerivedMetricName.CACHE_HIT_RATIO] = (\n float(cache_hits_count) / cache_ref_delta)\n\n self._prev_measurements = measurements\n self._prev_ts = now\n\n return measurements", "def _bs_data_transform(self, qtls, qtlhdrs):\n data0 = self._data_transform()\n data0 = data0[data0[\"origin\"] != \"total\"]\n data1 = self._get_quantiles_by_devp(qtls, qtlhdrs)\n data1 = data1[data1[\"origin\"] != \"total\"]\n data = data0.merge(data1, on=[\"origin\", \"dev\"], how=\"left\")\n\n # Remove qtlhdrs values where rectype==\"actual\".\n for qtlhdr in qtlhdrs:\n data[qtlhdr] = np.where(\n data[\"rectype\"].values == \"actual\", np.NaN, data[qtlhdr].values\n )\n\n # Determine the first forecast period by origin, and set q-fields to actuals.\n increment = np.unique(self.ldfs.index[1:] - self.ldfs.index[:-1])[0]\n data[\"_ff\"] = np.where(\n data[\"rectype\"].values == \"forecast\",\n data[\"dev\"].values, data[\"dev\"].values.max() + increment\n )\n data[\"_minf\"] = data.groupby([\"origin\"])[\"_ff\"].transform(\"min\")\n for hdr in qtlhdrs:\n data[hdr] = np.where(\n np.logical_and(\n data[\"rectype\"].values == \"forecast\",\n data[\"_minf\"].values == data[\"dev\"].values\n ), data[\"loss\"].values, data[hdr].values\n )\n\n data = data.drop([\"_ff\", \"_minf\"], axis=1).reset_index(drop=True)\n dfv = data[[\"origin\", \"dev\", \"rectype\", \"loss\"]]\n dfl = data[[\"origin\", \"dev\", \"rectype\", qtlhdrs[0]]]\n dfu = data[[\"origin\", \"dev\", \"rectype\", qtlhdrs[-1]]]\n dfl[\"rectype\"] = qtlhdrs[0]\n dfl = dfl.rename({qtlhdrs[0]: \"loss\"}, axis=1)\n dfu[\"rectype\"] = qtlhdrs[-1]\n dfu = dfu.rename({qtlhdrs[-1]: \"loss\"}, axis=1)\n return(pd.concat([dfv, dfl, dfu]).sort_index().reset_index(drop=True))", "def set_offensive_ratio(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n opp_team = self.get_opp_team_stats()\n if bx[\"minutes\"] > 0 and (bx[\"t2p_int\"] + bx[\"t3p_int\"]) > 0:\n fgm = bx[\"t2p_conv\"] + bx[\"t3p_conv\"]\n fga = bx[\"t2p_int\"] + bx[\"t3p_int\"]\n team_fgm = team[\"t2p_conv\"] + team[\"t3p_conv\"]\n team_fga = team[\"t2p_int\"] + team[\"t3p_int\"]\n team_points = team[\"t2p_conv\"]*2 + team[\"t3p_conv\"]*3 + team[\"tl_conv\"]\n points = bx[\"t2p_conv\"]*2 + bx[\"t3p_conv\"]*3 + bx[\"tl_conv\"]\n\n try:\n qAST = (Decimal(bx[\"minutes\"] / (team[\"minutes\"] / 5)) * (Decimal('1.14') * Decimal((team[\"assists\"] - bx[\"assists\"]) / team_fgm))) + \\\n Decimal((((team[\"assists\"] / team[\"minutes\"]) * bx[\"minutes\"] * 5 - bx[\"assists\"]) / ((team_fgm / team[\"minutes\"]) * bx[\"minutes\"] * 5 - fgm)) * (1 - (bx[\"minutes\"] / (team[\"minutes\"] / 5))))\n except ZeroDivisionError:\n print(BCOLORS.WARNING + \"Error: División por cero\" + BCOLORS.ENDC)\n qAST = 1\n except InvalidOperation:\n print(BCOLORS.WARNING + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n qAST = 1\n\n fg_part = fgm * (1 - Decimal('0.5') * Decimal((points - bx[\"tl_conv\"]) / (2 * fga)) * qAST)\n\n try:\n ast_part = Decimal('0.5') * Decimal(((team_points - team[\"tl_conv\"]) - (points - bx[\"tl_conv\"])) / (2*(team_fga - fga))) * bx[\"assists\"]\n except ZeroDivisionError:\n print(BCOLORS.WARNING + \"Error: División por cero\" + BCOLORS.ENDC)\n ast_part = 0\n\n if bx[\"tl_int\"] > 0:\n ft_part = Decimal(1 - (1 - (bx[\"tl_conv\"] / bx[\"tl_int\"]))**2) * Decimal('0.4') * bx[\"tl_int\"]\n else:\n ft_part = 0\n team_scoring_poss = Decimal(team_fgm + Decimal(1 - (1 - (team[\"tl_conv\"] / team[\"tl_int\"]))**2) * 
team[\"tl_int\"] * Decimal('0.4'))\n try:\n team_orb_percentage = Decimal(team[\"reb_of\"] / (team[\"reb_of\"] + ((opp_team[\"reb_def\"] + opp_team[\"reb_of\"]) - opp_team[\"reb_of\"])))\n except ZeroDivisionError:\n print(BCOLORS.FAIL + \"Error: División por cero\" + BCOLORS.ENDC)\n team_orb_percentage = 0\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n team_orb_percentage = 0\n\n team_play_percentage = Decimal(team_scoring_poss / (team_fga + team[\"tl_int\"] * Decimal('0.4') + team[\"turnovers\"]))\n try:\n team_orb_weight = ((1 - team_orb_percentage) * team_play_percentage) / ((1 - team_orb_percentage) * team_play_percentage + team_orb_percentage * (1 - team_play_percentage))\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n team_orb_weight = 0\n\n orb_part = bx[\"reb_of\"] * team_orb_weight * team_play_percentage\n\n fg_x_poss = (fga - fgm) * (1 - Decimal('1.07') * team_orb_percentage)\n if bx[\"tl_conv\"] > 0:\n ft_x_poss = Decimal((1 - (bx[\"tl_conv\"] / bx[\"tl_int\"]))**2) * Decimal('0.4') * bx[\"tl_int\"]\n else:\n ft_x_poss = Decimal(1 - (bx[\"tl_conv\"] / 1)**2) * Decimal('0.4') * bx[\"tl_int\"]\n try:\n sc_poss = (fg_part + ast_part + ft_part) * (1 - (team[\"reb_of\"] / team_scoring_poss) * team_orb_weight * team_play_percentage) + orb_part\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n sc_poss =0\n\n tot_poss = sc_poss + fg_x_poss + ft_x_poss + bx[\"turnovers\"]\n\n pprod_fg_part = 2 * (fgm + Decimal('0.5') * bx[\"t3p_conv\"]) * (1 - Decimal('0.5') * Decimal((points - bx[\"tl_conv\"]) / (2 * fga)) * qAST)\n\n try:\n pprod_ast_part = 2 * ((team_fgm - fgm + Decimal('0.5') * (team[\"t3p_conv\"] - bx[\"t3p_conv\"])) / (team_fgm - fgm)) * Decimal('0.5') * Decimal(((team_points - team[\"tl_conv\"]) - (points - bx[\"tl_conv\"])) / (2 * (team_fga - fga))) * bx[\"assists\"]\n except:\n pprod_ast_part = 0\n\n pprod_orb_part = bx[\"reb_of\"] * team_orb_weight * team_play_percentage * (team_points / (team_fgm + Decimal(1 - (team[\"tl_conv\"] / team[\"tl_int\"])**2) * Decimal('0.4') * team[\"tl_int\"]))\n try:\n pprod = (pprod_fg_part + pprod_ast_part + bx[\"tl_conv\"]) * (1 - (team[\"reb_of\"] / team_scoring_poss) * team_orb_weight * team_play_percentage) + pprod_orb_part\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n pprod = 0\n\n try:\n result = 100 * (pprod / tot_poss)\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n result = 0\n\n # print(\"fgm: \" + str(fgm))\n # print(\"fga: \" + str(fga))\n # print(\"team_fgm: \" + str(team_fgm))\n # print(\"team_fga: \" + str(team_fga))\n # print(\"team_points: \" + str(team_points))\n # print(\"points: \" + str(points))\n # print(\"qAST: \" + str(qAST))\n # print(\"fg_part: \" + str(fg_part))\n # print(\"ast_part: \" + str(ast_part))\n # print(\"ft_part: \" + str(ft_part))\n # print(\"team_scoring_poss: \" + str(team_scoring_poss))\n # print(\"team_orb_percentage: \" + str(team_orb_percentage))\n # print(\"team_play_percentage: \" + str(team_play_percentage))\n # print(\"team_orb_weight: \" + str(team_orb_weight))\n # print(\"orb_part: \" + str(orb_part))\n # print(\"fg_x_poss: \" + str(fg_x_poss))\n # print(\"ft_x_poss: \" + str(ft_x_poss))\n # print(\"sc_poss: \" + str(sc_poss))\n # print(\"tot_poss: \" + str(tot_poss))\n # print(\"pprod_fg_part: \" + str(pprod_fg_part))\n # print(\"pprod_ast_part: \" + 
str(pprod_ast_part))\n # print(\"pprod_orb_part: \" + str(pprod_orb_part))\n # print(\"pprod: \" + str(pprod))\n # print(\"result: \" + str(result) + \"\\n\")\n else:\n result = 0.00\n\n self.ortg = \"%.2f\" % round(result, 2)\n if Decimal(self.ortg) < 0 or Decimal(self.ortg) >= 1000:\n \"\"\"For one game, maybe we've got a negative result or one so big, so, for just only a game, we get the ORTG \n using team's formula\"\"\"\n print(BCOLORS.OKBLUE + \"ORTG negativo o superior a 1000 para jugadora => recalculamos a través de la fórmula de equipo\" + BCOLORS.ENDC)\n bx = self.get_standard_stats()\n result = round((bx[\"t2p_conv\"]*2 + bx[\"t3p_conv\"]*3 + bx[\"tl_conv\"])/self.get_team_possessions(), 2)\n self.ortg = \"%.2f\" % result", "def update_tick(self, tick: TickData):\n new_minute = False\n\n # Filter tick data with 0 last price\n if not tick.last_price:\n return\n\n if not self.bar:\n new_minute = True\n elif self.bar.datetime.minute != tick.datetime.minute:\n self.bar.datetime = self.bar.datetime.replace(\n second=0, microsecond=0\n )\n self.bar.datetime = self.local_to_timezone(self.bar.datetime)\n self.on_bar(self.bar)\n\n new_minute = True\n\n if new_minute:\n self.bar = BarData(\n symbol=tick.symbol,\n exchange=tick.exchange,\n interval=Interval.MINUTE,\n datetime=tick.datetime,\n gateway_name=tick.gateway_name,\n open_price=tick.last_price,\n high_price=tick.last_price,\n low_price=tick.last_price,\n close_price=tick.last_price,\n open_interest=tick.open_interest\n )\n else:\n self.bar.high_price = max(self.bar.high_price, tick.last_price)\n self.bar.low_price = min(self.bar.low_price, tick.last_price)\n self.bar.close_price = tick.last_price\n self.bar.open_interest = tick.open_interest\n self.bar.datetime = tick.datetime\n\n if self.last_tick:\n volume_change = tick.volume - self.last_tick.volume\n self.bar.volume += max(volume_change, 0)\n\n self.last_tick = tick", "def _update_from_data(self, data):\n try:\n self.channelId = data[\"channelId\"]\n except (KeyError, TypeError):\n raise ValueError(\"Foretold data missing or invalid\")\n\n # If floatCdf is not available, we can just keep it as None\n try:\n self.floatCdf = data[\"previousAggregate\"][\"value\"][\"floatCdf\"]\n except (KeyError, TypeError):\n self.floatCdf = None", "def Update(self):\n print(f\"Updating {self.name} from yfinance API...\")\n import yfinance as yf\n import datetime\n stock = yf.Ticker(self._symbol)\n if (self.name == None or self.name == self.symbol) and stock.info is not None:\n if \"shortName\" in stock.info:\n self.name = stock.info['shortName']\n yhistory = stock.history(period=\"max\")\n print(yhistory)\n\n dividends = []\n for date, row in yhistory.iterrows():\n dividend_today = row['Dividends']\n dividends.append((date, dividend_today))\n if dividend_today != 0.:\n while date - dividends[0][0] > datetime.timedelta(days=360):\n dividends.remove(dividends[0])\n else:\n while date - dividends[0][0] > datetime.timedelta(days=370):\n dividends.remove(dividends[0])\n\n annualDividend = 0.\n for dividend in dividends:\n annualDividend += dividend[1]\n \n self.AddSnapshot(price=row['Open'], date=date, dividend=dividend_today, annualDividend=annualDividend)\n #self.AddSnapshot(price=row['Close'], date=date, annualDividend=annualDividend)\n\n try:\n self.short_percent_of_float = stock.info['shortPercentOfFloat']\n except(KeyError):\n self.short_percent_of_float = 0.\n try:\n self.pe_ratio = stock.info['forwardPE']\n except(KeyError, TypeError):\n self.pe_ratio = float('inf')\n\n print(f\"History for 
{self.name} updated.\")", "def _build_target_quantile_values_op(self):\n batch_size = tf.shape(self._replay.rewards)[0]\n ###### Munchausen-specific\n replay_action_one_hot = tf.one_hot(\n self._replay.actions, self.num_actions, 1., 0., name='action_one_hot')\n # tau * ln pi_k+1 (s')\n replay_next_log_policy = utils.stable_scaled_log_softmax(\n self._replay_next_target_q_values, self.tau, axis=1)\n # tau * ln pi_k+1(s)\n replay_log_policy = utils.stable_scaled_log_softmax(\n self._replay_target_q_values, self.tau, axis=1)\n replay_next_policy = utils.stable_softmax( # pi_k+1(s')\n self._replay_next_target_q_values, self.tau, axis=1)\n\n tau_log_pi_a = tf.reduce_sum( # ln pi_k+1(a|s)\n replay_log_policy * replay_action_one_hot, axis=1)\n\n tau_log_pi_a = tf.clip_by_value(\n tau_log_pi_a, clip_value_min=self.clip_value_min, clip_value_max=0)\n\n munchuasen_term = self.alpha * tau_log_pi_a\n #########\n\n # Shape of rewards: (num_tau_prime_samples x batch_size) x 1.\n rewards = self._replay.rewards[:, None] + munchuasen_term[Ellipsis, None]\n rewards = tf.tile(rewards, [self.num_tau_prime_samples, 1])\n\n is_terminal_multiplier = 1. - tf.cast(self._replay.terminals, tf.float32)\n # Incorporate terminal state to discount factor.\n # size of gamma_with_terminal: (num_tau_prime_samples x batch_size) x 1.\n gamma_with_terminal = self.cumulative_gamma * is_terminal_multiplier\n gamma_with_terminal = tf.tile(gamma_with_terminal[:, None],\n [self.num_tau_prime_samples, 1])\n\n # shape: (batch_size * num_tau_prime_samples) x num_actions\n replay_next_policy_ = tf.tile(replay_next_policy,\n [self.num_tau_prime_samples, 1])\n replay_next_log_policy_ = tf.tile(replay_next_log_policy,\n [self.num_tau_prime_samples, 1])\n\n # shape: (batch_size * num_tau_prime_samples) x 1\n replay_quantile_values = tf.reshape(\n self._replay_net_target_quantile_values,\n [batch_size * self.num_tau_prime_samples, self.num_actions])\n\n # shape: (batch_size * num_tau_prime_samples) x num_actions\n weighted_logits = (\n replay_next_policy_ * (replay_quantile_values\n - replay_next_log_policy_))\n\n # shape: (batch_size * num_tau_prime_samples) x 1\n target_quantile_values = tf.reduce_sum(weighted_logits, axis=1,\n keepdims=True)\n\n return rewards + gamma_with_terminal * target_quantile_values", "def convert(self, data, *args, **kwargs):\n\n # all of this is still quite ugly and verrrry specific...\n json_data = {}\n for hit in data[\"hits\"][\"hits\"]:\n # pprint(hit)\n\n # get the PQ\n pq = hit.get(\"_source\", {}).get(\"metadata\", {}).get(\"PanDAQueue\", None)\n if not pq:\n continue\n\n # get the list of all benchmark results\n latest_list = (\n hit.get(\"inner_hits\", {})\n .get(\"most_recent\", {})\n .get(\"hits\", {})\n .get(\"hits\", [])\n )\n if len(latest_list) == 0:\n continue\n\n # get the average of the latest benchmark results.\n # Only results not older than 7d, and a maximum of 50 results (whichever value is hit first).\n # If we have no values more recent than 7d, simply use the last available one (that PQ is probably not online anymore anyway)\n values = []\n for d in latest_list:\n date = datetime.datetime.strptime(\n d.get(\"_source\", {}).get(\"timestamp\", \"\"), \"%Y-%m-%dT%H:%M:%SZ\"\n )\n two_days_ago = datetime.datetime.now() - datetime.timedelta(days=2)\n seven_days_ago = datetime.datetime.now() - datetime.timedelta(days=7)\n\n if date > two_days_ago:\n # we are within the last two days, so we take all the measurements we can get!\n values.append(d)\n elif (date < two_days_ago) and (date > 
seven_days_ago):\n # we are between 2 and 7 days ago, so take only values if we don't have 25 values already\n if len(values) < 30:\n values.append(d)\n elif date < seven_days_ago:\n # we are further away than 7 days, so take a maximum of 5 values from here if we don't have 5 yet\n if len(values) < 10:\n values.append(d)\n\n to_average = [\n i.get(\"_source\", {})\n .get(\"profiles\", {})\n .get(\"fastBmk\", {})\n .get(\"value\", 0.0)\n for i in values\n ]\n json_data[pq] = {\n \"avg_value\": float(sum(to_average)) / len(to_average),\n \"measurements\": len(to_average),\n }\n # print(len(to_average))\n\n return json_data", "def update_temperature_values(self):\n year = self._current_date.year\n month = self._current_date.month\n\n self.ensure_temperatures(dt.date(year, month, 15))\n self.set_temperature_arrays(dt.date(year, month, 15))", "def test_tag_rates_on_duplicate_metric_per_cost_type(self):\n tag_values_kwargs = [{\"value\": 0.2}]\n cost_model = {\n \"name\": \"Test Cost Model\",\n \"description\": \"Test\",\n \"source_type\": Provider.PROVIDER_OCP,\n \"providers\": [{\"uuid\": self.provider.uuid, \"name\": self.provider.name}],\n \"markup\": {\"value\": 10, \"unit\": \"percent\"},\n \"rates\": [\n {\"metric\": {\"name\": metric_constants.OCP_METRIC_CPU_CORE_USAGE_HOUR}},\n {\"metric\": {\"name\": metric_constants.OCP_METRIC_CPU_CORE_USAGE_HOUR}},\n ],\n \"currency\": \"USD\",\n }\n cost_model[\"rates\"][0][\"tag_rates\"] = format_tag_rate(tag_key=\"k1\", tag_values=tag_values_kwargs)\n cost_model[\"rates\"][1][\"tag_rates\"] = format_tag_rate(tag_key=\"k2\", tag_values=tag_values_kwargs)\n with tenant_context(self.tenant):\n serializer = CostModelSerializer(data=cost_model, context=self.request_context)\n self.assertTrue(serializer.is_valid(raise_exception=True))\n serializer.save()\n serializer.data", "def build_ticker_sample(\n number: int = 10,\n metric_list: List[str] = [\"l1\", \"l2\", \"l3\"],\n):\n\n ticker = Ticker()\n\n # SETUP METADATA\n ticker.metadata.response_datetime.GetCurrentTime()\n ticker.metadata.request_duration.FromNanoseconds(random.randrange(5 * 10 ** 9))\n\n # SETUP EXTRA-DATA\n for i in range(number):\n for metric in metric_list:\n ticker.products[i].metrics[metric] = random.uniform(0.0, 100.0)\n\n return ticker", "def get_quote(self, ticker):\r\n key = 'GLC0GTVKR51SY1V'\r\n quote_url = 'https://www.alphavantage.co/query?function=GLOBAL_QUOTE&symbol=' + ticker.upper() + '&apikey=' + key\r\n key_metrics_url = 'https://www.alphavantage.co/query?function=OVERVIEW&symbol=' + ticker.upper() + '&apikey=' + key\r\n\r\n quote_response = requests.get(quote_url)\r\n string = quote_response.json()\r\n\r\n key_metrics_response= requests.get(key_metrics_url)\r\n metrics_str = key_metrics_response.json()\r\n color_tag = None\r\n\r\n if quote_response and 'Global Quote' in string:\r\n\r\n current_price = round(float(string['Global Quote']['05. price']), 2)\r\n change = round(float(string['Global Quote']['09. change']), 2)\r\n change_pct = string['Global Quote']['10. change percent'][:5] + \"%\"\r\n previous_price = round(float(string['Global Quote']['08. 
previous close']), 2)\r\n\r\n yearly_high = metrics_str['52WeekHigh']\r\n mark_cap = round(int(metrics_str['MarketCapitalization'])/10E8, 2)\r\n mark_cap_str = str(mark_cap) + \"B\"\r\n\r\n if ticker not in self.holdings:\r\n self.holdings[ticker] = current_price\r\n tuples = [ticker, current_price, change, change_pct, yearly_high, mark_cap_str]\r\n\r\n if current_price > previous_price:\r\n color_tag = 'green'\r\n else:\r\n color_tag = 'red'\r\n self.treeview.insert(parent='', index='end', values=tuples, tags=(color_tag,))\r\n return current_price\r\n else:\r\n return None", "def upload_metrics(metrics_dict, project, dataset, table):\n # Credentials will be loaded from envvar $GOOGLE_APPLICATION_CREDENTIALS.\n bq_client = bigquery.Client(project=project)\n table_ref = bq_client.dataset(dataset).table(table)\n errors = bq_client.insert_rows_json(table_ref, metrics_dict)\n return errors", "def update(self):\n\n self.stats = statistics.get()\n self.ticker = exchangerates.get_ticker()", "def __init__(self, duration, value):\n super().__init__(duration)\n self._value = float(value)", "def SuperTrend(df, period, multiplier, ohlc=['open', 'high', 'low', 'close']):\n\n ATR(df, period, ohlc=ohlc) \n atr = 'ATR_' + str(period) \n st = 'ST_' + str(period) + '_' + str(multiplier) \n stx = 'STX_' + str(period) + '_' + str(multiplier) \n \"\"\" \n SuperTrend Algorithm : \n BASIC UPPERBAND = (HIGH + LOW) / 2 + Multiplier * ATR \n BASIC LOWERBAND = (HIGH + LOW) / 2 - Multiplier * ATR \n FINAL UPPERBAND = IF( (Current BASICUPPERBAND < Previous FINAL UPPERBAND) or (Previous Close > Previous FINAL UPPERBAND)) \n THEN (Current BASIC UPPERBAND) ELSE Previous FINALUPPERBAND) \n FINAL LOWERBAND = IF( (Current BASIC LOWERBAND > Previous FINAL LOWERBAND) or (Previous Close < Previous FINAL LOWERBAND)) \n THEN (Current BASIC LOWERBAND) ELSE Previous FINAL LOWERBAND) \n SUPERTREND = IF((Previous SUPERTREND = Previous FINAL UPPERBAND) and (Current Close <= Current FINAL UPPERBAND)) THEN \n Current FINAL UPPERBAND \n ELSE \n IF((Previous SUPERTREND = Previous FINAL UPPERBAND) and (Current Close > Current FINAL UPPERBAND)) THEN \n Current FINAL LOWERBAND \n ELSE \n IF((Previous SUPERTREND = Previous FINAL LOWERBAND) and (Current Close >= Current FINAL LOWERBAND)) THEN \n Current FINAL LOWERBAND \n ELSE \n IF((Previous SUPERTREND = Previous FINAL LOWERBAND) and (Current Close < Current FINAL LOWERBAND)) THEN \n Current FINAL UPPERBAND \n \"\"\" \n # Compute basic upper and lower bands \n df['basic_ub'] = (df[ohlc[1]] + df[ohlc[2]]) / 2 + multiplier * df[atr] \n df['basic_lb'] = (df[ohlc[1]] + df[ohlc[2]]) / 2 - multiplier * df[atr]\n\n # Compute final upper and lower bands \n df['final_ub'] = 0.00 \n df['final_lb'] = 0.00 \n for i in range(period, len(df)): \n df['final_ub'].iat[i] = df['basic_ub'].iat[i] if df['basic_ub'].iat[i] < df['final_ub'].iat[i - 1] or df['Close'].iat[i - 1] > df['final_ub'].iat[i - 1] else df['final_ub'].iat[i - 1] \n df['final_lb'].iat[i] = df['basic_lb'].iat[i] if df['basic_lb'].iat[i] > df['final_lb'].iat[i - 1] or df['Close'].iat[i - 1] < df['final_lb'].iat[i - 1] else df['final_lb'].iat[i - 1] \n # Set the Supertrend value \n df[st] = 0.00 \n for i in range(period, len(df)): \n df[st].iat[i] = df['final_ub'].iat[i] if df[st].iat[i - 1] == df['final_ub'].iat[i - 1] and df['Close'].iat[i] <= df['final_ub'].iat[i] else 0\n df['final_lb'].iat[i] if df[st].iat[i - 1] == df['final_ub'].iat[i - 1] and df['Close'].iat[i] > df['final_ub'].iat[i] else 0\n df['final_lb'].iat[i] if df[st].iat[i - 1] 
== df['final_lb'].iat[i - 1] and df['Close'].iat[i] >= df['final_lb'].iat[i] else 0\n df['final_ub'].iat[i] if df[st].iat[i - 1] == df['final_lb'].iat[i - 1] and df['Close'].iat[i] < df['final_lb'].iat[i] else 0.00 \n # Mark the trend direction up/down \n df[stx] = np.where((df[st] > 0.00), np.where((df[ohlc[3]] < df[st]), 'down', 'up'), np.NaN)\n\n # Remove basic and final bands from the columns \n df.drop(['basic_ub', 'basic_lb', 'final_ub', 'final_lb'], inplace=True, axis=1) \n df.fillna(0, inplace=True)\n\n return df", "def make_metric(name):\n return {\n \"type\": \"Metric\",\n \"name\": name,\n \"value\": \"\",\n \"units\": \"\",\n \"rating\": \"\",\n \"notes\": \"\",\n \"comment\": \"\",\n }", "def _standardise_dtypes_and_units(cube: Cube) -> None:\n\n def as_correct_dtype(obj: ndarray, required_dtype: dtype) -> ndarray:\n \"\"\"\n Returns an object updated if necessary to the required dtype\n\n Args:\n obj:\n The object to be updated\n required_dtype:\n The dtype required\n\n Returns:\n The updated object\n \"\"\"\n if obj.dtype != required_dtype:\n return obj.astype(required_dtype)\n return obj\n\n cube.data = as_correct_dtype(cube.data, get_required_dtype(cube))\n for coord in cube.coords():\n if coord.name() in TIME_COORDS and not check_units(coord):\n coord.convert_units(get_required_units(coord))\n req_dtype = get_required_dtype(coord)\n # ensure points and bounds have the same dtype\n if np.issubdtype(req_dtype, np.integer):\n coord.points = round_close(coord.points)\n coord.points = as_correct_dtype(coord.points, req_dtype)\n if coord.has_bounds():\n if np.issubdtype(req_dtype, np.integer):\n coord.bounds = round_close(coord.bounds)\n coord.bounds = as_correct_dtype(coord.bounds, req_dtype)", "def test_historical_metric_value(self):\n\n class Node:\n my_metric = Metric(Int64)\n\n exp_value = 42\n\n node = Node()\n my_metric = get_metric_object(node, 'my_metric')\n node.my_metric = Historical(exp_value)\n\n self.assertTrue(my_metric.is_historical(node))\n self.assertEqual(node.my_metric, exp_value)", "def generate_metrics_data(metricsquery: List, resultsquery: Dict, deltaminutes: int = 5, Region_name: str = None) -> Dict:\r\n cloudwatch=client('cloudwatch', region_name=Region_name) \r\n paginator = cloudwatch.get_paginator('get_metric_data')\r\n metricsgroup=grouper(metricsquery)\r\n resultsquery['ApiCalls']=0 \r\n for mqs in metricsgroup:\r\n for response in paginator.paginate(MetricDataQueries=mqs, StartTime=datetime.now()-timedelta(minutes=deltaminutes),EndTime=datetime.now()):\r\n for results in response['MetricDataResults']:\r\n resultsquery[results['Id']].append({'results':results})\r\n resultsquery['ApiCalls']+=1\r\n return resultsquery", "def __init__(self, _date: datetime=None, open: float=None, high: float=None, low: float=None, close: float=None, adj_close: float=None, volume: float=None, sma: float=None, ema: float=None, bb_top: float=None, bb_bottom: float=None, percent_b: float=None, rsi: float=None, cci: float=None, trade: float=None, holding: float=None, cash: float=None, value: float=None): # noqa: E501\n self.swagger_types = {\n '_date': datetime,\n 'open': float,\n 'high': float,\n 'low': float,\n 'close': float,\n 'adj_close': float,\n 'volume': float,\n 'sma': float,\n 'ema': float,\n 'bb_top': float,\n 'bb_bottom': float,\n 'percent_b': float,\n 'rsi': float,\n 'cci': float,\n 'trade': float,\n 'holding': float,\n 'cash': float,\n 'value': float\n }\n\n self.attribute_map = {\n '_date': 'date',\n 'open': 'open',\n 'high': 'high',\n 'low': 'low',\n 
'close': 'close',\n 'adj_close': 'adj_close',\n 'volume': 'volume',\n 'sma': 'sma',\n 'ema': 'ema',\n 'bb_top': 'bb_top',\n 'bb_bottom': 'bb_bottom',\n 'percent_b': 'percent_b',\n 'rsi': 'rsi',\n 'cci': 'cci',\n 'trade': 'trade',\n 'holding': 'holding',\n 'cash': 'cash',\n 'value': 'value'\n }\n self.__date = _date\n self._open = open\n self._high = high\n self._low = low\n self._close = close\n self._adj_close = adj_close\n self._volume = volume\n self._sma = sma\n self._ema = ema\n self._bb_top = bb_top\n self._bb_bottom = bb_bottom\n self._percent_b = percent_b\n self._rsi = rsi\n self._cci = cci\n self._trade = trade\n self._holding = holding\n self._cash = cash\n self._value = value", "def _update_suggested_precision(self) -> None:\n assert self.registry_entry\n\n device_class = self.device_class\n display_precision = self.suggested_display_precision\n default_unit_of_measurement = (\n self.suggested_unit_of_measurement or self.native_unit_of_measurement\n )\n unit_of_measurement = self.unit_of_measurement\n\n if (\n display_precision is not None\n and default_unit_of_measurement != unit_of_measurement\n and device_class in UNIT_CONVERTERS\n ):\n converter = UNIT_CONVERTERS[device_class]\n\n # Scale the precision when converting to a larger or smaller unit\n # For example 1.1 Wh should be rendered as 0.0011 kWh, not 0.0 kWh\n ratio_log = log10(\n converter.get_unit_ratio(\n default_unit_of_measurement, unit_of_measurement\n )\n )\n ratio_log = floor(ratio_log) if ratio_log > 0 else ceil(ratio_log)\n display_precision = max(0, display_precision + ratio_log)\n\n if display_precision is None and (\n DOMAIN not in self.registry_entry.options\n or \"suggested_display_precision\" not in self.registry_entry.options\n ):\n return\n sensor_options: Mapping[str, Any] = self.registry_entry.options.get(DOMAIN, {})\n if (\n \"suggested_display_precision\" in sensor_options\n and sensor_options[\"suggested_display_precision\"] == display_precision\n ):\n return\n\n registry = er.async_get(self.hass)\n sensor_options = dict(sensor_options)\n sensor_options.pop(\"suggested_display_precision\", None)\n if display_precision is not None:\n sensor_options[\"suggested_display_precision\"] = display_precision\n registry.async_update_entity_options(\n self.entity_id, DOMAIN, sensor_options or None\n )", "def _get_units(self, q) -> unyt.Unit:\n try:\n units = q.units\n except AttributeError:\n units = unyt.dimensionless\n return unyt.Unit(units, registry=self.registry)", "def get_device_property_values(self, **kwargs):\n\n results = self.get_empty_device_properties_dict()\n sweet_spots = kwargs.get('qubit_sweet_spots', {})\n if _device_db_client_module_missing:\n log.warning(\n \"Assembling the dictionary of high-level device \"\n \"property values requires the module 'device-db-client', which \"\n \"was not imported successfully.\")\n elif self.analysis:\n # Get the analysis parameters dictionary\n analysis_params_dict = self.analysis.proc_data_dict[\n 'analysis_params_dict']\n # For RamseyStep, the keys in `analysis_params_dict` are qubit names\n for qubit_name, qubit_results in analysis_params_dict.items():\n # This transition is not stored in RamseyAnalysis, so we must\n # get it from the settings parameters\n transition = self.get_param_value('transition_name',\n qubit=qubit_name)\n node_creator = db_utils.ValueNodeCreator(\n qubits=qubit_name,\n timestamp=self.analysis.timestamps[0],\n sweet_spots=sweet_spots.get(qubit_name),\n transition=transition,\n )\n # T2 Star Time for the exponential decay\n if 
'exp_decay' in qubit_results.keys(\n ) and 'T2_star' in qubit_results['exp_decay'].keys():\n results['property_values'].append(\n node_creator.create_node(\n property_type='T2_star',\n value=qubit_results['exp_decay']['T2_star']))\n\n # Updated qubit frequency\n if 'exp_decay' in qubit_results.keys(\n ) and f\"new_{transition}_freq\" in qubit_results[\n 'exp_decay'].keys():\n results['property_values'].append(\n node_creator.create_node(\n property_type='freq',\n value=qubit_results['exp_decay']\n ['new_{transition}_freq']))\n\n if 'T2_echo' in qubit_results.keys():\n results['property_values'].append(\n node_creator.create_node(\n property_type='T2_echo',\n value=qubit_results['T2_echo']))\n return results", "def calculate_metrics(metrics_data: List[Tuple[Metric, DataType]]) -> List[float]:\n pass", "def get_market_trade_holder_and_calculate_metrics_single_ticker_via_celery(tca_request, dummy_market):\n\n #from celery import group\n\n #return group(get_market_trade_holder_via_celery(_tca_request), get_trade_order_holder_via_celery(_tca_request))\n return tca_ticker_loader.calculate_metrics_single_ticker(tca_ticker_loader.get_market_trade_order_holder(tca_request),\n tca_request, dummy_market)", "def __init__(self,\n func=None,\n refresh_period=0,\n window_length=None,\n clean_nans=True,\n sids=None,\n fields=None,\n compute_only_full=True,\n bars='daily',\n downsample=False):\n if func is not None:\n self.compute_transform_value = func\n else:\n self.compute_transform_value = self.get_value\n\n self.clean_nans = clean_nans\n self.compute_only_full = compute_only_full\n # no need to down sample if the bars are already daily\n self.downsample = downsample and (bars == 'minute')\n\n # How many bars are in a day\n self.bars = bars\n if self.bars == 'daily':\n self.bars_in_day = 1\n elif self.bars == 'minute':\n self.bars_in_day = int(6.5 * 60)\n else:\n raise ValueError('%s bars not understood.' % self.bars)\n\n # The following logic is to allow pre-specified sid filters\n # to operate on the data, but to also allow new symbols to\n # enter the batch transform's window IFF a sid filter is not\n # specified.\n if sids is not None:\n if isinstance(sids, (string_types, Integral)):\n self.static_sids = set([sids])\n else:\n self.static_sids = set(sids)\n else:\n self.static_sids = None\n\n self.initial_field_names = fields\n if isinstance(self.initial_field_names, string_types):\n self.initial_field_names = [self.initial_field_names]\n self.field_names = set()\n\n self.refresh_period = refresh_period\n\n check_window_length(window_length)\n self.window_length = window_length\n\n self.trading_days_total = 0\n self.window = None\n\n self.full = False\n # Set to -inf essentially to cause update on first attempt.\n self.last_dt = pd.Timestamp('1900-1-1', tz='UTC')\n\n self.updated = False\n self.cached = None\n self.last_args = None\n self.last_kwargs = None\n\n # Data panel that provides bar information to fill in the window,\n # when no bar ticks are available from the data source generator\n # Used in universes that 'rollover', e.g. 
one that has a different\n # set of stocks per quarter\n self.supplemental_data = None\n\n self.rolling_panel = None\n self.daily_rolling_panel = None", "def update_metrics(self, metrics, predictions, labels):\n return", "def __init__(self, metrics_params):\n self.met_obs_start_dt = metrics_params['met_obs_start_dt']\n self.met_obs_end_dt = metrics_params['met_obs_end_dt']\n self.num_sats = metrics_params['num_sats']\n self.num_targ = metrics_params['num_targ']\n self.all_targ_IDs = metrics_params['all_targ_IDs']\n self.min_obs_dv_dlnk_req = metrics_params['min_obs_dv_dlnk_req']\n self.latency_calculation_params = metrics_params['latency_calculation_params']\n self.targ_id_ignore_list = metrics_params['targ_id_ignore_list']\n self.aoi_units = metrics_params['aoi_units']\n self.sats_emin_Wh = metrics_params['sats_emin_Wh']\n self.sats_emax_Wh = metrics_params['sats_emax_Wh']\n self.sats_dmin_Gb = metrics_params.get('sats_dmin_Gb',None)\n self.sats_dmax_Gb = metrics_params.get('sats_dmax_Gb',None)\n\n # the amount by which the minimum data volume is allowed to be lower than self.min_obs_dv_dlnk_req\n self.min_obs_dv_dlnk_req_slop = self.min_obs_dv_dlnk_req*0.01\n\n # if two downlink times are within this number of seconds, then they are counted as being at the same time for the purposes of AoI calculation\n self.dlnk_same_time_slop_s = metrics_params['timestep_s'] - 1", "def calculate_op_convert_info(\n self,\n seen_q_op_info: SeenQOpInfo,\n ) -> OpConvertInfo:\n # calculate new op\n maybe_new_op = get_quantized_op(\n seen_q_op_info, self.idx_to_seen_q_op_infos)\n\n # calculate quant infos\n arg_quant_infos, arg_dequant_infos, any_arg_quant_or_dequant_needed = \\\n get_input_args_quant_dequant_info(\n seen_q_op_info, self.tensor_id_to_scale_zp)\n\n # get packed param name, if applicable\n packed_param_name = self._get_packed_param_name(seen_q_op_info)\n\n # calculate scale and zp for output\n # TODO: instead of always doing this if there is an observer,\n # calculate whether this is needed based on the op and dtypes\n additional_kwargs = {}\n needs_scale_zp = converted_func_needs_scale_zp(seen_q_op_info)\n if needs_scale_zp:\n cur_seen_q_op_info = seen_q_op_info\n\n # if this is a start of a fusion pattern, get the observer\n # from the end of the fusion\n is_start_of_fusion = seen_q_op_info.fusion_info and \\\n seen_q_op_info.fusion_info.is_first_element\n if is_start_of_fusion:\n cur_seen_q_op_info = get_seen_q_op_info_of_end_of_fusion(\n seen_q_op_info, self.idx_to_seen_q_op_infos)\n\n output_tensor_infos = cur_seen_q_op_info.output_tensor_infos\n tensor_id = output_tensor_infos[0].id\n scale, zp = self.tensor_id_to_scale_zp[tensor_id]\n additional_kwargs.update({'scale': scale, 'zero_point': zp})\n\n any_arg_kwarg_modification_needed = bool(\n any_arg_quant_or_dequant_needed or\n packed_param_name is not None or\n len(additional_kwargs)\n ) # the cast to bool is to make mypy recognize this as a bool\n\n return maybe_new_op, arg_quant_infos, arg_dequant_infos, \\\n packed_param_name, additional_kwargs, any_arg_quant_or_dequant_needed, \\\n any_arg_kwarg_modification_needed", "def convert(self, value):\n\n\t\tif self.converter is not None:\n\t\t\treturn self.converter(value)\n\t\telif self.units is not None:\n\t\t\tq = Quantity(value)\n\t\t\tq.assert_dimensions(self.units)\n\n\t\t\treturn q\n\t\telse:\n\t\t\treturn value", "def test_is_historical(self):\n\n class Node:\n my_metric = Metric(Int64)\n\n exp_value = 42\n\n node = Node()\n node.my_metric = Historical(exp_value)\n my_metric = 
get_metric_object(node, 'my_metric')\n tahu_metric = my_metric.tahu_metric(node)\n self.assertTrue(tahu_metric.is_historical)\n self.assertEqual(tahu_metric.long_value, 42)", "def __init__(self, ts_df, time_format=\"%Y-%m-%d %H:%M:%S\", freq='D',\n fill_method='ffill',\n n_test=0, n_val=0,\n hyper_params=None,\n test='adf',\n trend=None,\n seasonal=False,\n seasonal_periods=1,\n **kwds):\n self._ts_df_cols = ['ds', 'y']\n\n self.ts_df = ts_df\n self.time_format = time_format\n self.freq = freq\n self.fill_method = fill_method.lower()\n self.n_test = int(n_test)\n self.n_val = int(n_val)\n self.transform = None\n self._boxcox_lmbda = None\n\n self._mode = ''\n\n self._train_dt = None\n self._test_dt = None\n self._val_dt = None\n\n self.model_fit = None\n self.fittedvalues = None\n self.residuals = None\n self.rmse = 0\n self._gs = tsa.GridSearchClass()\n self.hyper_params = hyper_params\n self.best_model = dict()\n\n \"\"\"\n self.rmse_test = 0\n self.rmse_val = 0\n \"\"\"\n\n self.upper_whisker_res = None\n self.lower_conf_int = None\n self.upper_conf_int = None\n\n self.forecast = None\n self.residuals_forecast = None\n\n self._res_decomp = None\n self._arr_seasonal = None\n self._arr_trend = None\n self._arr_baseline = None\n\n self._test = test\n self._trend = trend\n if self._trend is not None:\n self._trend = self._trend.lower()\n self._seasonal = seasonal\n if isinstance(self._seasonal, str):\n self._seasonal = self._seasonal.lower()\n self._seasonal_periods = seasonal_periods\n\n self._uvts_cls_logger = Logger('uvts_cls')\n\n UVariateTimeSeriesClass.assertions(self)\n # work with ts_df\n self.ts_df = self.ts_df.reset_index()\n self.ts_df.columns = self._ts_df_cols\n self.ts_df['y'] = self.ts_df['y'].apply(np.float64, errors='coerce')\n self.ts_df.set_index('ds', inplace=True)\n self._uvts_cls_logger.info(\n \"Received time series data of range: \" + str(min(self.ts_df.index)) + ' - ' + str(\n max(self.ts_df.index)) + \" and shape: \" + str(self.ts_df.shape))\n\n if not isinstance(self.ts_df.index, pd.DatetimeIndex):\n self._uvts_cls_logger.warning(\"Time conversion required...\")\n self.ts_df = self.ts_df.reset_index()\n try:\n self.ts_df['ds'] = self.ts_df['ds'].apply(\n lambda x: datetime.datetime.strptime(\n str(x).translate({ord('T'): ' ', ord('Z'): None})[:-1],\n self.time_format))\n except ValueError as e:\n self._uvts_cls_logger.warning(\"Zulu time conversion not successful: {}\".format(e))\n self._uvts_cls_logger.warning(\"Will try without assuming zulu time...\")\n try:\n self.ts_df['ds'] = self.ts_df['ds'].apply(\n lambda x: datetime.datetime.strptime(str(x), self.time_format))\n except ValueError as e:\n self._uvts_cls_logger.info(\"Time conversion not successful. 
Check your time_format: {}\".format(e))\n sys.exit(\"STOP\")\n else:\n self._uvts_cls_logger.info(\"Time conversion successful!\")\n else:\n self._uvts_cls_logger.info(\"Time conversion successful!\")\n # set index\n self.ts_df.set_index('ds', inplace=True)\n #\n self.ts_df.index = pd.to_datetime(self.ts_df.index)\n self.ts_df.sort_index(inplace=True)\n # resample\n self.ts_resample()\n UVariateTimeSeriesClass.assertions(self, post=True)\n #\n if self.n_val > len(self.ts_df) - self.n_test:\n self.n_val = len(self.ts_df) - self.n_test\n\n if self.n_test == 0 and self.n_val == 0:\n self._mode = 'forecast'\n elif self.n_test > 0:\n self._mode = 'test'\n elif self.n_test == 0 and self.n_val > 0:\n self._mode = 'validate'\n \n # delegate just for good programming style here\n super(UVariateTimeSeriesClass, self).__init__(**kwds)", "def quantify(self, samples=None):\n from scipy import stats\n\n if samples:\n ratios = [q['ratio'] for s,q in self.quantification.items() if s in samples]\n else:\n ratios = [q['ratio'] for q in self.quantification.values()]\n\n # Filter out 0 and NaN\n ratios = np.array([r for r in ratios if r != 0])\n ratios = ratios[~np.isnan(ratios)]\n\n log_ratios = np.log(ratios)\n\n t, p = stats.ttest_1samp(log_ratios, 0)\n\n self.avg_ratio = np.mean(ratios)\n self.p_value = p", "async def test_precision(hass: HomeAssistant) -> None:\n assert await async_setup_component(\n hass,\n \"sensor\",\n {\n \"sensor\": [\n {\n \"platform\": \"statistics\",\n \"name\": \"test_precision_0\",\n \"entity_id\": \"sensor.test_monitored\",\n \"state_characteristic\": \"mean\",\n \"sampling_size\": 20,\n \"precision\": 0,\n },\n {\n \"platform\": \"statistics\",\n \"name\": \"test_precision_3\",\n \"entity_id\": \"sensor.test_monitored\",\n \"state_characteristic\": \"mean\",\n \"sampling_size\": 20,\n \"precision\": 3,\n },\n ]\n },\n )\n await hass.async_block_till_done()\n\n for value in VALUES_NUMERIC:\n hass.states.async_set(\n \"sensor.test_monitored\",\n str(value),\n {ATTR_UNIT_OF_MEASUREMENT: UnitOfTemperature.CELSIUS},\n )\n await hass.async_block_till_done()\n\n mean = sum(VALUES_NUMERIC) / len(VALUES_NUMERIC)\n state = hass.states.get(\"sensor.test_precision_0\")\n assert state is not None\n assert state.state == str(int(round(mean, 0)))\n state = hass.states.get(\"sensor.test_precision_3\")\n assert state is not None\n assert state.state == str(round(mean, 3))", "async def update_measures(self):\n\n def function():\n return self._api.get_measures()\n\n self._measures = await self.call(function, throttle_domain=\"update_measures\")\n\n return self._measures", "def get_metric(self, metric, existing_dict=None):\n metric_key, metric_type, metric_name, metric_help = metric\n metric_dict = {\n 'name': metric_name,\n 'type': metric_type,\n 'help': metric_help,\n 'values': OrderedDict()\n }\n values = self.r.hgetall(metric_key) # new values\n # print \"values: %r\" % values\n metric_dict['values'] = values\n\n if existing_dict:\n # we're updating a metric we've already seen\n # print \"existing dict: %r\" % existing_dict\n for value in values:\n # print \"checking value: %r\" % value\n # value = json.loads(value)\n if value in existing_dict['values']:\n if metric_type == 'counter' or metric_type == 'histogram':\n # Combine the values if it's a counter or histogram\n # TODO: sort histogram buckets\n # TODO: append _bucket to histogram bucket names\n existing_dict['values'][value] = float(\n values[value]) + float(\n existing_dict['values'][value])\n elif metric_type == 'gauge':\n # use the 
last value we see for a gauge - # TODO: is there a better way? we could average it\n existing_dict['values'][value] = float(values[value])\n else:\n existing_dict['values'][value] = float(values[value])\n metric_dict['values'] = existing_dict['values']\n\n if metric_type == 'histogram':\n # json decode all of the labels\n samples = [json.loads(x, object_pairs_hook=OrderedDict) for x in metric_dict['values']]\n # we need to sort the values by the bucket labeled \"le\"\n sorted_keys = sorted(samples, key=lambda b: b['le'])\n # and then we need to store the values again json encoded\n vals = metric_dict['values']\n metric_dict['values'] = OrderedDict()\n for k in sorted_keys:\n kn = json.dumps(k, sort_keys=True)\n metric_dict['values'][kn] = vals[kn]\n\n return metric_dict", "def ticker_wrapper(ticker):", "def __init__(self, params, tech, dt):\n # financials_df = financials.fin_inputs\n ValueStream.__init__(self, tech, 'FR', dt)\n # self.fr_energyprice = params['energyprice']\n self.krd_avg = params['kd']\n self.kru_avg = params['ku']\n self.combined_market = params['CombinedMarket'] # boolean: true if storage bid as much reg up as reg down\n self.price = params['energy_price'] # TODO: require RT market price instead of DA\n self.p_regu = params['regu_price']\n self.p_regd = params['regd_price']\n self.growth = params['growth']\n self.energy_growth = params['energyprice_growth']\n self.duration = params['duration']\n\n self.variable_names = {'regu_c', 'regd_c', 'regu_d', 'regd_d'}\n self.variables = pd.DataFrame(columns=self.variable_names)\n # regulation up due to charging, regulation down due to charging, regulation up due to discharging, regulation down due to discharging", "def _metrics_to_series(metrics) -> pd.Series:\n row = pd.Series(metrics[\"latency\"])\n for index, value in row.items():\n row[index] = value / 1000000.0\n\n row[\"qps\"] = metrics[\"qps\"]\n row[\"completed_queries\"] = metrics[\"completed_queries\"]\n row[\"failed_queries\"] = metrics[\"failed_queries\"]\n row[\"scenario\"] = metrics[\"scenario\"]\n\n if \"actual_qps\" in metrics:\n row[\"actual_qps\"] = metrics[\"actual_qps\"]\n\n return row", "def __init__(self):\n super().__init__()\n self.metric = 'FMEASR'", "def add_stats(self):\n units = self.get_unit_map()\n for metric in self.raw_metrics:\n unit, metric_type = units.get(metric, (DEFAULT_UNIT, DEFAULT_TYPE))\n if metric_type == \"counter\":\n # Unit/Second\n unit = \"/\".join((unit, \"Second\"))\n self.add_derive_value(metric, unit, self.raw_metrics[metric], rate=True)\n else:\n self.add_gauge_value(metric, unit, self.raw_metrics[metric])", "def test_temperature_to_metric(self):\n self.assertEqual(\n 25,\n METRIC_SYSTEM.temperature(25, METRIC_SYSTEM.temperature_unit))\n self.assertEqual(\n 26.7,\n METRIC_SYSTEM.temperature(80, IMPERIAL_SYSTEM.temperature_unit))", "def _convert_value(self, value, unit, axis):\n if hasattr(value, 'units'):\n return value.to(unit).magnitude\n else:\n return self._reg.Quantity(value, axis.get_units()).to(unit).magnitude", "def _add_converted_units(self, dataframe, parameter, key='VALUE'):\n convert_unit = self.parameters.get_converter(parameter)\n try:\n dataframe[key] = dataframe['DATA_VALUE'].apply(convert_unit)\n except KeyError:\n log.warn(\"Missing 'VALUE': no unit conversion.\")\n else:\n dataframe.unit = self.parameters.unit(parameter)", "def _float_metric_value(metric):\n return metric.result().numpy().astype(float)", "def update(self):\n self.ticker.update()\n self._values = self.ticker.values", "def 
__on_update_bookticker(self, action, bookticker):\n self.best_bid_price = float(bookticker['b'])\n self.best_ask_price = float(bookticker['a'])", "def prepareQuery(self, qid):\r\n \r\n connection = self.getConnection()\r\n cursor = connection.cursor()\r\n\r\n if self.granularity == 'day':\r\n extractTime = \"TO_CHAR(t.START_DATE, 'yyyy,mm,dd'), TO_CHAR(t.END_DATE, 'yyyy,mm,dd')\"\r\n elif self.granularity == 'year':\r\n extractTime = \"EXTRACT(YEAR FROM t.START_DATE), EXTRACT(YEAR FROM t.END_DATE)\"\r\n \r\n cursor.execute(\"SELECT t.TYPE, t.GEOMETRY.Get_WKT(), \" + extractTime + \",\" + \\\r\n\"t.DATE_TYPE, t.Z_MIN, t.Z_MAX FROM \" + self.queriesTable + \"\"\" t \r\nWHERE id = \"\"\" + qid + \"\"\" AND dataset = '\"\"\" + self.dataset.lower() + \"'\")\r\n\r\n self.qtype, self.wkt, self.start_date, self.end_date, self.timeType, self.ozmin, self.ozmax = cursor.fetchall()[0]\r\n\r\n if self.wkt is not None:\r\n self.wkt = str(self.wkt)\r\n connection.close()\r\n \r\n # Setting up the missing variables along with transformations to the time encoding. \r\n if self.granularity == 'day':\r\n if self.start_date is None and self.end_date is None:\r\n times = [[self.mint * self.scale, self.maxt * self.scale]]\r\n elif self.start_date is not None and self.end_date is not None:\r\n self.start_date = map(int, self.start_date.split(','))\r\n self.end_date = map(int, self.end_date.split(','))\r\n times = [[reader.daySinceEpoch(self.start_date[0], \r\n self.start_date[1], self.start_date[2]) * self.scale, \r\n reader.daySinceEpoch(self.end_date[0], \r\n self.end_date[1], self.end_date[2]) * self.scale]]\r\n elif self.end_date is None:\r\n self.start_date = map(int, self.start_date.split(','))\r\n times = [[reader.daySinceEpoch(self.start_date[0], self.start_date[1], self.start_date[2]) * self.scale, None]]\r\n else:\r\n if self.start_date is None and self.end_date is None:\r\n times = [[self.mint * self.scale, self.maxt * self.scale]]\r\n elif self.start_date is not None and self.end_date is not None:\r\n times = [[self.start_date * self.scale, self.end_date * self.scale]]\r\n elif self.end_date is None:\r\n times = [[self.start_date * self.scale, None]]\r\n\r\n if self.ozmin is None or self.ozmax is None: #no selectivity on z\r\n zmin = int(round((self.minz - self.offz)/self.scalez, 0))\r\n zmax = int(round((self.maxz - self.offz)/self.scalez, 0))\r\n else:\r\n zmin = int(round((self.ozmin - self.offz)/self.scalez, 0))\r\n zmax = int(round((self.ozmax - self.offz)/self.scalez, 0))\r\n\r\n # Preparing the different types of queries: Space and space - time\r\n continuous = True\r\n if self.wkt:\r\n if self.qtype.replace(' ', '').lower() != 'nn-search':\r\n ordinates = list(loads(self.wkt).exterior.coords)\r\n else:\r\n ordinates = list(loads(self.wkt).coords)\r\n \r\n if self.case == 1: #lxyt\r\n geometry = Polygon(self.list2ScaleOffset(ordinates)).wkt\r\n if self.qtype.lower() == 'space':\r\n coarser = self.params[0] #0, 0\r\n else:\r\n coarser = self.params[1] #4, 4\r\n \r\n elif self.case == 2: #lxyzt\r\n geometry = Polygon3D(Polygon(self.list2ScaleOffset(ordinates)), zmin, zmax)\r\n\r\n if self.qtype.lower() == 'space':\r\n coarser = self.params[2] #4, 4\r\n else:\r\n coarser = self.params[3] #3, 3\r\n\r\n elif self.case == 3: #dxyt\r\n geom = Polygon(self.list2ScaleOffset(ordinates)) \r\n if times[0][1] is None:\r\n continuous = False\r\n times[0][1] = times[0][0]\r\n coarser = self.params[4] #1, 8\r\n elif self.qtype.lower() == 'space':\r\n if times[0][0] == times[0][1]:\r\n continuous = False\r\n 
coarser = self.params[5] #-2, 1\r\n else:\r\n coarser = self.params[5] - 7\r\n elif self.timeType == 'continuous':\r\n coarser = self.params[6] #0, 2\r\n elif self.timeType == 'discrete':\r\n coarser = self.params[7] #3, 8\r\n \r\n if self.timeType == 'discrete' and (self.start_date is not None) and (self.end_date is not None):\r\n geometry = [dynamicPolygon(geom, times[0][0], times[0][0]),\r\n dynamicPolygon(geom, times[0][1], times[0][1])]\r\n else:\r\n geometry = dynamicPolygon(geom, times[0][0], times[0][1]) \r\n \r\n elif self.case == 4: #dxyzt\r\n geom = Polygon(self.list2ScaleOffset(ordinates))\r\n if times[0][1] == None:\r\n continuous = False\r\n coarser = self.params[8] #4, 9\r\n times[0][1] = times[0][0]\r\n elif self.qtype.lower() == 'space':\r\n if times[0][0] == times[0][1]:\r\n coarser = self.params[9] #0, 2\r\n else:\r\n coarser = self.params[9] - 4\r\n elif self.timeType == 'continuous':\r\n coarser = self.params[10] #0, 2\r\n elif self.timeType == 'discrete':\r\n coarser = self.params[11] #4, 9\r\n \r\n if self.timeType == 'discrete' and self.start_date is not None and self.end_date is not None:\r\n geometry = [Polygon4D(geom, zmin, zmax, times[0][0], times[0][0]),\r\n Polygon4D(geom, zmin, zmax, times[0][1], times[0][1])]\r\n else:\r\n geometry = Polygon4D(geom, zmin, zmax, times[0][0], times[0][1])\r\n \r\n else: #time queries\r\n if self.case == 1:\r\n geometry = []\r\n \r\n elif self.case == 2:\r\n geometry = []\r\n \r\n elif self.case == 3:\r\n temp_geom = self.list2ScaleOffset([(self.minx, self.miny), (self.maxx, self.maxy)])\r\n geom = box(temp_geom[0][0], temp_geom[0][1], temp_geom[1][0], temp_geom[1][1])\r\n \r\n if times[0][1] is None:\r\n times[0][1] = times[0][0]\r\n coarser = self.params[12] #3, 7\r\n continuous = False\r\n elif self.timeType == 'continuous':\r\n coarser = self.params[13] #0, 3\r\n else:\r\n coarser = self.params[14] #3, 8\r\n \r\n if self.timeType == 'discrete' and self.start_date is not None and self.end_date is not None:\r\n geometry = [dynamicPolygon(geom, times[0][0], times[0][0]),\r\n dynamicPolygon(geom, times[0][1], times[0][1])]\r\n else:\r\n geometry = dynamicPolygon(geom, times[0][0], times[0][1])\r\n\r\n elif self.case == 4:\r\n temp_geom = self.list2ScaleOffset([(self.minx, self.miny),(self.maxx, self.maxy)])\r\n geom = box(temp_geom[0][0], temp_geom[0][1], temp_geom[1][0], temp_geom[1][1])\r\n if times[0][1] is None:\r\n times[0][1] = times[0][0]\r\n coarser = self.params[15] #4, 12\r\n continuous = False\r\n elif self.timeType == 'continuous':\r\n coarser = self.params[16] #1, 3\r\n elif self.timeType == 'discrete':\r\n coarser = self.params[17] #4, 11\r\n \r\n if self.timeType == 'discrete' and self.start_date is not None and self.end_date is not None:\r\n geometry = [Polygon4D(geom, zmin, zmax, times[0][0], times[0][0]),\r\n Polygon4D(geom, zmin, zmax, times[0][1], times[0][1])]\r\n else: \r\n geometry = Polygon4D(geom, zmin, zmax, times[0][0], times[0][1])\r\n \r\n\r\n \"\"\"The final lines have to do with the way of posing the query to the \r\n database. Two options are possible:\r\n (a) sql: A SQL query is posed to the database. 
The number of ranges is\r\n limited by a maximum number.\r\n (b) join: The table is joined explicitly with a table containing the \r\n ranges.\"\"\"\r\n if geometry == []:\r\n mortonWhere, self.mortonJoinWhere, ranges, rangeTab, morPrep, insert, Levels = ('', '', 0, None, 0, 0, 0)\r\n else:\r\n if self.method == 'join':\r\n rangeTab = (self.rangeTable + qid).upper()\r\n ranges, morPrep, insert, Levels = self.join(geometry, coarser, rangeTab, continuous)\r\n mortonWhere = self.mortonJoinWhere\r\n elif self.method == 'sql':\r\n rangeTab, insert = None, 0\r\n mortonWhere, ranges, morPrep, Levels = self.sql(geometry, coarser, continuous)\r\n \r\n # if deep the time is in the morton code\r\n if self.integration == 'deep' or (self.start_date is None and self.end_date is None and self.integration == 'loose'): \r\n timeWhere = ''\r\n elif self.integration == 'loose': \r\n timeWhere = whereClause.addTimeCondition(times, 'time', self.timeType)\r\n \r\n return whereClause.getWhereStatement([timeWhere, mortonWhere]), ranges, morPrep, insert, Levels, rangeTab", "def __new__(cls, value_str):\n\n value_str = value_str.strip()\n if value_str[0] == '[' and value_str[-1] == ']':\n value = literal_eval(value_str)\n else:\n value_str = value_str.split()\n assert len(value_str) >= 1\n\n try:\n value = float(value_str[0])\n except ValueError:\n value = value_str[0]\n\n #False positive, pylint does not see attributes\n #defined in __new__\n #pylint: disable=attribute-defined-outside-init\n if isinstance(value, float):\n if len(value_str) != 1:\n if len(value_str) == 4:\n unit = Unit(value_str[3])\n else:\n unit = Unit('')\n\n if value_str[1] == '+/-':\n plus_error = float(value_str[2])\n minus_error = plus_error\n else:\n plus_error = float(value_str[1])\n minus_error = abs(float(value_str[2]))\n result = super().__new__(cls, value, unit)\n result.plus_error = plus_error\n result.minus_error = minus_error\n return result", "async def test_float_data_type(hass, mock_hub):\n register_config = {\n CONF_COUNT: 2,\n CONF_REGISTER_TYPE: CALL_TYPE_REGISTER_HOLDING,\n CONF_DATA_TYPE: DATA_TYPE_FLOAT,\n CONF_SCALE: 1,\n CONF_OFFSET: 0,\n CONF_PRECISION: 5,\n }\n await run_test(\n hass,\n mock_hub,\n register_config,\n SENSOR_DOMAIN,\n register_words=[16286, 1617],\n expected=\"1.23457\",\n )", "async def test_floats_get_rounded_correctly(hass, mock_hub):\n register_config = {\n CONF_COUNT: 1,\n CONF_DATA_TYPE: DATA_TYPE_INT,\n CONF_SCALE: 1.5,\n CONF_OFFSET: 0,\n CONF_PRECISION: 0,\n }\n await run_test(\n hass,\n mock_hub,\n register_config,\n SENSOR_DOMAIN,\n register_words=[1],\n expected=\"2\",\n )", "def line_to_metric(line):\n # convert line of csv to list\n # sigh, the first field is not quoted\n (date, time, fields) = line.split(' ', 2)\n metric = {'timestamp':\" \".join((date, time))}\n fields = fields.split(',')\n fields = [field.strip() for field in fields]\n # convert list of k=v to dict\n fields = [field.split('=') for field in fields]\n metric.update(dict(fields))\n # normalize keys, deserialize values, add derived values\n # would be more rigorous to have a seprate table for derived values\n metric['callerid'] = metric.pop('CALLERID(number)')\n metric['uniqueid'] = metric.pop('UNIQUEID')\n metric['channel'] = metric.pop('CHANNEL')\n # deserialize values\n metric['timestamp'] = datetime.datetime(\n *map(int, re.split('[^\\d]', metric['timestamp'])[:-1]))\n # split ext from eg SIP/668-000002f1 SIP/callcentric-default-000002f3\n (_proto, extension) = metric['channel'].split('/')\n extension = 
'-'.join(extension.split('-')[:-1])\n metric['channel_extension'] = extension\n return metric", "def __init__(self, tsa, tsastats=None):\n self.__data = {}\n self.__keys = tuple(tsa.keys())\n self.__value_keynames = tuple(tsa.value_keynames)\n for value_keyname in self.__value_keynames:\n try:\n self.__data[value_keyname] = Quantile(tsa, value_keyname, tsastats=tsastats)\n except QuantileError as exc:\n logging.exception(exc)\n logging.error(\"skipping value_key %s\", value_keyname)", "def calculate_measurement_value(data_model, metric: Dict, sources, scale: Scale) -> Optional[str]:\n\n def percentage(numerator: int, denominator: int, direction: Direction) -> int:\n \"\"\"Return the rounded percentage: numerator / denominator * 100%.\"\"\"\n if denominator == 0:\n return 0 if direction == \"<\" else 100\n return int((100 * Decimal(numerator) / Decimal(denominator)).to_integral_value(ROUND_HALF_UP))\n\n def value_of_entities_to_ignore(source) -> int:\n \"\"\"Return the value of the ignored entities, i.e. entities that have marked as fixed, false positive or\n won't fix. If the entities have a measured attribute, return the sum of the measured attributes of the ignored\n entities, otherwise return the number of ignored attributes. For example, if the metric is the amount of ready\n user story points, the source entities are user stories and the measured attribute is the amount of story\n points of each user story.\"\"\"\n entities = source.get(\"entity_user_data\", {}).items()\n ignored_entities = [\n entity[0] for entity in entities if entity[1].get(\"status\") in (\"fixed\", \"false_positive\", \"wont_fix\")]\n source_type = metric[\"sources\"][source[\"source_uuid\"]][\"type\"]\n if attribute := get_measured_attribute(data_model, metric[\"type\"], source_type):\n entity = data_model[\"sources\"][source_type][\"entities\"].get(metric[\"type\"], {})\n attribute_type = get_attribute_type(entity, attribute)\n convert = dict(float=float, integer=int, minutes=int)[attribute_type]\n value = sum(\n convert(entity[attribute]) for entity in source[\"entities\"] if entity[\"key\"] in ignored_entities)\n else:\n value = len(ignored_entities)\n return int(value)\n\n if not sources or any(source[\"parse_error\"] or source[\"connection_error\"] for source in sources):\n return None\n values = [int(source[\"value\"]) - value_of_entities_to_ignore(source) for source in sources]\n addition = metric[\"addition\"]\n add = dict(max=max, min=min, sum=sum)[addition]\n if scale == \"percentage\":\n metric_type = data_model[\"metrics\"][metric[\"type\"]]\n direction = metric.get(\"direction\") or metric_type[\"direction\"]\n totals = [int(source[\"total\"]) for source in sources]\n if addition == \"sum\":\n values, totals = [sum(values)], [sum(totals)]\n values = [percentage(value, total, direction) for value, total in zip(values, totals)]\n return str(add(values)) # type: ignore", "def apply_filter_metrics(self, pack_nr, filter_metrics):\n current_pack_metrics = ast.literal_eval(self.list_pack[pack_nr]['metrics'])\n\n for i in filter_metrics:\n if i in current_pack_metrics:\n filter_metrics[i] = current_pack_metrics[i]\n\n self.list_pack[pack_nr]['metrics'] = filter_metrics", "def test_create_derived_metric(self):\n pass", "def __q_gxg__(series, q, tmin=None, tmax=None, by_year=True):\n if tmin is not None:\n series = series.loc[tmin:]\n if tmax is not None:\n series = series.loc[:tmax]\n series = series.resample('d').median()\n if by_year:\n return (series\n .resample('a')\n .apply(lambda s: s.quantile(q))\n 
.mean()\n )\n else:\n return series.quantile(q)\n\n # noinspection PyIncorrectDocstring,PyIncorrectDocstring", "def calc_fgs_cr_mag_and_err(self):\n\n # Set values based on guider\n if self.guider == 1:\n throughput_dict = THROUGHPUT_G1\n cr_conversion = CR_CONVERSION_G1\n elif self.guider == 2:\n throughput_dict = THROUGHPUT_G2\n cr_conversion = CR_CONVERSION_G2\n else:\n raise ValueError(\"Guider value must be an integer either 1 or 2\")\n\n # Calculate magnitude/countrate\n self.fgs_countrate, self.fgs_magnitude, self.band_dataframe = \\\n self._calc_fgs_cr_mag(to_compute='both', band_series=self._all_calculated_mag_series,\n guider_throughput=throughput_dict, guider_gain=cr_conversion,\n return_dataframe=True)\n\n # Band Magnitude Error\n cr_err_list = []\n mag_err_list = []\n for band in self._present_calculated_mags:\n band_data_with_err = copy.deepcopy(self._all_calculated_mag_series)\n band_data_with_err[band] += self._all_calculated_mag_err_series[band+'Err']\n cr_band_err, mag_band_err = self._calc_fgs_cr_mag(to_compute='both',\n band_series=band_data_with_err,\n guider_throughput=throughput_dict,\n guider_gain=cr_conversion)\n cr_err_list.append(cr_band_err - self.fgs_countrate)\n mag_err_list.append(mag_band_err - self.fgs_magnitude)\n\n # Throughput Error - 5%\n new_throughput = {key: val * 1.05 for key, val in throughput_dict.items()}\n cr_tput_err, mag_tput_err = self._calc_fgs_cr_mag(to_compute='both',\n band_series=self._all_calculated_mag_series,\n guider_throughput=new_throughput,\n guider_gain=cr_conversion)\n cr_err_list.append(cr_tput_err - self.fgs_countrate)\n mag_err_list.append(mag_tput_err - self.fgs_magnitude)\n\n # Gain Error - 5%\n new_gain = cr_conversion * 1.05\n cr_gain_err, mag_gain_err = self._calc_fgs_cr_mag(to_compute='both',\n band_series=self._all_calculated_mag_series,\n guider_throughput=throughput_dict,\n guider_gain=new_gain)\n cr_err_list.append(cr_gain_err - self.fgs_countrate)\n\n # Integral Error - 5%\n cr_err_list.append(self.fgs_countrate * 0.05)\n mag_err_list.append(self.fgs_magnitude * 0.05)\n\n # Combine Error\n self.fgs_countrate_err = np.sqrt(np.sum(i**2 for i in cr_err_list))\n self.fgs_magnitude_err = np.sqrt(np.sum(i**2 for i in mag_err_list))\n\n return self.fgs_countrate, self.fgs_countrate_err, self.fgs_magnitude, self.fgs_magnitude_err", "def do_transform(self):\r\n if not self.transform:\r\n return\r\n try:\r\n self.latest_value = utils.Transform(\r\n expr=self.transform, value=self.latest_value,\r\n timedelta=self.time_between_updates().total_seconds()).result()\r\n except (TypeError, ValueError):\r\n logger.warn(\"Invalid transformation '%s' for metric %s\",\r\n self.transfrom, self.pk)\r\n self.transform = ''", "def merge_tickers(\n ticker1: Ticker,\n ticker2: Ticker,\n update_only: bool = False,\n):\n\n if update_only is True:\n for ticker2_product in ticker2.products:\n if ticker2_product in ticker1.products:\n ticker1.products[ticker2_product].metrics.update(\n ticker2.products[ticker2_product].metrics\n )\n else:\n for ticker2_product in ticker2.products:\n ticker1.products[ticker2_product].metrics.update(\n ticker2.products[ticker2_product].metrics\n )", "def __q_gxg__(series, q, tmin=None, tmax=None, by_year=True):\n if tmin is not None:\n series = series.loc[tmin:]\n if tmax is not None:\n series = series.loc[:tmax]\n series = series.resample('d').median()\n if by_year:\n return (series\n .resample('a')\n .apply(lambda s: s.quantile(q))\n .mean()\n )\n else:\n return series.quantile(q)", "def __init__(\n self,\n 
timeseries,\n freq,\n ch_name,\n units,\n trigger_idx,\n num_timepoints_found=None,\n thr=None,\n time_offset=0,\n ):\n self.timeseries = deepcopy(is_valid(timeseries, list, list_type=np.ndarray))\n self.freq = deepcopy(\n has_size(is_valid(freq, list, list_type=(int, float)), self.ch_amount, 0.0)\n )\n self.ch_name = deepcopy(has_size(ch_name, self.ch_amount, \"unknown\"))\n self.units = deepcopy(has_size(units, self.ch_amount, \"[]\"))\n self.trigger_idx = deepcopy(is_valid(trigger_idx, int))\n if trigger_idx == 0:\n self.auto_trigger_selection()\n else:\n if ch_name[trigger_idx] not in TRIGGER_NAMES:\n LGR.info(\n \"Trigger channel name is not in our trigger channel name alias list. \"\n \"Please make sure you choose the proper channel.\"\n )\n\n self.num_timepoints_found = deepcopy(num_timepoints_found)\n self.thr = deepcopy(thr)\n self.time_offset = deepcopy(time_offset)\n self._time_resampled_to_trigger = None", "def update_metric(self, metric, value):\n if self.is_number(value):\n self.logger.debug(\"Collected raw metric: %s = %s\" % (metric, value))\n self.raw_metrics[metric] = value", "def add_quality(df):\n df = pd.concat([df, convert_quality(df['quality'])], \n axis=1)\n\n df['Q_min'] = df.filter(regex='Q_\\d+', axis=1).min(axis=1)\n df['Q_mean'] = df.filter(regex='Q_\\d+', axis=1).mean(axis=1)\n return df", "def test_get_qc_metrics(integrated_ff):\n\n key, ff_env = integrated_ff['ff_key'], integrated_ff['ff_env']\n uuid = '331106bc-8535-3338-903e-854af460b544'\n qc_metrics = ff_utils.get_associated_qc_metrics(uuid, key=key, ff_env=ff_env)\n assert len(qc_metrics.keys()) == 1\n assert '131106bc-8535-4448-903e-854abbbbbbbb' in qc_metrics\n target_qc = qc_metrics['131106bc-8535-4448-903e-854abbbbbbbb']\n assert 'QualityMetric' in target_qc['values']['@type']\n assert target_qc['organism'] == 'human'\n assert target_qc['experiment_type'] == 'Dilution Hi-C'\n assert target_qc['experiment_subclass'] == 'Hi-C'\n assert target_qc['source_file_association'] == 'processed_files'\n assert target_qc['source_experiment'] == '4DNEXO67APV1'\n assert target_qc['source_experimentSet'] == '4DNESOPFAAA1'\n assert target_qc['biosource_summary'] == \"GM12878\"\n\n kwargs = { # do same as above w/ kwargs, specify to include raw files this time\n 'key': key,\n 'ff_env': ff_env,\n 'include_raw_files': True\n }\n qc_metrics = ff_utils.get_associated_qc_metrics(uuid, **kwargs)\n assert len(qc_metrics.keys()) == 2\n assert '131106bc-8535-4448-903e-854abbbbbbbb' in qc_metrics\n assert '4c9dabc6-61d6-4054-a951-c4fdd0023800' in qc_metrics\n assert 'QualityMetric' in qc_metrics['131106bc-8535-4448-903e-854abbbbbbbb']['values']['@type']\n assert 'QualityMetric' in qc_metrics['4c9dabc6-61d6-4054-a951-c4fdd0023800']['values']['@type']", "async def query(self, metric):\n metric_name = metric.spec.provider.metric\n\n url = self.metrics_provider.spec.influx.url\n token = self.metrics_provider.spec.influx.token\n org = self.metrics_provider.spec.influx.org\n bucket_name = self.metrics_provider.spec.influx.bucket\n\n client = InfluxDBClient(url=url, token=token, org=org)\n query_api = client.query_api()\n\n query = f'''\n from(bucket:\"{bucket_name}\")\n |> range(start: -1h)\n |> filter(fn: (r) => r._measurement == \"{metric_name}\")\n |> last()\n '''\n\n try:\n loop = asyncio.get_event_loop()\n result = await loop.run_in_executor(None, query_api.query, query)\n for table in result:\n for record in table.records:\n response = record.values['_value']\n return float(response)\n\n except Exception as err:\n 
metric_provider_name = self.metrics_provider.metadata.name\n raise MetricsProviderError(\n f\"Failed to query InfluxDB with provider {metric_provider_name!r}\"\n ) from err\n\n raise MetricError(f\"Metric {metric_name!r} not in InfluxDB response\")", "def add_support_for_floats_to_dynamodb():\n\n # Ignore loss of precision rather than raising exception\n DYNAMODB_CONTEXT.clear_traps()\n\n # Keep a reference to the original serialization methods\n boto3_serialize_orig = TypeSerializer.serialize\n boto3_deserialize_orig = TypeDeserializer.deserialize\n\n # Wrap serialization methods to support floats\n def boto3_serialize(self, value):\n if isinstance(value, float):\n value = Decimal(value)\n return boto3_serialize_orig(self, value)\n\n def boto3_deserialize(self, value):\n value = boto3_deserialize_orig(self, value)\n if isinstance(value, Decimal):\n value = float(value)\n return value\n\n # Replace the serialization methods with wrapped versions\n TypeSerializer.serialize = boto3_serialize\n TypeDeserializer.deserialize = boto3_deserialize", "def update(self, y_true: list[Number], y_pred: list[Number]) -> ForecastingMetric:", "def populate_buy_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:\n\n dataframe.loc[\n (\n (qtpylib.crossed_above(dataframe['ema'],dataframe['ema2']))\n ),'buy'] = 1\n\n return dataframe", "def get_quant(q):\n\n try:\n e_q = eval(q)\n except:\n return None\n\n if isinstance(e_q, (int,float,complex)):\n return e_q\n \n return None", "def record_custom_metric(self, name, value):\n if isinstance(value, dict):\n if len(value) == 1 and 'count' in value:\n new_stats = CountStats(call_count=value['count'])\n else:\n new_stats = TimeStats(*c2t(**value))\n else:\n new_stats = TimeStats(1, value, value, value, value, value**2)\n\n stats = self.__stats_table.get(name)\n if stats is None:\n self.__stats_table[name] = new_stats\n else:\n stats.merge_stats(new_stats)", "def __init__(self, quasar, name, start_date, end_date):\n self.quasar = quasar\n self.name = name\n self.start = start_date\n self.end = end_date\n\n self.sampling_freq = 120 # Hz\n\n self.cache = [[None, None] for x in range(CACHE_ENTRIES)]", "def populate_buy_trend(dataframe: DataFrame, metadata: dict) -> DataFrame:\n conditions = []\n\n conditions.append(\n ((dataframe['bull'] > 0) & qtpylib.crossed_below(dataframe['rsi'], params['bull-buy-rsi-value'])) |\n (~(dataframe['bull'] > 0) & qtpylib.crossed_below(dataframe['rsi'], params['bear-buy-rsi-value']))\n )\n\n conditions.append(dataframe['volume'] > 0)\n\n dataframe.loc[\n reduce(lambda x, y: x & y, conditions),\n 'buy'] = 1\n\n return dataframe", "def test_create_api_metrics(mocker, response, result):\n mocker.patch.object(demisto, 'results')\n mocker.patch('CommonServerPython.is_demisto_version_ge', return_value=True)\n mocker.patch('MicrosoftApiModule.is_demisto_version_ge', return_value=True)\n mocker.patch.object(demisto, 'callingContext', {'context': {'ExecutedCommands': [{'moduleBrand': 'msgraph'}]}})\n client = retry_on_rate_limit_client(True)\n client.create_api_metrics(response)\n\n metric_results = demisto.results.call_args_list[0][0][0]\n assert metric_results.get('Contents') == 'Metrics reported successfully.'\n assert metric_results.get('APIExecutionMetrics') == result", "def __populate_historical_trade_data(self):\n\n trade_data = self.__transactions.pivot_table(\n index=\"Date\",\n columns=[\"Ticker\"],\n values=[\n \"Quantity\",\n \"Investment\",\n ],\n aggfunc={\"Quantity\": np.sum, \"Investment\": np.sum},\n )\n\n # Make 
historical prices columns a multi-index. This helps the merging.\n self.portfolio_historical_prices.columns = pd.MultiIndex.from_product(\n [[\"Close\"], self.portfolio_historical_prices.columns]\n )\n\n trade_data = pd.merge(\n trade_data,\n self.portfolio_historical_prices,\n how=\"outer\",\n left_index=True,\n right_index=True,\n )\n\n trade_data[\"Close\"] = trade_data[\"Close\"].fillna(method=\"ffill\")\n trade_data.fillna(0, inplace=True)\n\n trade_data[\"Quantity\"] = trade_data[\"Quantity\"].cumsum()\n trade_data[\"Investment\"] = trade_data[\"Investment\"].cumsum()\n trade_data[\"Investment\", \"Total\"] = trade_data[\"Investment\"].sum(axis=1)\n trade_data[\n pd.MultiIndex.from_product(\n [[\"Investment delta\"], self.tickers_list + [\"Total\"]]\n )\n ] = (trade_data[\"Investment\"].diff(periods=1).fillna(trade_data[\"Investment\"]))\n\n # End Value = Quantity * Close\n trade_data[pd.MultiIndex.from_product([[\"End Value\"], self.tickers_list])] = (\n trade_data[\"Quantity\"][self.tickers_list]\n * trade_data[\"Close\"][self.tickers_list]\n )\n\n trade_data.loc[:, (\"End Value\", \"Total\")] = trade_data[\"End Value\"][\n self.tickers_list\n ].sum(axis=1)\n\n # Initial Value = Previous End Value + Investment changes\n trade_data[\n pd.MultiIndex.from_product(\n [[\"Initial Value\"], self.tickers_list + [\"Total\"]]\n )\n ] = 0\n\n trade_data[\"Initial Value\"] = trade_data[\"End Value\"].shift(1) + trade_data[\n \"Investment\"\n ].diff(periods=1)\n\n # Set first day Initial Value as the Investment (NaNs break first period)\n for t in self.tickers_list + [\"Total\"]:\n trade_data.at[trade_data.index[0], (\"Initial Value\", t)] = trade_data.iloc[\n 0\n ][\"Investment\"][t]\n\n trade_data = trade_data.reindex(\n columns=[\n \"Quantity\",\n \"Investment\",\n \"Investment delta\",\n \"Close\",\n \"Initial Value\",\n \"End Value\",\n ],\n level=0,\n )\n self.historical_trade_data = trade_data", "def get_metric(self, data_row: pd.Series) -> float:", "def _convert_metrics_to_kv(self, per_class, micro, macro, weighted) -> Dict[str, float]:\n kv_metrics = {}\n for aggregation_name, aggregated_metrics in zip(\n (\"_micro\", \"_macro\", \"_weighted\"), (micro, macro, weighted)\n ):\n metrics = {\n f\"{metric_name}/{aggregation_name}\": metric_value\n for metric_name, metric_value in zip(\n (\"precision\", \"recall\", \"f1\"), aggregated_metrics[:-1]\n )\n }\n kv_metrics.update(metrics)\n\n per_class_metrics = {\n f\"{metric_name}/class_{i:02d}\": metric_value[i]\n for metric_name, metric_value in zip(\n (\"precision\", \"recall\", \"f1\", \"support\"), per_class\n )\n for i in range(self.num_classes) # noqa: WPS361\n }\n kv_metrics.update(per_class_metrics)\n return kv_metrics", "def __init__(self, metricName, timeResolutions = (86400,)):\n self.metric = metricName\n self.timeResolutions = timeResolutions", "def __init__(self):\n super().__init__()\n self.metric = 'TP'", "def _get_eval_metric(self):\n raise NotImplementedError", "def _to(\n value: Union[\"Value\", \"ValueArray\"], units: Union[Unit, str], inplace: bool\n) -> Any:\n if value.units == units:\n return value\n\n if value.units is None:\n raise RuntimeError(\"Cannot convert with units=None\")\n\n try:\n units = next(\n imp_unit\n for imp_unit in value.implemented_units\n if units.lower() in imp_unit.aliases\n )\n\n except StopIteration:\n raise TypeError(\n f\"No viable unit conversion from {value.units} -> {units}\"\n )\n\n if not (isinstance(value, Value) or isinstance(value, ValueArray)):\n raise ValueError(\n f\"Cannot 
convert {value} to new units. Must be one of\"\n f\" Value of ValueArray\"\n )\n\n if isinstance(value, Value) and inplace:\n raise ValueError(\n \"Cannot modify a value inplace as floats are immutable\"\n )\n\n # Convert to the base unit, then to the new units\n c = float(units.conversion / value.units.conversion)\n\n new_value = value if inplace else value.copy()\n new_value *= c\n new_value.units = units\n\n return None if inplace else new_value", "def calcMetrics(TP, P, T, percent=True):\r\n precision = TP / P if P else 0\r\n recall = TP / T if T else 0\r\n FB1 = 2 * precision * recall / (precision + recall) if precision + recall else 0\r\n if percent:\r\n return 100 * precision, 100 * recall, 100 * FB1\r\n else:\r\n return precision, recall, FB1", "def update(self):\n public_client = client.Public()\n try:\n self.values = public_client.ticker(base=self.currency, quote=self.display_currency)\n except Exception as e:\n _LOGGER.error(e)" ]
[ "0.504665", "0.49457982", "0.482276", "0.47894225", "0.47741964", "0.47666577", "0.4716032", "0.46845242", "0.46796387", "0.46367455", "0.46182653", "0.4578131", "0.4567336", "0.4567215", "0.45670658", "0.4566002", "0.4552697", "0.45424986", "0.45123693", "0.44995657", "0.44877443", "0.44838825", "0.44828242", "0.44786343", "0.44393143", "0.4434736", "0.4430184", "0.44293934", "0.44234604", "0.44196582", "0.44084162", "0.43941602", "0.43778703", "0.43736842", "0.4365877", "0.43553302", "0.43521357", "0.43511426", "0.4350856", "0.43427205", "0.43411538", "0.4332191", "0.43295893", "0.43294027", "0.4309061", "0.4303353", "0.42907333", "0.42902413", "0.4289388", "0.4287284", "0.42858467", "0.4284688", "0.428323", "0.42824253", "0.4281963", "0.42802745", "0.42756805", "0.42662048", "0.4264962", "0.42639035", "0.42580366", "0.42577907", "0.42553818", "0.42482546", "0.42461833", "0.42446995", "0.42433968", "0.4239446", "0.42374963", "0.42371905", "0.42358828", "0.42342883", "0.42237315", "0.42193455", "0.4217983", "0.4212947", "0.4209421", "0.42083195", "0.42030832", "0.42008722", "0.42006922", "0.41999573", "0.41944164", "0.41929042", "0.41899544", "0.4189869", "0.41857687", "0.4178727", "0.41784877", "0.4177032", "0.41764137", "0.41751176", "0.41741157", "0.4173521", "0.4171889", "0.4171349", "0.41708684", "0.4170354", "0.4168693", "0.41675884" ]
0.6156283
0
Rebuild the request from history (self.__references).
def rebuild_request(self) -> Quotecast.Request:
    references = self.references
    request = Quotecast.Request()
    for vwd_id, metric in references.values():
        request.subscriptions[vwd_id].append(metric)
    return request
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rebuild(self):\n _logger.info( \"Rebuilding the API Caches...\" )\n\n # fill out the data structures\n self._buildApiTypesList()\n #_buildMayaTypesList()\n \n self._buildMayaReservedTypes(force=True)\n\n self._buildApiRelationships()\n\n # merge in the manual overrides: we only do this when we're rebuilding or in the pymelControlPanel\n _logger.info( 'merging in dictionary of manual api overrides')\n self._mergeClassOverrides()", "def _problem_update_history(self, _):\n self._update_reward_values()\n self.history.curr_reward.append(self.curr_reward)\n self.history.curr_best_reward.append(self.curr_best_reward)", "def _rebuild(self, *args, **kwargs):\n handle = self._args.copy() # Original constructor arguments\n argnames = [i for i in self._traversable if i not in kwargs]\n handle.update(OrderedDict([(k, v) for k, v in zip(argnames, args)]))\n handle.update(kwargs)\n return type(self)(**handle)", "def _clone_rip(self, memo):\n # references lists of definitions need to be vacated except those that were cloned.\n for definition in self._definitions:\n new_references = set()\n for ref in definition._references:\n if ref in memo.values():\n new_references.add(ref)\n for instance in definition._children:\n instance._reference._references.add(instance)\n\n definition._references = new_references", "def history(self, history):\n self._history = history", "def reset(self):\n self._current_request = {}\n return self", "def reset(self):\n raise NotImplemented('Do not call WithHistory directly')", "def history(self, history):\n\n self._history = history", "def rebuild(context):\n clean(context)\n build(context, cache=False)", "def refresh(self):\n self.dto = self.res.get()\n log.debug(f\"Refreshed {self.url}\")", "def _push_history(self):\n self._history.append(self._state)", "def rebuild(self):\n self.from_samples(self.samples)", "def change_history(self, new_reflist, modification_msg):\n self.visual.log(\"New reference list wrt: [{}], yielded {} items.\".format(modification_msg, len(new_reflist)))\n self.push_reference_list(new_reflist, modification_msg)\n # unselect stuff -- it's meaningless now\n self.unselect()", "def _update_head_history(self):\n # pylint: disable=broad-except\n try:\n head = [h for h in self._git.heads if h.name == self.head][0]\n self.head_hash = head.commit.hexsha\n self.head_history = [\n {\n \"commit\": str(c.newhexsha),\n \"timestamp\": c.time[0],\n \"message\": c.message,\n \"author\": {\"name\": c.actor.name, \"email\": c.actor.email},\n }\n for c in head.log()[::-1]\n ]\n except Exception as err:\n self.log.warn(\"Git head update error, ignoring: %s\", err, exc_info=True)\n self.head_history = []", "def refresh(cls):\n # Flip the order of the links so that the first URL listed is the\n # highest priority and will take precedence\n for url in current_app.config['MATLAB_DOC_LINKS'][::-1]:\n resp = requests.get(url)\n soup = BeautifulSoup(resp.text, 'html.parser')\n\n terms = soup.findAll('td', {'class': 'term'})\n links = [term.find('a') for term in terms]\n\n for link in links:\n\n function = link.text.rstrip()\n\n doc = cls.query.filter_by(name=function).first()\n doc_url = urljoin(url, link['href'])\n\n # Create an entry if one doesn't already exist\n if doc is None:\n doc = cls(name=function)\n\n doc.link = doc_url\n doc.save()\n\n # Make sure to remove i and j entries\n toremove = cls.query.filter(or_(cls.name == 'i', cls.name == 'j')).all()\n for item in toremove:\n item.delete()\n\n return cls.query.all()", "def reindex(self):\n if self.channels is None:\n 
return\n\n self.data = None\n\n keep_indices = self.channels.new_indices_in_old()\n self.channels.reindex()\n\n if self.parms is not None:\n self.parms = self.integration.get_dependents(\n self.get_config_name())\n\n channel_attributes = self.channel_dependent_attributes\n\n for attribute, value in self.__dict__.items():\n if attribute not in channel_attributes:\n continue\n if not isinstance(value, np.ndarray):\n continue\n setattr(self, attribute, value[keep_indices])", "def fill_from_cache(self):\n move_count = min(\n len(self._replacement_cache),\n constants.K - len(self._contacts)\n )\n\n for _ in range(move_count):\n self.add_contact(self._replacement_cache.pop())", "def reload(self):\n # type: () -> None\n parsed_requirements = self.parse()\n self.requirements = parsed_requirements[0]\n self.index_urls = parsed_requirements[1]\n self.nested_cfiles = parsed_requirements[2]\n self.nested_rfiles = parsed_requirements[3]", "def process_request(self, request):\n super(HistoryChangesetMiddleware, self).process_request(request)\n if request.META.get('REQUEST_METHOD') in ('GET', 'HEAD'):\n return\n request.changeset = None\n request.close_changeset = False\n # Default is to update cached objects as they are modified\n request.delay_cache = False\n\n changeset_id = request.GET.get('use_changeset')\n if changeset_id:\n changeset = Changeset.objects.get(id=changeset_id)\n if changeset.user != request.user:\n message = (\n 'Changeset %s has a different user.' % changeset_id)\n return self.bad_request(request, message)\n if changeset.closed:\n message = 'Changeset %s is closed.' % changeset_id\n return self.bad_request(request, message)\n request.changeset = changeset\n # Wait until changeset is manually closed to schedule cache updates\n request.delay_cache = True", "def reset(self):\n self.history = []\n self.frame = {}\n self.params = {}\n self.form = {}", "def _update(self):\n num_new_evals = (self.metamodel.model_evaluations - self._last_rebuild)\n if num_new_evals >= self.rebuild_interval:\n self._built = True\n self._last_rebuild = self.metamodel.model_evaluations\n\n # Rebuild relevance function and make it usable on arrays.\n self._relevance_function = self._construct_relevance_function()\n rel_fun = np.vectorize(self._relevance_function)\n\n # Learn relevance prediction model\n data = self.metamodel.history.get_model_evaluations()\n relevance_values = rel_fun(data[:, -1])\n self._predictor.fit(data[:, :-1], relevance_values)\n return", "def refresh_history(self):\n\n self.old_jobs = self.secretary_bot.history_bullshit_filter(self.old_jobs)\n self.jobs_save(self.old_jobs, 'overwrite')", "def resequence(self):\n self.history.sort(key=lambda x: x[0])\n self.reset()\n for key, attribute in self.history:\n self.set_current(attribute)\n self.latest = key", "def reindex(self):", "def reindex(self):", "def reBuild(self): # redefine the rebuild method for loss function (polymorphism)\n self.updateRange()\n self.buildLine()\n self.normalize() # normalize loss function to have total area of 1 ", "def _invalidate_branch_cache(self):\n self._cached_overlapping_branch_list = None", "def __build_history(self, obj: Object) -> dict:\n previous_history = dict(obj.history)\n return {**previous_history, self.__get_timestamp(): {'type_id': obj.type_id, 'redshift': obj.redshift}}", "def __build_history(self, obj: Object) -> dict:\n previous_history = obj.history\n return {**previous_history, self.__get_timestamp(): {'type_id': obj.type_id, 'redshift': obj.redshift}}", "def _refresh_buffers(self) -> None:", 
"def reverse(self) -> None:\n new_objects = list(self)\n new_objects.reverse()\n self.objects = new_objects", "def update_path(self):\r\n if len(self.queue) == 0:\r\n return\r\n self.path[:] = []\r\n current = self.peek_queue()[0]\r\n while current in self.previous:\r\n self.path.append(current)\r\n current = self.previous[current]", "def clear(self):\n self.changeHistory = []", "def update_buffer(self):\n if not self.recur_rule:\n return\n frequency = getattr(rrule, \n self.recur_rule.get_frequency_display().upper())\n today = datetime.datetime.today()\n two_years_hence = today + datetime.timedelta(731)\n until = self.recur_rule.until or two_years_hence\n recur = rrule.rrule(frequency, dtstart=self.time, \n interval=self.recur_rule.interval, until=until)\n recur_set = rrule.rruleset()\n recur_set.rrule(recur)\n for exclusion in self.recur_rule.exclusion_set.all():\n recur_set.exdate(exclusion.date)\n existing_tasks = Task.objects.filter(recur_rule=self.recur_rule)\n existing_dates = [task.time for task in existing_tasks]\n for task_date in recur_set:\n # don't re-create existing tasks\n if task_date in existing_dates:\n continue\n task = Task(job=self.job, time=task_date, hours=self.hours,\n member=self.member, account=self.account, \n recur_rule=self.recur_rule)\n task.save()", "def rebuilder(self):\n return self._rebuilder", "def get_old_references(self, header, include=None):\n header = dict(header) # make a copy\n instrument = self.get_instrument(header)\n imap = self.get_imap(instrument)\n return imap.get_old_references(header, include)", "def FreshStart(self):\n # Create a vector holding historical data for the purpose of plotting.\n # The length may vary because the sampling speed of different are\n # sensors may vary.\n\n self.history = {'time': collections.deque( [], self.history_length ),\\\n 'data': collections.deque( [], self.history_length )\n }", "def latest_ref(self):", "def reindex(self):\n raise NotImplementedError()", "def get_recent_history(session=None): \n from model_old_schema.reference import Reference, RefBad\n\n def f(session):\n min_date = datetime.date.today() - datetime.timedelta(days=10)\n refs = session.query(Reference).filter_by(created_by = session.user).filter(Reference.date_created >= min_date)\n refbads = session.query(RefBad).filter_by(created_by = session.user).filter(Reference.date_created >= min_date)\n \n history = {}\n today = datetime.date.today()\n for i in range(10):\n new_date = today - datetime.timedelta(days=i)\n history[new_date] = HistoryEntry(new_date)\n \n for ref in refs:\n if ref.date_created in history:\n history[ref.date_created].inc_ref_count()\n \n for refbad in refbads:\n if refbad.date_created in history:\n history[refbad.date_created].inc_refbad_count()\n \n return history\n \n return f if session is None else f(session)", "def restore(self, key, history):\n self.goal, used = key\n self._used = []\n for row in used:\n self._used.append(list(row))\n self.history = list(history)", "def __init__(self, history=None):\n\n self.__history = history if history else []", "def _invalidate_http_cache(self):\n self._requests_cache = {}", "def remove_refs(self):\n\n self.reference = None\n self.url = None", "def rebuilder(self, rebuilder):\n\n self._rebuilder = rebuilder", "def __add_current_fen_to_history(self):\n self.history = np.hstack((self.history, self.fen()))", "def update(self):\n # TO DO for updating urls if changed\n pass", "def rebuild_index(self):\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n with 
get_db_connection() as db:\n c = db.cursor()\n execute_with_retry(db, c, self._rebuild_index)", "def addHistory(self):\r\n \r\n data = self.get()\r\n \r\n if data == '':\r\n return\r\n elif len(self.history) != 0 and self.history[0] == data:\r\n return\r\n \r\n if len(self.history) == self.historySize:\r\n self.history.pop()\r\n \r\n self.history.insert(0, data)", "def history():", "def reconstruct(self, data):\n self.recons = self.trf.reconstruct(data)", "def Rebuild(self, targets, arguments):\n self.Clean(targets, [])\n self.Build(targets, arguments)", "def fillCache(self):\n items = self.source.getRecent()\n items.reverse() # make sure the most recent ones are added last to the cache\n for item in items:\n self.cache.append(item.title)", "async def get_refs_all(self, lastUpdate):\n await asyncio.gather(\n *tuple(\n asyncio.ensure_future(self.get_refs_each(item, lastUpdate))\n for item in self.criteria\n ),\n return_exceptions=True\n )", "def update(self):\r\n self.data = [self.make_item_tuple(i) for i in self.query]\r\n self._fetched = True\r\n query_cache.set(self.iden, self.data)", "def _update_dates_from_history(self, keep_updated_at: bool = False):\n updated_at = self.updated_at\n state_history = self.state_history\n\n def number_of_transitions(transition_name):\n \"\"\"Return the number of times one transition happened.\"\"\"\n total = [t for t in state_history if t['transition'] == transition_name]\n return len(total)\n\n # updated refused times\n self.refused_times = number_of_transitions('refuse')\n\n def updated_if_changed(attr, t_list, first=False):\n \"\"\"Update only if changed.\"\"\"\n existing = getattr(self, attr)\n new = get_transition_date_from_history(\n t_list, state_history, first=first\n )\n if new != existing:\n setattr(self, attr, new)\n\n # Set first deliver date\n transitions = ('deliver',)\n updated_if_changed('deliver_date', transitions, True)\n\n # Set last deliver date\n transitions = ('deliver',)\n updated_if_changed('last_deliver_date', transitions, False)\n\n # Set acceptance date\n transitions = ('accept', 'refuse')\n updated_if_changed('accept_date', transitions, False)\n\n if keep_updated_at:\n self.updated_at = updated_at", "def calculate_incremental(self):\n tmp = [x for x in self.data if x not in self._last_data]\n\n # consecutive refreshes are compared with latest block with atual data, not with latest empty diff\n if self.data:\n self._last_data = self.data\n\n self.data = tmp\n\n logging.debug(f'Sending incremental changes from {len(self._last_data)} messages to {len(self.data)}')", "def fit_history(self) -> FitResultHelper:\n pass", "def rulesetsRefreshed(self):\n self.remoteBots.allowSync = True\n self.remoteBots.syncRequests()", "def build_index(self):\n self.rebuild_index()", "def refresh(self):\n self.fetch(False)", "def _clear_caches(self):\n self._brushes = {}\n self._formats = {}", "def rebase(self, *arguments, **kwargs):\n return self.get_output('rebase', *arguments, **kwargs)", "def RecordHistory( self ):\n if not self.restoringHistory:\n record = self.activated_node\n if self.historyIndex < -1:\n try:\n del self.history[self.historyIndex+1:]\n except AttributeError, err:\n pass\n if (not self.history) or record != self.history[-1]:\n self.history.append( record )\n del self.history[:-200]\n self.historyIndex = -1", "def _reference(self, hit):\n hit = hit.deepcopy()\n hit[\"x\"] = 0.\n hit[\"px\"] = 0.\n return hit", "def clone(self):\n return _libsbml.ModelHistory_clone(self)", "def reload(self):", "def reload(self):", "def 
reload(self):\n self._populate(self.hierarchy[-1])", "def gen_rebatch(self, *args, **kwargs):\n _action = self._action_list[0]\n self._rest_batch = None\n while True:\n if self._rest_batch is None:\n cur_len = 0\n batches = []\n else:\n cur_len = len(self._rest_batch)\n batches = [self._rest_batch]\n self._rest_batch = None\n while cur_len < _action['batch_size']:\n try:\n new_batch = _action['pipeline'].next_batch(*args, **kwargs)\n except StopIteration:\n break\n else:\n batches.append(new_batch)\n cur_len += len(new_batch)\n if len(batches) == 0:\n break\n else:\n if _action['merge_fn'] is None:\n batch, self._rest_batch = batches[0].merge(batches, batch_size=_action['batch_size'])\n else:\n batch, self._rest_batch = _action['merge_fn'](batches, batch_size=_action['batch_size'])\n yield batch", "def crawl(self):\n if os.path.exists(self.__work_path):\n shutil.rmtree(self.__work_path)\n print '\\nOld Data Was Found And Removed.\\n'\n\n initial_first_run = True\n initial_recursion_depth = 0\n initial_prev_link_size = 0\n for url in self.__urls:\n self.__start_recursion(url, initial_first_run,\n initial_recursion_depth, initial_prev_link_size)\n\n Crawler.mission_report(self.__work_path)", "def _refresh_tree_ref(self):\n self._tree_ref = RedBlackNodeRef(\n address=self._storage.get_root_address())", "def reset(self):\n\n self.simple_cache = {}\n self.complex_cache = {}\n self.target_cache = {}", "def refresh(self):\n\t\tif self.id is None:\n\t\t\tprint(\"({cls}): self.id is None, can't refresh\".format(cls=self.__class__.__name__))\n\t\t\treturn\n\n\t\tif self.needs_project_id and self.project_id is None:\n\t\t\tprint(\"({cls}): self.project_id is None, can't refresh\".format(cls=self.__class__.__name__))\n\t\t\treturn\n\n\t\tif self.needs_project_id:\n\t\t\targs = [self.project_id, self.id]\n\t\telse:\n\t\t\targs = [self.id]\n\n\t\tres = getattr(self._client, \"get_\" + self.method)(*args, raw=True)\n\t\tself._create_fields(res)", "def _rebuild_ledger(self, form_id, ledger_value):\n transactions = LedgerAccessorSQL.get_ledger_transactions_for_case(\n **ledger_value.ledger_reference._asdict()\n )\n transaction_excluding_deprecated_form = [tx for tx in transactions if tx.form_id != form_id]\n new_transactions = ledger_value.get_tracked_models_to_create(LedgerTransaction)\n all_transactions = transaction_excluding_deprecated_form + new_transactions\n sorted_transactions = sorted(all_transactions, key=lambda t: t.report_date)\n\n ledger_value.clear_tracked_models(LedgerTransaction)\n ledger_value = self._rebuild_ledger_value_from_transactions(\n ledger_value, sorted_transactions, self.domain)\n return ledger_value", "def _rebuild(self):\n for shape, record in iter(self):\n self.write_row(shape, record)\n self.__isBuilt = True", "def onRegisterHistory(self):\n pass", "def update_url_cache(self, old_url):\r\n if old_url != 'self':\r\n key = self.by_url_key(old_url)\r\n link_ids = g.permacache.get(key) or []\r\n while self._id in link_ids:\r\n link_ids.remove(self._id)\r\n g.permacache.set(key, link_ids)\r\n self.set_url_cache()", "def refresh(self):\n raise NotImplementedError", "def refresh(self):\n raise NotImplementedError", "def build(self):\n\t\tself.documents = self.get_items_to_index()\n\t\tself.build_index()", "def make_reference(self):\n self.make_reference2()", "def requeue_changes(cls, queue):\n for c in sorted(cls.get_changes(), key=lambda c: 1 if fnmatch.fnmatch(c, \"*mini-buildd-build*\") else 0):\n LOG.info(\"Incoming: Re-queuing: {c}\".format(c=c))\n queue.put(c)", "def 
refresh(self):\n pass", "def refresh(self):\n pass", "def reset(self):\n self.fscore_history = []", "def recurrent(self):\n pass", "def do_api_calls_update_cache(self):\n self.get_nodes()\n self.write_to_cache(self.inventory, self.cache_path_cache)\n self.write_to_cache(self.index, self.cache_path_index)", "def push_back(self, *args):\n return _ida_frame.xreflist_t_push_back(self, *args)", "async def _on_ref_change(self, _change=None):\n self._update_heads()\n self._update_head_history()\n for remote in self.remotes.values():\n await remote._update_heads()", "def rebuildLookdev(self):\n self.logger.info('Reverting Lookdev')\n # TODO", "def build(self):\n self.originModel.build()\n return self", "def build(self):\n self.originModel.build()\n return self", "def _reset_state(self):\n # Directed graph, (u, v) => v depends on u. u, v are pairs of (rule_name, rule_dir_abs)\n # Used for generating Topological Sort\n self._rule_to_dependency_graph_adjlist = {}\n self._topologically_sorted_build_rule_names = []\n\n # List of (dependency_name, dependency_dir_abs) for each build rule\n self._rule_to_dependency_list = {}\n\n # Space for rough work :P\n self._unresolved_commands = set()", "def _reset_changes(self):\r\n self._original = {}\r\n if self.last_updated is not None:\r\n self._original['last_updated'] = self.last_updated", "def reinitialize(self, requestContext):\n pass", "def gen_internal_rebuild(self):\n stamp = self.stamp()\n return self._sub_rightmost(r\"\\+rebuilt\" + self.stamp_regex(stamp),\n \"+rebuilt\" + stamp,\n self.full_version)", "def build_graph(self):\n self._reset_iterator_memory()\n self._construct_graph_handler()\n assert self.graph_handler\n for rxn_id in self.graph_handler.get_valid_reaction_ids():\n rxn = db.Reaction(rxn_id, self._reactions)\n self.graph_handler.add_rxn(rxn)", "def references(self, references):\n\n self._references = references", "def _rebuild_index(self):\n from django.core.management import call_command\n call_command('rebuild_index', interactive=False, verbosity=0)" ]
[ "0.5566847", "0.546248", "0.54090655", "0.5392603", "0.5335858", "0.5311608", "0.52756536", "0.52722096", "0.52640605", "0.5223405", "0.5222167", "0.5147405", "0.5122049", "0.5031895", "0.50197256", "0.5009398", "0.5008371", "0.49860406", "0.49698722", "0.4964786", "0.49647814", "0.4940484", "0.49188736", "0.4905431", "0.4905431", "0.48873168", "0.48682427", "0.4848128", "0.48440447", "0.48283798", "0.4824296", "0.48134875", "0.47929567", "0.47917217", "0.47804052", "0.47793043", "0.47719508", "0.4763671", "0.4761973", "0.4761124", "0.47524855", "0.47487813", "0.47487253", "0.4733428", "0.47302505", "0.47301516", "0.47151226", "0.47059542", "0.469924", "0.46895313", "0.46875456", "0.46862784", "0.46769553", "0.46743625", "0.4672336", "0.46689495", "0.4666982", "0.46559614", "0.4638239", "0.4636301", "0.4634707", "0.4628805", "0.46286157", "0.46250087", "0.46189088", "0.46120426", "0.4611834", "0.4611834", "0.4609053", "0.46063405", "0.46060038", "0.45968094", "0.45961693", "0.4589574", "0.45873538", "0.4587306", "0.4587181", "0.45870477", "0.45865196", "0.45865196", "0.45838547", "0.4580737", "0.45807204", "0.4576649", "0.4576649", "0.45762", "0.45738748", "0.45697954", "0.45692995", "0.45691696", "0.45682177", "0.456632", "0.456632", "0.45625407", "0.45621762", "0.4559264", "0.4558031", "0.4557847", "0.45542434", "0.45527756" ]
0.67901736
0
check to see whether an id is for a group
def is_group(id): return id.startswith('G')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_uuid(self, obj, groupid):\n if self.get_uuid(obj) == groupid:\n return True", "def alreay_in_group(self,uid,group_id):\n uid = str(uid)\n user_group_list = self.get_group_list_via_uid(uid)\n return True if group_id in user_group_list else False", "def is_group(self, group_name):\n\n return group_name in self._group", "def isValidGroup(expense_group_id, cursor):\n query = \"\"\"\n SELECT * FROM expense_group WHERE id = ?\n \"\"\"\n cursor.execute(query, (expense_group_id,))\n return len(cursor.fetchall()) == 1", "def isSetId(self):\n return _libsbml.Group_isSetId(self)", "def test_groups_group_id_get(self):\n pass", "def is_in_group_user_id(user_id, group_name):\n try:\n return Group.objects.get(name=group_name).user_set.filter(id=user_id).exists()\n except Group.DoesNotExist:\n return None", "def is_in_group(self, group):\n return group in self.get_all_groups()", "def what_is(self, _id):\n for g in self.groups:\n if _id in self.h_group_ids[g]:\n return g\n return None", "def IsObjectInGroup(object_id, group_name=None):\n rhobj = rhutil.coercerhinoobject(object_id, True, True)\n count = rhobj.GroupCount\n if count<1: return False\n if not group_name: return True\n index = scriptcontext.doc.Groups.Find(group_name, True)\n if index<0: raise ValueError(\"%s group does not exist\"%group_name)\n group_ids = rhobj.GetGroupList()\n for id in group_ids:\n if id==index: return True\n return False", "def _check(isamAppliance, id=None):\n ret_obj = get_all(isamAppliance)\n\n if id != None:\n for groups in ret_obj['data']:\n if groups['id'] == id:\n return True\n\n return False", "def get_group_values(self, group_id:int, group_name:str) -> bool:\n try:\n value_list = self.cursor.execute(f\"SELECT id, name FROM {table_groups} WHERE id={group_id};\").fetchone()\n if not value_list:\n return False\n group_used_id, group_used_name = value_list\n if group_used_name != group_name:\n self.cursor.execute(f\"UPDATE {table_groups} SET name={group_name} WHERE id={group_used_id};\")\n self.connection.commit()\n return True\n except Exception as e:\n msg = f\"We faced problems with checking of the group prensence. Mistake: {e}\"\n self.proceed_error(msg)\n return False", "def is_in_group(user, group_name):\n return is_in_group_user_id(user.id, group_name)", "def is_group(obj) -> bool:\n return hasattr(obj, IOConstants.GROUP_ATTR_NAME)", "def _is_in_group(user, group_name):\n try:\n return Group.objects.get(name=group_name).user_set.filter(id=user.id).exists()\n except Group.DoesNotExist:\n return None", "def group_exists(self):\n return AzureTools().group_exists(names.group_name(self))", "def in_group(self, group):\n\n return self.secondary_groups.filter(\n groups_users.c.group_id == group.id).count() > 0", "def has_group(group, user, request):\n return group_names[group] in groupfinder(user.username, request)", "def check_presence_groups(self, id_user:int) -> bool:\n try:\n value_list = self.cursor.execute(f\"SELECT * FROM {table_users_groups} where id_user={id_user};\").fetchone()\n if value_list:\n return True\n return False\n except Exception as e:\n msg = f\"We faced problems with checking the groups for users. 
Error: {e}\"\n self.proceed_error(msg)\n return False", "def is_in_group(user, group_name):\n return Group.objects.get(name=group_name).user_set.filter(id=user.id).exists()", "def _group_matcher(group):\n return (group.uuid == _DB_UUID and\n group.name == _INST_GROUP_DB['name'] and\n group.user_id == _INST_GROUP_DB['user_id'] and\n group.project_id == _INST_GROUP_DB['project_id'] and\n group.created_at == _TS_NOW and\n group.updated_at == _TS_NOW and\n group.members == _INST_GROUP_DB['members'] and\n group.policies == [_INST_GROUP_DB['policy']['policy']] and\n group.id == 1)", "def is_group(group_name):\n\n try:\n r_json = requests.get(\n 'https://api.rozklad.org.ua/v2/groups/{}'.format(group_name)).json()\n message_text = r_json['message']\n if message_text == 'Ok':\n return True\n elif message_text == 'Group not found':\n return False\n else:\n logger.error(message_text)\n except ConnectionError as error_text:\n logger.error(error_text)\n except IndexError as error_text:\n logger.error(error_text)", "def check_id(self, id):", "def test_groups_group_id_state_get(self):\n pass", "def check_ldap_group_existence(group_id):\n endpoint = f\"/identities/groups/{group_id}\"\n http_response = call_rest_api(endpoint, \"head\", **config.DEFAULT_REST_KWARGS)\n if http_response.status_code == 200: # 200 = 'OK. Group exists.'\n return True\n return False", "def check_group(self, groupid, scenegroup):\n if self.find_with_uuid(groupid, bpy.data.objects, \"objects\"):\n self._found[\"objects\"] += 1\n self._total_server[\"objects\"] += 1\n if self.find_with_uuid(scenegroup[\"asset\"], bpy.data.meshes, \"meshes\"):\n self._found[\"meshes\"] += 1\n self._total_server[\"meshes\"] += 1", "def has_group(self, resolvable):\n group = self._resolve_group(resolvable)\n\n for g in self.groups.query(name=group.name):\n if g.name == group.name:\n return True\n\n return False", "def is_group(g, node):\n if node not in g.nodes():\n print('Not a node in the graph')\n return False\n elif g.node[node]['type'] == 'group':\n return True\n else:\n return False", "def _check_group(group):\n filled_cells = Sudoku._filter_empty(group)\n return utils.is_unique(filled_cells)", "def is_user_in_group(user, group):\n users = group.get_users()\n if user in users:\n return True\n return False", "def check_group_exists(self, group_name):\n for grp in self.get_list_groups():\n if grp[\"name\"] == group_name:\n return grp[\"id\"], grp[\"members\"]\n\n return None", "def _is_server_in_group(group, server_id):\n try:\n response, server_info = yield Effect(TenantScope(\n retry_effect(get_server_details(server_id),\n retry_times(3),\n exponential_backoff_interval(2)),\n group.tenant_id))\n except NoSuchServerError:\n raise ServerNotFoundError(group.tenant_id, group.uuid, server_id)\n\n group_id = group_id_from_metadata(\n get_in(('server', 'metadata'), server_info, {}))\n\n if group_id != group.uuid:\n raise ServerNotFoundError(group.tenant_id, group.uuid, server_id)", "def test_add_member_by_id_to_group(self):\n pass", "def id_is_valid(gal_id, query_id, data):\n return not ((data.cam_idx[query_id] == data.cam_idx[gal_id]) and (data.labels[query_id] == data.labels[gal_id]))", "def check_insert_group_user(self, id_user:int, id_group:int) -> bool:\n try:\n value_list = self.cursor.execute(f\"SELECT * FROM {table_user_group_connect} WHERE id_user={id_user} AND id_group={id_group};\").fetchone()\n if value_list:\n return True\n return False\n except Exception as e:\n msg = f\"We faced problems with the check previous insertion on th. 
Mistake: {e} \"\n self.proceed_error(msg)\n return False", "def is_in_group(user, group_name):\n return user.groups.filter(name__exact=group_name).exists()", "def is_group(self):\n # Implemented from template for osid.resource.Resource.is_group_template\n return self._my_map['group']", "def security_group_exists(self, sg_id=None, name=None):\n if sg_id:\n return sg_id in [sg.id for sg in self.get_all_security_groups()]\n elif name:\n return name in [sg.name for sg in self.get_all_security_groups()]", "def is_create_group(string, nickname):\n if string == f\"{nickname} created the group.\":\n return True\n return False", "def check_if_group_member(self, organism):\n for key, item in self.phen_dict.items():\n if organism in item:\n self.declare(Organism(name=key))", "def test_list_group_by_id(self):\n # First add our users\n resp = self.app.post('/users', data=json.dumps(self.test_user1_data))\n assert resp.status_code == 200\n\n resp = self.app.post('/users', data=json.dumps(self.test_user2_data))\n assert resp.status_code == 200\n\n # Finally list the group\n resp = self.app.get('/groups/{}'.format(self.test_group2_groupid))\n assert resp.status_code == 200\n\n data = json.loads(resp.data)\n assert self.test_user1_userid in data\n assert self.test_user2_userid in data", "def test_IGroupIntrospection_getGroupById(self):\n from Products.PlonePAS.plugins.group import PloneGroup\n\n self.assertIsInstance(self.ldap.getGroupById(\"group0\"), PloneGroup)\n self.assertEqual(self.ldap.getGroupById(\"group0\").getId(), \"group0\")\n self.assertIsNone(self.ldap.getGroupById(\"non-existent\"))", "def make_group_insertion(self, group_id:int, group_name:str) -> bool:\n try:\n self.cursor.execute(f\"INSERT INTO {table_groups} (id, name) VALUES (?, ?);\", (group_id, group_name))\n self.connection.commit()\n return True\n except Exception as e:\n msg = f\"We faced problems with isertion of the groups. 
Mistake: {e}\"\n self.proceed_error(msg)\n return False", "def is_group(self):\n return self._is_group", "def test_get_groups(self):\n group0 = self.test_save('TestGroup0')\n group1 = self.test_save('TestGroup1')\n \n group0.grant('Perm1', object0)\n group0.grant('Perm3', object1)\n group1.grant('Perm2', object1)\n \n self.assert_(group0 in get_groups(object0))\n self.assertFalse(group1 in get_groups(object0))\n self.assert_(group0 in get_groups(object1))\n self.assert_(group1 in get_groups(object1))\n self.assert_(len(get_groups(object1))==2)", "def has_group(self,groupname):\n\n if not self.check_prereqs():\n return False\n\n db = self.env.get_db_cnx()\n cursor = db.cursor()\n\n query=self.create_query(self.sql_get_groups_query+\" WHERE $groupname_field$='$groupname$'\",{'groupname':groupname,'groupname_field':self.sql_groupname_field})\n self.log.debug(\"sqlflexibleauthstore: has_group: %s\" % (query,))\n\n cursor.execute(query)\n for row in cursor:\n return True\n return False", "def test_get_device_group_by_id(self):\n pass", "def check_gadm(user_id):\n cur = g.db.execute('select gadm from user_group where id_user == ?', [user_id])\n for row in cur.fetchall():\n if row[0] == 1:\n return True\n return False", "def is_user_in_group(user, group):\n\n if user == group.get_name():\n return True\n elif user in group.get_users():\n return True\n else:\n for group in group.get_groups():\n return is_user_in_group(user, group)\n\n return False", "def is_eionet_group(self, role_id):\n for role in EIONET_GROUPS:\n if role in role_id:\n return True\n\n return False", "def test_user_group_controller_get_id(self):\n pass", "def check_user_group_connection(self, id_group:int, id_user:int) -> bool:\n try:\n value_list = self.cursor.execute(f\"SELECT * FROM {table_users_groups} WHERE id_group={id_group} AND id_user={id_user};\").fetchone()\n if value_list:\n return True\n return False\n except Exception as e:\n msg = f\"We have problem with getting values from the {table_users_groups}. 
Mistake: {e}\"\n self.proceed_error(msg)\n return False", "def check_groupname_existance(group_name):\n query=\"SELECT * FROM groups WHERE group_name='{}'\".format(group_name)\n cur.execute(query)\n return cur.fetchone()", "def test_group_field(self):\n field = self.record.find('field[@name=\\'groups_id\\']')\n self.assertEqual(field.attrib['eval'],\n '[(4, ref(\\'nh_clinical.group_nhc_admin\\'))]',\n 'Incorrect eval on groups id')", "def belongs_to(self, group):\n return self in group.users", "def __contains__(self, i):\n if not isinstance(i, FreeGroupElement):\n return False\n group = i.group\n return self == group", "def check_group_pack(self, cr, uid, context=None):\n return self.pool.get('res.users').has_group(cr, uid, 'stock.group_tracking_lot')", "def groups_by_id(request, gid):\r\n group = Group()\r\n filtered_groups = group.query({\"gid\":str(gid)})\r\n if len(filtered_groups) == 0:\r\n badRequest(\"No available group under GID \"+str(gid))\r\n return HttpResponse(json.dumps(filtered_groups))", "def get_member_from_group(member, group_name):\n query= \"SELECT * FROM groupmembers WHERE member='{}' AND group_id='{}'\".format(member, group_name)\n cur.execute(query)\n result = cur.fetchall()\n if len(result) > 1:\n return True\n return False", "def es_utilizado(self):\n group = Group.objects.filter(id=self.id)\n group = group.all()[0] if group.exists() else None\n # group = Group.objects.get(name=self.nombre)\n return group.user_set.all().exists() if group is not None else False", "def is_user_in_group(user: str, group: Group) -> bool:\n if group is None or user is None or user is \"\":\n return False\n if user in group.get_users():\n return True\n for sub_group in group.get_groups():\n user_exists = is_user_in_group(user, sub_group)\n if user_exists:\n return True\n return False", "def at_least_a_group(exp, mesh, mod):\n is_valid = True\n if not exp.find_groups(mesh):\n mess = \"At least a group needs to be defined on the selected object\"\n mod.launch(GC.ERROR, mess)\n is_valid = False\n return is_valid", "def is_member_of_group(self, mail, group):\n members = self.get_group_members(group)\n\n if mail in members:\n return True\n return False", "def has_hgid(self, id):\n return (len(self.has_hgids([id])) == 1)", "def test_add_member_by_id_to_group1(self):\n pass", "def get_existing_group(self, path, id, name):\n v_id = re.match( r'^<[^>]+>$', id) # True if variable_id (in < >)\n lookup_name = name if v_id else id\n full_path = path + \"/\" + lookup_name\n node = self.get_node(full_path, False)\n if node and node.sdef['type'] == 'group':\n # found already existing group\n return node\n else:\n return None", "def test_api_v1_groups_id_put(self):\n pass", "def verify_user_group_details(connection_obj, uid, group, device=\"server\"):\n output = get_user_group_details(connection_obj,device=device)\n if not output:\n st.log(\"Output not found {}\".format(output))\n return False\n if uid:\n user_data = re.findall(r\"uid=\\d+\\({}\\)\".format(uid), output)\n if not user_data:\n st.log(\"User data not found -- {}\".format(uid))\n return False\n if group:\n group_data = re.findall(r\"gid=\\d+\\({}\\)\".format(group), output)\n if not group_data:\n st.log(\"Group data not found -- {}\".format(group))\n return False\n return True", "def group_exists(self, path_to_group, groupname):\n self.open_db()\n try:\n group = self.h5file.get_node(path_to_group,\n name=groupname)\n except tb.NoSuchNodeError:\n group = False\n return group", "def is_id(self):\n found = False\n for p in self.ant:\n for prop 
in self.con:\n if p == prop:\n found = True\n return found", "def has_group(user, group_name):\n return user.groups.filter(name=group_name).exists()", "def does_group_exist(args):\n\n args.suppress_verify_output = True\n if verify(args) != 0:\n # restore stdout\n sys.stdout = sys.__stdout__\n print(\"Config file not valid, please use the verify function to debug\")\n return 1\n\n sanitised_group = args.group.replace('/', '-')\n\n with open(args.file, \"r\") as f:\n config_json = json.load(f)\n\n for group in config_json[\"groups\"]:\n if group[\"name\"] == sanitised_group:\n return 0\n \n return 1", "def is_user_in_group(user, group):\r\n if type(group) is not Group:\r\n raise ValueError(\"Not a valid group\")\r\n\r\n if type(user) is not str:\r\n raise ValueError(\"Not a valid user\")\r\n\r\n user_name = find_user(user, group)\r\n if user_name == \"\":\r\n return False\r\n\r\n return True", "def is_user_in_group(user, group):\n # Check group\n if user in group.users: # O(N)\n return True\n\n # Check subgroups\n for sub_group in group.groups: # O(N)\n if is_user_in_group(user, sub_group):\n return True\n\n return False", "def k(self, id):\n return id in self._m", "def test_group_of_one(self):\n self.testcases[0].group_id = 1\n self.testcases[0].put()\n self.testcases[1].key.delete()\n\n grouper.group_testcases()\n\n testcase = data_handler.get_testcase_by_id(self.testcases[0].key.id())\n self.assertEqual(testcase.group_id, 0)\n self.assertTrue(testcase.is_leader)", "def _check_groups_support(self, groups=()):\n available_groups = set(self.df[self.col_group].unique())\n for group in groups:\n assert group in available_groups, \"Group %s is not in the dataset provided\" % group", "def testGroupsNotReturnedByEnumerateUsers(self):\n results = self.pas.searchUsers()\n resultIds = [a[\"id\"] for a in results]\n self.assertFalse(\"group1\" in resultIds)", "def __contains__(self, gid: uuid.UUID) -> bool:\n return gid in self._nodes", "def test_get_device_group_by_id1(self):\n pass", "def group_exists(func):\n @functools.wraps(func)\n def decorator(self, bot, update, args):\n try:\n group_name = self.format_group(str(args[0]))\n if not self.is_group(group_name):\n bot.send_message(update.message.chat_id,\n 'Группы с таким именем не существует, '\n 'проверьте корректность введенного имени.',\n parse_mode='Markdown')\n return\n except IndexError:\n user_id = update.message.from_user['id']\n group_name = self.user_db.get_group_name(user_id)\n return func(self, bot, update, group_name)\n return decorator", "def group_exists(name):\n with fabric.api.settings(fabric.api.hide('warnings', 'stderr', 'stdout', 'running'), warn_only=True):\n group_data = fabric.api.run(\n \"cat /etc/group | egrep '^%s:' ; true\" %\n (name))\n\n if group_data:\n name, _, gid, members = group_data.split(\":\", 4)\n return dict(name=name, gid=gid, members=tuple(m.strip()\n for m in members.split(\",\")))\n else:\n return None", "def test_get_groups_any(self):\n group0 = self.test_save('TestGroup0')\n group1 = self.test_save('TestGroup1')\n \n group0.set_perms(['Perm1', 'Perm2'], object0)\n group0.set_perms(['Perm1', 'Perm3'], object1)\n group1.set_perms(['Perm2'], object1)\n \n # no perms\n self.assertFalse(user1 in get_groups_any(object0, ['Perm1']))\n \n # explicit any perms\n self.assert_(group0 in get_groups_any(object0))\n self.assert_(group0 in get_groups_any(object1))\n self.assertFalse(group1 in get_groups_any(object0))\n self.assert_(group1 in get_groups_any(object1))\n \n # has perms, but not the right one\n 
self.assertFalse(group0 in get_groups_any(object0, ['Perm3']))\n \n # has one perm, but not all\n self.assert_(group0 in get_groups_any(object0, ['Perm1','Perm3']))\n self.assert_(group0 in get_groups_any(object1, ['Perm1','Perm2']))\n \n # has single perm\n self.assert_(group0 in get_groups_any(object0, ['Perm1']))\n self.assert_(group0 in get_groups_any(object0, ['Perm2']))\n self.assert_(group1 in get_groups_any(object1, ['Perm2']))\n \n # has multiple perms\n self.assert_(group0 in get_groups_any(object0, ['Perm1','Perm2']))\n self.assert_(group0 in get_groups_any(object1, ['Perm1','Perm3']))", "def is_user_in_group(user, group):\n sub_user=group.get_users() # Get all the users within the group\n\n if user in sub_user: # If user is within the group, return True\n return True\n\n sub_group=group.get_groups() # Get all the sub groups within the group\n\n if len(sub_group)==0: # Base case if there are no sub groups within group\n return False\n\n for item in sub_group: # Recursively search within sub groups for the user\n return is_user_in_group(user,item)\n return False", "def get_group(self, obj):\n group = Group.objects.filter(name=\"teachers\")\n users = User.objects.filter(groups__in=group)\n if obj in users:\n return \"teachers\"\n else:\n return \"students\"", "def test_get_group(self):\n pass", "def check_field(self, spritegroup):\n\t\tfield = pygame.sprite.spritecollideany(self, spritegroup)\n\t\tif field:\n\t\t\treturn field.id", "def same_group(self,i,j):\n if self.group_number(i) == self.group_number(j):\n return True\n else:\n return False", "def _user_belongs_to(group_name):\n user_name = _get_user_name()\n groups = _get_user_groups(user_name)\n return group_name in groups", "def test_groups_group_id_state_put(self):\n pass", "def __is_image_id( self, image_id ):\n images_ids = self.__get_multi_images_ids()\n for id in images_ids:\n if image_id == id:\n return True\n return False", "def contains(self, g):\n if not isinstance(g, FreeGroupElement):\n return False\n elif self != g.group:\n return False\n else:\n return True", "def is_group_admin_group(self):\n groups = self['__store']\n return self.group_id in groups.get_group_admin_group_ids()", "def test_by_group(self):\n thread = self.create_thread()\n result = Thread.public.by_group(thread.group)\n self.assertIn(thread, result)", "def group_exists(ctrl_name, ctrl_info, group):\n if group not in ctrl_info:\n return ['{0}:{1} does not exist'.format(ctrl_name, group)], []\n return [], []", "def match_id(self, id):\n btest = re.compile(id, re.IGNORECASE)\n return 'ID' in self and btest.search(self['ID']) != None", "def is_user_in_group(_cls, user, group):\n if user is None or group is None:\n return \"Please enter a valid user and group\"\n\n if user in group.get_users():\n return True\n else:\n for sub_group in group.get_groups():\n if Group.is_user_in_group(user, sub_group):\n return True\n\n return False", "def checkValidId(self, id, prep_id = False):\n new_id = unquote(id)\n if prep_id: new_id = self.prepId(id)\n try:\n globalCheckValidId(self, new_id)\n return True\n except Exception:\n return str(sys.exc_info()[1])", "def can_substitute(userid, group):", "def testValidGroupResult(self):\r\n \r\n result = self._ldapPrincipalSearcher.searchPrincipal(_VALID_GROUP_QUERY, constants.SEARCH_MODE_GROUP_ONLY)\r\n self.assertEquals(len(result), 1)\r\n self.assertEquals(result[0], _MAPPED_GROUP)" ]
[ "0.7496174", "0.7397895", "0.7248163", "0.72468346", "0.7207925", "0.7201284", "0.71829623", "0.715947", "0.7065384", "0.70614374", "0.6950488", "0.69323575", "0.68989813", "0.6898132", "0.686232", "0.6849973", "0.682175", "0.68139756", "0.6812948", "0.6809037", "0.6806396", "0.6740182", "0.67214423", "0.6714532", "0.66386086", "0.6593438", "0.6543826", "0.6539853", "0.65378433", "0.65290093", "0.650988", "0.647326", "0.64495707", "0.6439802", "0.64296377", "0.6416901", "0.64117444", "0.6374181", "0.6370357", "0.6368184", "0.63563854", "0.6342354", "0.63392025", "0.6336363", "0.63342506", "0.63272923", "0.6326872", "0.6323441", "0.6318423", "0.6311096", "0.6304669", "0.630286", "0.6286258", "0.62682813", "0.62413204", "0.6234129", "0.62202203", "0.61999553", "0.61833286", "0.61757886", "0.6162104", "0.6160942", "0.6143681", "0.6140331", "0.6138161", "0.61378574", "0.6121666", "0.6116234", "0.6104929", "0.6103408", "0.6091007", "0.6072146", "0.6069604", "0.6052444", "0.6052271", "0.604956", "0.60474974", "0.6035024", "0.6028503", "0.6024347", "0.6014329", "0.6013632", "0.60077703", "0.600756", "0.6007012", "0.59999907", "0.5999389", "0.5995114", "0.5987155", "0.59643656", "0.59626323", "0.5954674", "0.59477216", "0.591623", "0.5912972", "0.59125954", "0.59014493", "0.5898888", "0.5897654", "0.58940214" ]
0.81725055
0
check to see whether an id is for a user
def is_user(id): return id.startswith('U')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def userIDExists(self, id : int) -> bool:\n return id in self.users.keys()", "def hasUser(self, id):\n try:\n self.getUser(id)\n return True\n except KeyError:\n return False", "def check_user(user):\n result_user = search_column_with_constraint(choose_database(\"auth\"), \"users\", \"id\", \"id\", user)\n # result_user = search_single_entry(choose_database(\"auth\"), \"users\", \"id\", user)\n\n if len(result_user) == 0:\n return 0\n else:\n return 1", "def isOwner(id, userId):\n db = core.connect()\n return db[id][\"createdBy\"] == userId", "def validate_user_id(self, value):\n if not User.objects.filter(id=value).exists():\n raise serializers.ValidationError('User with this id does not exist.')\n return value", "def check_id(self, id):", "def is_self(user_id):\n query_user_id = request.args.get('user_id', default=None, type=int)\n return user_id==query_user_id and user_id is not None", "def __contains__(self, userid):\r\n userid = int(userid)\r\n return bool(userid in self.players)", "def user_in_session():\n return 'user_id' in login_session", "def checkIfUserIsCurrent(self,userId : str) -> bool:\n\n if userId == userId[0]:\n return True\n else:\n return False", "def is_user(self, user='') -> int:\n try:\n if user in self.users:\n return(1)\n else:\n return(0)\n except Exception as error:\n print(f\"Error: self.is_user({user}) -> {error}\")", "def same_user(user_id):\n return user_id == login_session['user_id']", "def is_registered(user_id: str) -> bool:\n inventories = get_file(\"inventories\")\n return str(user_id) in inventories", "def user_exists(mail_or_id) -> bool:\n conn = sqlite3.connect(\"db.sqlite3\")\n c = conn.cursor()\n\n if type(mail_or_id) is int:\n c.execute(\"\"\"\n SELECT 1 FROM Users\n WHERE id=?\n \"\"\", (mail_or_id,))\n else: #mail\n c.execute(\"\"\"\n SELECT 1 FROM Users\n WHERE mail=?\n \"\"\", (mail_or_id,))\n \n conn.commit()\n \n exists = bool(len(list(c)))\n \n conn.close()\n\n return exists", "def has_user(self, user): # pylint: disable=unused-argument\r\n return False", "def check_user(self):\n try:\n if self.get_customer()[0][0] == self.dni:\n return True\n else:\n return False\n except:\n return False", "def belongs_to_user(self) -> bool:\n return flask.g.user is not None and flask.g.user.id == getattr(\n self, 'user_id', False\n )", "def is_user_id_available(self,\n\t user_id,\n\t shutit_pexpect_child=None,\n\t note=None,\n\t loglevel=logging.DEBUG):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\tshutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child\n\t\tshutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child)\n\t\treturn shutit_pexpect_session.is_user_id_available(user_id,\n\t\t note=note,\n\t\t loglevel=loglevel)", "def test_user_id_get(self):\n pass", "def user_has_permission(self, id: int, user: User) -> bool:\n return self.get_queryset().filter(pk=id).filter_for_user(user).exists()", "def user_exists(self,unique_ID):\n\t\ttry:\n\t\t\tself.data[unique_ID]\n\t\texcept KeyError:\n\t\t\treturn False\n\t\telse:\n\t\t\treturn True", "def exists(cls, user_id):\n user_id = int(user_id)\n user = DB_USER_TABLE.get(doc_id=user_id)\n if not user:\n raise ValueError(f\"unknown user '{user_id}'\")\n return user_id", "def checkIfUserExists(self, userID):\n return self.db.select_user(userID)", "def check_if_bot(self, user_id):\n return str(self.get_int_index(bot_id, 9)) in str(user_id)", "def findUniqueUserID(userID):\n connector = appEngine.connect()\n 
userIdentifier = connector.execute(\"SELECT user.userID FROM user WHERE userName=(?)\", userID).fetchone()\n #userIdentifier = db.session.execute(\"SELECT user.userID FROM user WHERE userName=(?)\", userID)\n if type(userIdentifier) == type(None):\n return False # this means there is no user in the database yet\n else:\n return True # this means there is a user in the database", "def check_id(server_id, user_id):\n\n # The user_id parameters here is the same as nym_id in other api calls\n\n # The method is described as a \"ping\" in the API documentation, which should\n # be called after wallet initialized. However a remote account on the server\n # is required.\n\n if hasattr(opentxs, 'OTAPI_Wrap_pingNotary'): # new api name\n retval = opentxs.OTAPI_Wrap_pingNotary(server_id, user_id)\n else: # todo: old api name, remove in due time\n retval = opentxs.OTAPI_Wrap_checkServerID(server_id, user_id)\n\n print(\"(debug) check_server_id retval=\", retval)\n\n # The return value `1` for success is defined by\n # case (OTClient::checkServerId)\n # in OTClient::ProcessUserCommand()\n\n return retval == 1", "def check_if_user_exists(self, email):\n for user in self.users.values():\n if user['email'] == email:\n return user['id']\n else:\n return False", "def check_user(entry_code):\n\tif len(User.objects.filter(unique_code=entry_code)) == 1:\n\t\treturn(True)\n\telse:\n\t\traise Http404('No users exist with this code.')", "def userObjExists(self, user : bbUser.bbUser) -> bool:\n return self.userIDExists(user.id)", "def get_user_from_uid(uid):\n id, tmp = uid.split('-')\n user = AuthUser.query.filter_by(id=id).first()\n if user and user.get_uid() == uid:\n return True\n return False", "def test_get_user_id(self):\n print('(' + self.test_get_user_id.__name__+')',\n self.test_get_user_id.__doc__)\n # for patient\n self.assertEqual(\n PATIENT_ID, self.connection.get_user_id(PATIENT_USERNAME))\n # for doctor\n self.assertEqual(\n DOCTOR_ID, self.connection.get_user_id(DOCTOR_USERNAME))", "def checkIfPublicUser(userId):\n return User.query.filter_by(public = True).filter_by(id = userId).first()", "def verified_connection_ft1(self, id1, id2):\n if id1 in self.users.keys():\n return id2 in self.users[id1]\n return False", "def has_reference_to_user_id(cls, user_id: str) -> bool:\n return cls.query(datastore_services.any_of(\n cls.recipient_id == user_id,\n cls.sender_id == user_id,\n )).get(keys_only=True) is not None", "def check_event_id_added_to_user(event_id: event_models.EventId,\n user_id: user_models.UserId) -> bool:\n try:\n user_identifier = user_models.UserIdentifier(user_id=user_id)\n user_from_db = async_to_sync(\n user_utils.get_user_info_by_identifier)(user_identifier)\n assert event_id in user_from_db.events_created\n return True\n except AssertionError as assert_error:\n debug_msg = f\"failed at: {assert_error}.\\\n event_id: {event_id}, user_id: {user_id}\"\n\n logging.debug(debug_msg)\n return False", "async def id(ctx, user: discord.Member = None):\n user = user or ctx.message.author\n with open('users.json') as f:\n data = json.load(f)\n\n if data.get(user.id) is not None:\n await bot.say('`User id is {}`'.format(user.id))\n else:\n await bot.say(f'I can not seem to grab your id')", "def get_in_users(self, obj):\n try:\n user = self.context.get('request').user\n except Exception:\n # raise serializers.ValidationError('Could not access request.user')\n return False\n if user in obj.users.all():\n return True\n else:\n return False", "def external_check_user_id(user_id):\n\n db = 
external_db.Database(user_id)\n response = db.get_response() # contains a dict\n local_db = internal_db.Database(user_id)\n\n if response['status'] == 'connected':\n print('(info) - connected to external database, checking userID')\n if response['response'] == 'true':\n local_db.insert_or_update_user_id(response['response'])\n return 'true'\n elif response['response'] == 'false':\n local_db.insert_or_update_user_id(response['response'])\n return 'false'\n elif local_db.is_authorized():\n return 'true'\n else:\n return 'false'\n else:\n print('(warning) - could not connected to external database, checking userID from internal database')\n # Check if the user exists and is authorized in local database\n if local_db.check_user_id() and local_db.is_authorized():\n return 'true'\n elif local_db.check_user_id() and not local_db.is_authorized():\n return 'false'\n elif not local_db.check_user_id():\n print('(info) - userID: ({}): does not has any record in local database'.format(user_id))\n print('(info) - TIP: the next time that connected to external database (if the userID exists) it will '\n 'be insert in local database to consult it')\n return 'false'", "def me(self): \n return self.users(\"identifier == $ApiUser\")", "def get_user(id):\n pass", "def has_user(self, username):\n\t\treturn username in self.users", "def check():\n # Sets variable username to username inputed by user\n username = request.args.get(\"username\")\n # Selects userid from username inputed by user (if there is one)\n userinfo = db.execute(\"SELECT * FROM users WHERE username = :username\", username=username)\n # If there is no info on the username inputed, that means username is not taken, and user can take the username\n if not userinfo:\n # Return true for the username is not taken\n return jsonify(True)\n # Return false if there is info on the username (meaning it was taken)\n return jsonify(False)", "def testPersonIsUser(self):\n member = self.portal.portal_membership.getMemberById('abc123')\n self.failUnless(member,\"%s\" % member)", "def is_userAS(self, obj):\n # Some other places simply check for owner=None.\n return UserAS.objects.filter(as_ptr=obj).exists()", "def identify(cls, user_id):\n return cls.query.get(user_id)", "def has_user(self, username):\n return username in self.user_table", "def _is_initiated(self, context):\n user_data = context.user_data\n has_attr = 'id' in user_data and 'email' in user_data\n has_values = self._id_valid(user_data['id'])\n return has_attr and has_values", "def validateUser(self,admin):\n \n res=admin.helper.getOneUser(self.name)\n if res == False:\n return True\n else:\n return False", "def test_is_valid_user_id_valid(self):\n ids = (\n \"NDcyMjY1OTQzMDYyNDEzMzMy\",\n \"NDc1MDczNjI5Mzk5NTQ3OTA0\",\n \"NDY3MjIzMjMwNjUwNzc3NjQx\",\n )\n\n for user_id in ids:\n with self.subTest(user_id=user_id):\n result = TokenRemover.is_valid_user_id(user_id)\n self.assertTrue(result)", "def is_user_name(user_name, project):\n try:\n uid = query_mod.get_mw_user_id(user_name, project)\n except Exception:\n return False\n\n if uid:\n return uid\n return False", "def check_user_id(user_id):\n\n try:\n message = (\n 'Validating submitted user id.'\n )\n logger.info(message)\n if user_id != '':\n invalid = (\n int(user_id) < 0 or\n cassy.check_user_id_exists(int(user_id))\n )\n if invalid:\n raise PlantalyticsDataException(USER_ID_INVALID)\n message = (\n 'Submitted user id successfully validated.'\n )\n logger.info(message)\n except PlantalyticsException as e:\n raise e\n except ValueError:\n 
raise PlantalyticsDataException(USER_ID_INVALID)\n except Exception as e:\n raise e", "def has_user(self):\n\t\treturn len( self.a_token ) > 0 and len( self.a_secret ) > 0", "def contains(self, user_id: int, client_name: str) -> bool:\n return client_name in self.clients[user_id]", "def is_in_group_user_id(user_id, group_name):\n try:\n return Group.objects.get(name=group_name).user_set.filter(id=user_id).exists()\n except Group.DoesNotExist:\n return None", "def is_access_allowed(self, user_id):\n ### DATABASE CODE GOES HERE\n return False", "def user_exist(cls,user_name):\n for user in cls.user_list:\n if user.user_name == user_name:\n return True\n return False", "def has_access(self, user):\n if user.is_superuser:\n return True\n return self.user_objects(user).filter(id=self.id).exists()", "def test_func(self):\n answer = self.get_object()\n return True if self.request.user == answer.author or self.request.user.is_superuser else False", "def _checkUID(self, uid):\n return uid in self._reservedUID", "def user_auth_inst(request):\n if request.user.is_authenticated:\n user = User.objects.get(email=request.user.email)\n if UserInformation.objects.filter(user=user).exists():\n inst = UserInformation.objects.get(user=user)\n if(inst.user_instructor):\n return True\n return False", "def test_get_by_id(self):\n with self.client:\n self.client.post('/users/login', data=dict(\n username=\"eschoppik\", password='secret'\n ), follow_redirects=True)\n self.assertTrue(current_user.id == 1)\n self.assertFalse(current_user.id == 20)", "def lookup(self, user_id):\n raise NotImplementedError", "def _get_user_info(self, userid):\n if User.check_existing_user(userid):\n user = User(userid)\n self.session.output({\n 'user_id': userid,\n 'user_name': user.user_name,\n 'user_type': user.user_type\n })\n return True\n else:\n self.session.output({'invalid_user': 'please enter valid user ID!\\n'}, '[ Fail to see user info ]')\n return False", "def test_user_id(self):\n new_user = self.app\n self.assertTrue(new_user.user_id, 0)\n new_user.create_user()\n self.assertTrue(new_user.user_id, 1)\n for key in new_user.users:\n self.assertEqual(new_user.user_id, key)", "def verifyNormalUserID(pUserId):\n global _limitsConfig\n # to test in VMs default user (it may have UID of 999, -1 from limit), this should work fine for any other case\n return((_limitsConfig[\"UID_MIN\"]-1 <= int(pUserId) <= _limitsConfig[\"UID_MAX\"]))", "def assert_user_exists(self, user_id):\n result = self.con.execute(\n 'SELECT id FROM registered_user WHERE id = ? 
AND active = 1',\n (user_id,)\n ).fetchone()\n if result is None:\n raise err.UnknownUserError(user_id)", "def has_reference_to_user_id(cls, user_id: str) -> bool:\n return (\n cls.query(cls.sender_id == user_id).get(keys_only=True) is not None\n )", "def user_exists(self, login):\n\t\tif login in self.users_by_name and isinstance(self.users_by_name[login], VDOM_user):\n\t\t\treturn True\n\t\treturn False", "def get_user_values(self, id_user:int) -> bool:\n try:\n value_user = self.cursor.execute(f'SELECT id from {table_users} where id={id_user}').fetchone()\n if value_user:\n return True\n return False\n except Exception as e:\n msg = f'We found problems with checking values of the previous insertion, mistake: {e}'\n self.proceed_error(msg)", "def identify_id(id: str) -> bool:\n return validate_handle(id)", "def isUserType(user, usercls):\n try:\n u = usercls.objects.get(user_ptr=user.id)\n return True\n except:\n return False", "def test_user_id_identities_get(self):\n pass", "def user(request, user_id):\n raise NotImplementedError", "def has_id(self):\n return not self.id is None", "def test_api_can_get_users_by_id(self):\n rv = self.client().post('/api/v1/user/', \n data = self.req)\n\n res = self.client().get('/api/v1/user/3')\n self.assertEquals(res.status_code, 200)", "def validate_user_id(self, user_id: int) -> APIUser:\n if (\n organization_service.check_membership_by_id(\n user_id=user_id, organization_id=self.organization.id\n )\n is None\n or (user := user_service.get_user(user_id=user_id)) is None\n ):\n raise serializers.ValidationError(\"This member does not exist.\")\n return user", "def __contains__(self, user_name):\n tuples = self._execute(\n \"SELECT name FROM users WHERE name == ?\",\n (user_name,)\n )\n return len(tuples) == 1", "def check_email(request):\n\temail_id = str(request.GET['id'])\n\tuser = User.objects.filter(username=email_id,is_active=1).exists()\n\tif user:\n\t\treturn HttpResponse(1)\n\telse:\n\t\treturn HttpResponse(0)", "def has_object_permission(self, request, view, obj):\n # Users authentified via LTI are identified by a TokenUser with the\n # resource_link_id as user ID.\n if str(self.get_resource_id(obj)) == request.user.id:\n return True\n\n return False", "def get_is_por_holder(self, obj):\n user = self.context['request'].user\n if not user.is_authenticated:\n return False\n # pylint: disable=no-member\n profile = UserProfile.objects.get(user=user)\n if obj in profile.get_council_privileges():\n return True\n return False", "def has_object_permission(self, request, view, obj):\n return request.user.id == obj.user_id", "def user_present(ctx: Context, channel: TextChannel) -> bool:\n for member in channel.members:\n if member.id == ctx.author.id:\n return True\n\n return False", "def get_if_app_services_interested_in_user(self, user_id: str) -> bool:\n if self.exclusive_user_regex:\n return bool(self.exclusive_user_regex.match(user_id))\n else:\n return False", "def test_func(self):\r\n \r\n comment = self.get_object()\r\n return self.request.user == comment.name", "def is_id(self):\n found = False\n for p in self.ant:\n for prop in self.con:\n if p == prop:\n found = True\n return found", "def is_member(request):\n if request.method == \"GET\":\n user_id = request.GET.get('user_id', None)\n board_id = request.GET.get('board_id', None)\n if Member.objects.get(board_id=board_id, user_id=user_id).exists():\n return Response({\"is_member\": True})\n else:\n return Response({\"is_member\": False})", "def get_is_por_holder(self, obj):\n user = 
self.context['request'].user\n if not user.is_authenticated:\n return False\n # pylint: disable=no-member\n profile = UserProfile.objects.get(user=user)\n if obj in profile.get_club_privileges():\n return True\n return False", "def check_have_attend_by_uid(self,uid,eid):\n uid = str(uid)\n eid = str(eid)\n count_info = self.db.get(\"SELECT COUNT(*) AS num FROM fs_user_event WHERE uid=%s and eid=%s and status=0 and checkstatus = 2\",uid,eid)\n return True if count_info['num'] else False", "def test_get_user_by_id_mismatch(client: FlaskClient) -> None:\n username = create_random_username()\n # Users with mismatching username in auth token are not allowed\n # to make the request\n other_username = create_random_username()\n auth_token = create_auth_token(other_username)\n response = get_user(client, username, auth_token.signed)\n assert_error_response(response, HTTPStatus.FORBIDDEN)", "def user_auth(request):\n if request.user.is_authenticated:\n user = User.objects.get(email=request.user.email)\n if UserInformation.objects.filter(user=user).exists():\n return True\n return False", "def check_user(self, username):\n self.dbcursor.execute(self.SQL_CHECK_USER, [username])\n row = self.dbcursor.fetchone()\n if row:\n return True \n return False", "def check_user(self, username):\n self.dbcursor.execute(self.SQL_CHECK_USER, [username])\n row = self.dbcursor.fetchone()\n if row:\n return True \n return False", "def user_by_id(self, user_id):\n\n cur = self.db.cursor()\n cur.execute(\n \"\"\"SELECT user_id, username, password, phone, email, role\n FROM users WHERE user_id = %s\"\"\", (user_id, ))\n \n user_from_db = cur.fetchone()\n if cur.rowcount == 1: \n user_id, username, password, phone, email, role = user_from_db\n resp = dict(user_id=user_id, username=username, password=password, phone=phone, email=email, role=role)\n \n return resp\n return None", "def test_get_by_id_false(self):\n\n user = CustomUser.get_by_id(44444)\n\n self.assertIsNone(user)", "def is_emperor(user_id: int, table_id: int) -> bool:\n table = Table.query.get(table_id)\n return table.emperor == user_id", "def verify_user_existance(self, user):\n for client in self.clients:\n if user == client.get_name():\n return True\n return False", "def userExists(self, user_uuid):\n return self.getUser(user_uuid) is not None", "def __contains__(self, item):\n if item == self.profile_id:\n return True", "def has_id(self, data):\n # (Dict[str, Any]) -> bool\n return self.id_column.name in data", "def user_exists(cls, name):\n\n for user in cls.user_list:\n if user.user_name == name:\n return True\n\n return False" ]
[ "0.79370236", "0.75839674", "0.7334541", "0.732782", "0.72933257", "0.7157885", "0.71560794", "0.70645714", "0.70558435", "0.7010881", "0.6988318", "0.69240403", "0.69037765", "0.6896436", "0.6886491", "0.6883643", "0.6861566", "0.6858304", "0.6853838", "0.6834101", "0.68314224", "0.68115324", "0.67674124", "0.67472446", "0.6740736", "0.67066634", "0.66630644", "0.66575384", "0.6654994", "0.66423166", "0.6601554", "0.65725845", "0.6561009", "0.65502906", "0.6545232", "0.6541909", "0.65114933", "0.65098375", "0.6504408", "0.6496566", "0.6464849", "0.6461507", "0.6445942", "0.643566", "0.64102745", "0.63910824", "0.6375397", "0.6338595", "0.63318014", "0.6327748", "0.63259304", "0.63207555", "0.63169616", "0.6311509", "0.63038445", "0.6301519", "0.63012445", "0.6296768", "0.6267946", "0.626551", "0.6264183", "0.62626183", "0.62567073", "0.62505543", "0.6249304", "0.6240451", "0.62343657", "0.62339574", "0.6233404", "0.6221618", "0.62143546", "0.6208153", "0.61886466", "0.61858016", "0.61779034", "0.6174418", "0.61654043", "0.6162467", "0.61565936", "0.61536795", "0.6147247", "0.61444074", "0.6140919", "0.6140479", "0.6135485", "0.61223614", "0.6122267", "0.6120481", "0.6116352", "0.61123633", "0.61111313", "0.61111313", "0.61095256", "0.61035836", "0.610134", "0.60992604", "0.60958534", "0.6095518", "0.60935175", "0.6093044" ]
0.8175753
0
a new session has been created add user's sid to cache with their related chat id
def user_joined(cls, sid, token): session = Session.find(token=token) if not session: return False redis.hset('sid-id', sid, session.user_id) redis.hset('id-sid', session.user_id, sid) return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_session(self, session_id):\n sessions = self.get_sessions()\n if session_id not in sessions:\n sessions.append(session_id)\n self.ref_cache.set(self.sid, sessions)", "def add_user_to_session(self,session_id,client_id,display_name):\n self.sessions[session_id][\"USERS\"][client_id] = {\n \"display_name\" :display_name,\n \"permissions\" : {\n \"add_to_queue\" : True,\n \"playback\" : True,\n \"skip\" : True,\n \"edit_queue\" : True\n }\n }", "def add_information_about_person(self, session_info):\n\n session_info = dict(session_info)\n name_id = session_info[\"name_id\"]\n issuer = session_info.pop(\"issuer\")\n self.cache.set(name_id, issuer, session_info, session_info[\"not_on_or_after\"])\n return name_id", "def _get_by_sid(self, sid):\n if self._is_valid_sid(sid):\n data = self.session_model.get_by_sid(sid)\n if data is not None:\n self.sid = sid\n logging.info(sid)\n logging.info(sessions.SessionDict(self, data=data))\n return sessions.SessionDict(self, data=data)\n logging.info('new')\n self.sid = self._get_new_sid()\n return sessions.SessionDict(self, new=True)", "def find_session(sender_id):\n session = db.sessions.find_one({'sender_id': sender_id})\n if session is None: \n session_id = str(uuid.uuid4())\n db.sessions.insert_one({'createdAt': datetime.datetime.utcnow(), 'sender_id': sender_id, 'session_id': session_id})\n else:\n session_id = session['session_id']\n return session_id", "def insert_chat_id_to_user_state(mess_chat_id):\n connection = connection_to_db()\n cursor = connection.cursor()\n\n cursor.execute(\n \"INSERT INTO user_state (user_id) \"\n \"SELECT {0} WHERE NOT EXISTS \"\n \"(SELECT user_id FROM user_state \"\n \"WHERE user_id = {0});\".format(mess_chat_id)\n )\n\n connection.commit()", "def addsession(cls, session, username, passwd):\n sessionkey = cls.sessionkey(session)\n tmpdict = dict({'username': username, 'password': passwd})\n sessionmgr.update(dict({sessionkey: tmpdict}))", "def before_request():\n if 'user_key' in session:\n user = cache.get(session['user_key'])\n\n if user is None:\n # if the user is not available in memcache we fetch\n # it from the datastore\n user = User.get_by_key_name(session['user_key'])\n\n if user:\n # add the user object to memcache so we\n # don't need to hit the datastore next time\n cache.set(session['user_key'], user)\n\n g.user = user\n else:\n g.user = None", "def add_session(self, session):\n with self._sessions_lock:\n if session.session_id in self.sessions:\n raise KeyError(\"non-unique session id %s for %s\" % (session.session_id, session))\n self.sessions[session.session_id] = session\n\n return session", "def _insert_new_session():\n request = self._make_request()\n session_existing = self._set_up_session_in_Redis_and_makeOne( # noqa: F841\n request, session_id, session_dict={\"visited\": True}, **session_args\n )\n return request", "def session(rq):\n rq.session['username']='wxy'\n return HttpResponse(__file__ + '::session and first user is my daugter:' + rq.session['username'])", "def on_session_started(event, session_started_request, session):\n\n logger.info(\"on_session_started requestId=\" + session_started_request['requestId']\n + \", sessionId=\" + session['sessionId'])\n\n # Stash the device ID along with the user ID\n device_id = event['context']['System']['device']['deviceId']\n user_id = session['user']['userId']\n with conn.cursor() as cur:\n cur.execute(\"CREATE TABLE IF NOT EXISTS user_device (user_id varchar(255), device_id varchar(255))\")\n cur.execute(\"DELETE FROM user_device WHERE 
device_id=%s\", (device_id))\n cur.execute(\"INSERT INTO user_device (user_id, device_id) VALUES(%s, %s)\", (device_id, user_id))\n conn.commit()", "def method_loginid(self, chat_id, password):\n\n with open('./package_login/logged.json', 'r') as f:\n data = json.load(f)\n\n cpass = sha256(password.rstrip().encode()).hexdigest()\n id_c = sha256(str(chat_id).rstrip().encode()).hexdigest()\n if cpass == self.password:\n\n # password ok\n ids = data\n find_it = False\n find_user=\"\"\n\n for x in ids:\n if x['chat_id'] == id_c:\n find_it = True\n find_user = x\n break\n\n if find_it:\n # find user user in json.\n if find_user['password'] != cpass:\n\n # this id was logged in with old password\n # so update the password.\n find_user['password'] = cpass\n find_user['is_logged'] = True\n\n else:\n #\n # id was already logged in.\n find_user['is_logged'] = True\n\n # update json object.\n for x in data:\n if x['chat_id'] == id_c:\n x['is_logged'] = True\n x['password'] = cpass\n\n else:\n # the id wasn't logged in, but it cans, because it\n # inserted right password.\n find_user = {\n 'chat_id': id_c,\n 'is_logged': True,\n 'password': cpass\n }\n data.append(find_user)\n\n # update json file\n with open('./package_login/logged.json', 'w') as json_file:\n json.dump(data, json_file)\n return True\n else:\n return False", "def save_session(self, session):\n db = self.open()\n db[session.id] = session", "def add_chatroom(request):\n title = request.POST['title'].strip()\n psk = request.POST['psk']\n \n # If thread already exists\n if models.MessageThread.objects.filter(title=title).exists():\n thread = models.MessageThread.objects.get(title=title)\n if thread.psk != psk:\n # Invalid passkey\n thread = None\n return HttpResponse(status=403)\n # If the thread does not exist yet\n else:\n return HttpResponse(status=405)\n\n if not request.user in thread.clients.all():\n thread.clients.add(request.user)\n channel_layer = get_channel_layer()\n\n if 'channel_name' in request.session:\n async_to_sync(channel_layer.group_add)(thread.hash_id,request.session['channel_name'])\n\n return HttpResponse(status=200)", "def set_user_cookie_id():\n #new fresh user\n if not request.cookies.get(config.COOKIE_ADSABS2_NAME):\n if current_user.is_anonymous():\n g.user_cookie_id = unicode(uuid.uuid4())\n else:\n g.user_cookie_id = current_user.get_id()\n #the user has already visited the web site\n else:\n if current_user.is_anonymous():\n #if the cookie is a valid UUID it's ok\n curr_cookie = request.cookies.get(config.COOKIE_ADSABS2_NAME)\n try:\n uuid.UUID(curr_cookie)\n g.user_cookie_id = curr_cookie\n #otherwise the app generates a new one\n except ValueError:\n g.user_cookie_id = unicode(uuid.uuid4())\n else:\n g.user_cookie_id = current_user.get_id()", "def create_single_sign_on_session(remote_ip, auth_user, secure=True):\n # must define groups but not populating at the moment !!!\n groups = []\n\n # Successful authentication and access verification, create a session and return.\n cherrypy.log.error(\"++ create_single_sign_on_session creating session for %s\" % auth_user)\n sid = uuid.uuid4().hex\n session = {\"created\": datetime.datetime.utcnow(), \"creator\": auth_user}\n with slycat.web.server.database.couchdb.db_lock:\n clean_up_old_session(auth_user)\n database = slycat.web.server.database.couchdb.connect()\n \n database.save({\"_id\": sid, \"type\": \"session\", \"created\": str(session[\"created\"].isoformat()), \"creator\": str(session[\"creator\"]),\n 'groups': groups, 'ip': remote_ip, \"sessions\": [], 
\"last-active-time\": str(session[\"created\"].isoformat())})\n\n cherrypy.response.cookie[\"slycatauth\"] = sid\n cherrypy.response.cookie[\"slycatauth\"][\"path\"] = \"/\"\n if secure:\n cherrypy.response.cookie[\"slycatauth\"][\"secure\"] = 1\n cherrypy.response.cookie[\"slycatauth\"][\"httponly\"] = 1\n timeout = int(cherrypy.request.app.config[\"slycat\"][\"session-timeout\"].total_seconds())\n cherrypy.response.cookie[\"slycatauth\"][\"Max-Age\"] = timeout\n cherrypy.response.cookie[\"slycattimeout\"] = \"timeout\"\n cherrypy.response.cookie[\"slycattimeout\"][\"path\"] = \"/\"\n cherrypy.response.cookie[\"slycattimeout\"][\"Max-Age\"] = timeout\n\n cherrypy.response.status = \"200 OK\"\n cherrypy.request.login = auth_user", "def cacheChats(self):\n logger.debug(\"Async cacheChats() -- this may take a while\")\n self.chats = OrderedDict()\n\n # First get all fresh chats\n chats = []\n for chat in self.skype.Chats:\n\n # filter chats older than 6 months\n if time.time() - chat.ActivityTimestamp > 3600 * 24 * 180:\n continue\n\n chats.append(chat)\n\n chats = sorted(chats, key=lambda c: c.ActivityTimestamp, reverse=True)\n\n for chat in chats:\n # Encode ids in b64 so they are easier to pass in URLs\n m = hashlib.md5()\n m.update(chat.Name)\n self.chats[m.hexdigest()] = chat", "def add2session(key, value):\n cherrypy.session.acquire_lock()\n cherrypy.session[key] = value\n cherrypy.session.release_lock()", "def fusion_api_set_active_session(self, sessionId):\n return self.loginsession.set_active_session(sessionId)", "def joined(self, channel):\n # find or make a session. \n ss = self.findSessions(channel)[0]\n if ss.isDefaultSession: # i.e., not found\n channel = channel.decode(self.serverEncoding)\n ss = self.store.find(d20session.D20Session,\n d20session.D20Session.name == channel).one()\n\n if ss is None:\n ss = d20session.D20Session()\n ss.name = channel.decode(ss.encoding)\n self.store.add(ss)\n Store.of(ss).commit()\n\n self.sessions.append(ss)\n\n self.responding = 1", "def set_user_session(request, user):\n #Generate an access token for this user and send it back\n user_hash = hashlib.sha224(user.user_id).hexdigest()\n cache = LoginCache()\n cache.user_hash = user_hash\n cache.user_id = user.user_id\n cache.save()\n return user_hash", "def before_request(self):\n g.start_time = dt.datetime.now()\n if 'UUID' not in session.keys() or not self.redis.zrank(SORTED_SESSION_LIST, session['UUID']):\n _uuid = session.get('UUID', default=str(uuid.uuid4()))\n session['UUID'] = _uuid\n s = dict(\n user_agent=request.user_agent.string,\n ua_browser=request.user_agent.browser,\n ua_language=request.user_agent.language,\n ua_platform=request.user_agent.platform,\n ua_version=request.user_agent.version,\n )\n self.store_session(_uuid, s)", "def on_session_started(session_started_request, session):\n print(\"on_session_started requestId=\" + session_started_request['requestId'] + \", sessionId=\" + session['sessionId'])\n \n session['attributes'] = {\"currentQuestion\":0, \"score\":0, \"date\":datetime.datetime.now().strftime(\"%B-%d-%Y %I:%M%p\"), \"billNo\":\"\", \"age\":\"\", \"result\":[]}", "def _addSessionToWishlist(self, request):\n # Preload necessary data items\n user = endpoints.get_current_user()\n if not user:\n raise endpoints.UnauthorizedException('Authorization required')\n # Verify that the session actually exists\n session = _getEntityByWebsafeKey(request.websafeSessionKey, 'Session')\n profile = self._getProfileFromUser()\n if session.key not in profile.sessionWishlist:\n 
profile.sessionWishlist.append(session.key)\n profile.put()\n return BooleanMessage(data=True)", "def startSession(self):\n self.storage.insert(self.__json__())", "def add_conversation(timestamp, user):\n if timestamp not in ACTIVE_CONVS:\n debug_print(\"Adding a new conversation.\")\n ACTIVE_CONVS[timestamp] = [user]\n elif user not in ACTIVE_CONVS[timestamp]:\n debug_print(\"Adding a new user to an active conversation.\")\n ACTIVE_CONVS[timestamp].append(user)\n debug_print(ACTIVE_CONVS)", "def add_session(self, timeslot):\n new_session = Session(self, timeslot)\n self.sessions.append(new_session)", "async def update_user(self, cookie, remote_addr, user_agent):\n # SELECT uuid_in(md5(random()::text || now()::text)::cstring);\n # CREATE UNIQUE INDEX session_uuid_idx ON sessions(((data->>0)::uuid));\n if not cookie:\n sql = ('SELECT '\n 'uuid_in(md5(random()::text || now()::text)'\n '::cstring)::text')\n result = await self.db.fetchval(sql)\n # result = await self.md5.fetchrow()\n uuid = result\n await self.update_stats(1, 0, 0)\n else:\n uuid = cookie\n current_time = int(time.time())\n # -- UPSERT is PAIN\n # INSERT INTO sessions (data) VALUES\n # ('[\"983f2816-6ed2-4c4c-a13a-5432b67b6125\", 1489337990,\n # 0, \"00.00.00.00\", \"Not detected\"]')\n # on conflict (((data ->> 0)::uuid))\n # do update set data = jsonb_set(SESSIONS.data, '{2}','1');\n sql = (\n \" INSERT INTO sessions (data) VALUES\"\n \" ('[\\\"{0}\\\", {1}, {2}, \\\"{3}\\\", \\\"{4}\\\"]') \"\n \" on conflict (((data ->> 0)::uuid)) \"\n \" do update set data = \"\n \" jsonb_set(\"\n \" jsonb_set(\"\n \" jsonb_set(\"\n \" jsonb_set(SESSIONS.data, '{{1}}','{1}'),\"\n \" '{{2}}',(select (((data->>2)::int+1)::text)::jsonb\"\n \" from sessions where data->>0 = '{0}')),\"\n \" '{{3}}', '\\\"{3}\\\"'),\"\n \" '{{4}}', '\\\"{4}\\\"');\")\n sql = sql.format(\n uuid, current_time, 0, remote_addr, user_agent)\n await self.db.execute(sql)\n return uuid", "def get_logged_in_user(self):\n\n if type(self.cache) is Cache:\n sessionId = self.cache.get('user.sessionId')\n userId = self.cache.get('user.id')\n if sessionId and userId:\n self.sessionId = sessionId\n self.userId = userId\n user = {}\n user['id'] = userId\n user['username'] = self.cache.get('user.username')\n user['profileUrl'] = self.cache.get('user.profileUrl')\n user['avatarUrl'] = self.cache.get('user.avatarUrl')\n user['reputation'] = self.cache.get('user.reputation')\n user['badge1'] = self.cache.get('user.badge1')\n user['badge2'] = self.cache.get('user.badge2')\n user['badge3'] = self.cache.get('user.badge3')\n return user", "def on_login(self, user):\n s = cherrypy.session\n s[USERDATA_SESSION_KEY] = user", "def subscribe(id, userId):\n db = core.connect()\n theUser = db[userId]\n theStream = db[id]\n allowed = not theStream[\"private\"]\n if not allowed:\n perms = permission.joinableStreams(userId);\n allowed = id in perms\n if allowed and (not id in theUser[\"streams\"]):\n theUser[\"streams\"].append(id)\n db[userId] = theUser\n if theStream[\"private\"]:\n perm = permission.permissionForUser(userId, id)\n permission.update(perm[\"_id\"], 1)", "def add_user_to_g():\n if CURR_USER_KEY in session:\n g.user = User.query.get(session[CURR_USER_KEY])\n\n else:\n g.user = None", "def add_user_to_g():\n\n if CURR_USER_KEY in session:\n g.user = User.query.get(session[CURR_USER_KEY])\n\n else:\n g.user = None", "def add_user_to_g():\n\n if CURR_USER_KEY in session:\n g.user = User.query.get(session[CURR_USER_KEY])\n\n else:\n g.user = None", "def add_user_to_g():\n\n if 
CURR_USER_KEY in session:\n g.user = User.query.get(session[CURR_USER_KEY])\n\n else:\n g.user = None", "def before_request():\n\n session.permanent = True\n app.permanent_session_lifetime = timedelta(minutes=5)\n session.modified = True\n global_buffer.user = current_user", "def addsession_unkown(update, context):\n\tuser = update.message.from_user\n\tlogger.info(\"User %s called unknown command while adding new session.\", user.first_name)\n\t\n\tuser_data = context.user_data\n\tuser_data.clear()\n\n\tupdate.message.reply_text('You called an unknown command while adding new session.\\n'\n\t\t'I cancel this attempt - no session will be added.')\n\n\treturn ConversationHandler.END", "def session_start(self, ignored):\n self.get_online_users()", "def put_request_session(self, key, inst):\n with self.GLOB_LOCK:\n inst.touch()\n self._request_sessions[key] = inst", "def on_session_started(session_started_request, session):\n \n #session.attributes['result_number'] = 1\n session['attributes'] = {}\n print(\"on_session_started requestId=\" + session_started_request['requestId']\n + \", sessionId=\" + session['sessionId'])", "def add_direct(request):\n friend = request.POST['friend'].strip()\n\n if userauth_models.User.objects.filter(username=friend).exists():\n friendUser = userauth_models.User.objects.get(username=friend)\n elif userauth_models.User.objects.filter(phone_number=friend):\n friendUser = userauth_models.User.objects.get(phone_number=friend)\n elif userauth_models.User.objects.filter(email=friend):\n friendUser = userauth_models.User.objects.get(email=friend)\n else:\n return HttpResponse(status=403) #no friend :(\n\n threadName = request.user.username + friendUser.username\n\n if models.MessageThread.objects.filter(title=threadName).exists():\n thread = models.MessageThread.objects.get(title=threadName)\n elif models.MessageThread.objects.filter(title=(friendUser.username + \\\n request.user.username)).exists():\n thread = models.MessageThread.objects.get(title=(friendUser.username \\\n + request.user.username))\n else:\n thread = models.MessageThread(title=threadName, psk=threadName, \\\n admin=request.user.username, friend1 = friendUser.username, is_direct=True)\n #thread = models.MessageThread(title=threadName, psk=threadName)\n thread.save()\n\n if not request.user in thread.clients.all():\n thread.clients.add(request.user)\n #thread.clients.add(friendUser)\n channel_layer = get_channel_layer()\n if 'channel_name' in request.session:\n async_to_sync(channel_layer.group_add)(thread.hash_id,request.session['channel_name'])\n \n #if not friendUser in thread.clients.all():\n # thread.clients.add(friendUser)\n # channel_layer = get_channel_layer()\n\n # if 'channel_name' in request.session:\n # async_to_sync(channel_layer.group_add)(thread.hash_id,request.session['channel_name'])\n\n thread_data = serializers.MessageThreadSerializer(thread).data\n\n return HttpResponse(status=200)", "def add_sessionid_in_form(self, form):\n if self.session:\n form(self.div(self.session.sessionid_in_form(self, self.request, self.response), class_='nagare-generated nagare-session-data'))", "def cache_message(self, comm_id, msg):\n if comm_id not in self._cached_messages:\n self._cached_messages[comm_id] = []\n self._cached_messages[comm_id].append(msg)", "def create_session(self,session_id,host_id,host_name,spotify_token):\n self.sessions[session_id] = {\n \"HOST\" : {\n \"ID\" : host_id,\n \"NAME\" : host_name,\n \"spotify_token\" : spotify_token,\n \"spotify_player\": None,\n },\n \"queue\" : [],\n 
\"queue_lock\" : False,\n \"current_track\" : \"\",\n \"previous_track\" : \"\",\n \"USERS\" : {}\n }", "def cache_session(self):\n # always save (to update timeout)\n self.i('Cache Session')\n with open(self.cache_file_path, \"wb\") as file:\n pickle.dump(self, file)", "def session(self):", "def add_user_to_g():\n \n if CURRENT_USER in session:\n g.user = User.query.get(session[CURRENT_USER])\n\n else:\n g.user = None", "def store_userid(request_handler, userid):\n session = sessions.LilCookies(request_handler, SESSION_SECRET)\n session.set_secure_cookie(name='userid', value=userid)", "def add_session_to_wishlist(self, websafe_session_key, user):\n wl_key = self.get_wishlist_key(user)\n\n wishlist = wl_key.get()\n\n if websafe_session_key in wishlist.sessionKeys:\n raise ConflictException(\n \"You already have this session in your wishlist.\")\n\n wishlist.sessionKeys.append(websafe_session_key)\n wishlist.put()\n\n return self.to_message(wishlist)", "def for_session(self, session_id):\n if not isinstance(session_id, str):\n raise TypeError('Session Id must be a string')\n\n self.token['sessionId'] = session_id\n\n return self", "def add_session_to_wishlist(self, request):\n return self.wishlist_service.add_session_to_wishlist(\n request.websafeSessionKey, endpoints.get_current_user())", "def user(self, uid):", "def refresh_session():\n\n hruntime.response.headers['Cache-Control'] = 'must-revalidate, no-cache, no-store'\n\n hruntime.user = hruntime.dbroot.users[hruntime.session.name]\n hruntime.i18n = hruntime.dbroot.localization.languages['cz']", "def init_user_session(request, user, remember=True):\n from appengine_utilities.sessions import Session\n lang = request.session['LANGUAGE_CODE']\n request.session = Session(set_cookie_expires=remember)#register the user with session\n request.session['LANGUAGE_CODE'] = lang#saved language\n user._session = request.session.get_ds_entity()\n from datetime import datetime\n user.last_login = datetime.now()\n if not user.profile: \n from georemindme.models import UserProfile\n p = UserProfile(user=user)\n p.put()\n user.put()\n request.session['user'] = user", "def add_message():\n user_ID = str(session['user_id'])\n if 'user_id' not in session:\n abort(401)\n if request.form['text']:\n add_message_query()\n flash('Your message was recorded')\n if redis_obj.get(user_ID):\n redis_obj.delete(user_ID)\n print \"Invalidating cache after adding new message\"\n return redirect(url_for('timeline'))", "def addsession(update, context):\n\tupdate.message.reply_text('Ok, for this I will need two items:\\n\\n'\n\t\t'1. Date and time of the registration opening (your local time)\\n' \n\t\t'2. 
URL-link to the session page\\n\\n' \n\t\t'You can always cancel the input of a new session by typing /cancel.')\n\n\tupdate.message.reply_text('Let\\'s start with the first one.\\n\\n' \n\t\t'When does the registration open for your swimming session?\\n' \n\t\t'Please, send me the date and time in the following format:\\n\\n'\n\t\t'dd/mm/yyyy hh:mm')\n\n\treturn DATETIME", "def save(self, *args, **kwargs):\n if not self.id:\n self.last_msg_time = timezone.now()\n super(WeixinUser, self).save(*args, **kwargs)", "def session_id(self, session_id):\n\n self._session_id = session_id", "def reload_sessions(self):\n import glob \n sessions = glob.glob('*.session')\n for x in sessions:\n self._db['accounts'][x.split('.')[0]] = { 'session': x.split('.')[0] }", "def persist(self, username):\n if username:\n database = get_database(username)\n add_sessions(database, [self[-1]])", "def join_session(self, information, player):\n try: # if input of int() is not convertible to integer it throws an error\n req_ses_id = int(information.split(protocol._MSG_FIELD_SEP)[1])\n except ValueError:\n print(\"session id is not int convertible: %s\" % information.split(protocol._MSG_FIELD_SEP))\n return # TODO: appropriate error to user\n\n for session in self.current_sessions:\n if session.game_id == req_ses_id:\n break\n self.__lock.acquire()\n player.current_session_id = session.game_id\n joined_session = session.add_player(player)\n # TODO: some mysterious behavior observed here. couldn't reproduce it [Novin]\n print(\"player added to current session!\")\n self.__lock.release()\n if joined_session:\n return session\n else:\n return None", "def save_user(message):\n uid = message.chat.id\n username = message.chat.username\n first_name = message.chat.first_name\n\n all_users = db.all_users\n user_data = {\n 'uid': uid,\n 'username': username,\n 'first_name': first_name\n }\n result = all_users.update_one({'uid': uid}, {'$setOnInsert': user_data}, upsert=True)\n logging.info(f'{username} started answering.')\n\n return user_data", "def subscribe_command(shared, chat, message, args):\n subs = shared[\"subs\"]\n subs.append(chat.id)\n shared[\"subs\"] = subs", "def get_user(self, session, chat_id) -> Tuple[int, str, str]:\n user = session.query(User).get(chat_id)\n return user", "async def fix_cache(self, ctx):\n self.initial_config(ctx.message.server.id)\n self.settings[server.id]['usercache'] = []\n self.save_json()", "def start(update, context):\n chats = load_chats()\n chats.append( str( update.message.chat_id ) )\n save_channels(chats)\n update.message.reply_text('Chat registered!')", "def __init__(self):\n\n self.lastcid=0\n self.calls = { }\n\n SessionList.__init__(self)", "def reinitsession(cls, arg, session):\n arg = None\n print(\"Dup Session start\")\n cls.log(1, \"Dup Session start\")\n ret, username = cls.getsessionuser(session)\n if ret is False:\n print(\"Unable to reinit the session\", session, arg)\n cls.log(3, \"Unable to reinit the session\",\n session, arg)\n return False\n ret, passwd = cls.getsessionpasswd(session)\n if ret is False:\n print(\"Unable to reinit the session\", session, arg)\n cls.log(3, \"Unable to reinit the session\",\n session, arg)\n return False\n\n IP = session[\"ip_addr\"]\n # vfid = session[\"vfid\"]\n https = session[\"ishttps\"]\n # debug = session[\"debug\"]\n # throttle_delay = session[\"throttle_delay\"]\n newsession = None\n retry = 0\n for i in range(10):\n retry = i\n newsession = auth.login(username, passwd, IP, https)\n if auth.is_failed_login(newsession):\n 
cls.sleep(20, session)\n continue\n else:\n break\n if not auth.is_failed_login(newsession):\n # print('old', cls.sessionkey(session), 'New',\n # cls.sessionkey(newsession))\n session['credential'] = newsession['credential']\n session[\"version\"] = newsession[\"version\"]\n print(\"Dup Session Completed after Iterations:\", retry)\n cls.log(1, \"Dup Session Completed after Iterations:\",\n retry)\n return True\n print(\"Dup Session Failed.\")\n cls.log(2, \"Dup Session Failed.\")\n sys.exit('Exiting as session dup didn\\'t work')\n return False", "def initializeUser(self,chat_id):\n\t\tcommand = \"INSERT INTO users (chat_id, lang, admin) VALUES (?,'EN',0);\"\n\t\tparams = (chat_id,)\n\n\t\ttry:\n\t\t\tself._run_command(command, params)\n\t\texcept sqlite3.IntegrityError:\n\t\t\t# if user already exists, do nothing\n\t\t\tpass", "def store_client_in_session(client):\n session[\"client_session\"] = client.session\n me = client.api.me()\n session[\"me\"] = me\n session[\"user_id\"] = me[\"id\"]\n session[\"user_display_name\"] = me[\"display_name\"]", "def user_login(TTL=60):\n # auth = request.authorization\n # username = auth.username\n # password = auth.password\n username = request.args.get('username')\n password = request.args.get('password')\n print 'user', username\n print 'pass', password\n\n if g.user:\n print \"g.user:\", g.user\n # return jsonify(URL=url_for('home_timeline'))\n # Create a hash key\n message_json = \"\"\n hash = hashlib.sha224(message_json).hexdigest()\n key = \"Login_API_Cache\" + hash\n print \"Created Key\\t : %s\" % key\n\n # Check if data is in cache.\n if (R_SERVER.get(key)):\n print \"** Messages returned from Redis Cache **\"\n return cPickle.loads(R_SERVER.get(key))\n\n if username != None:\n print \"session\"\n error = None\n if request.method == 'GET':\n print g.user\n print username\n user = userdetails_API_query(username)\n print \"query\", user\n if user is None:\n error = 'Invalid username'\n return jsonify(Status_code=status.HTTP_401_UNAUTHORIZED, username=username, error=error)\n elif not check_password_hash(user['pw_hash'],\n password):\n error = 'Invalid password'\n return jsonify(Status_code=status.HTTP_401_UNAUTHORIZED, username=username, error=error)\n else:\n print \"** Messages returned from MongoDB **\"\n flash('You were logged in')\n session['user_id'] = user['_id']\n username = {'Username_logged_in': username}\n\n ############### REDIS SESSION CODE #####################\n\n message_json = jsonify(\n Status_code=status.HTTP_200_OK, username=username)\n R_SERVER.set(key, cPickle.dumps(message_json))\n R_SERVER.expire(key, TTL)\n\n return message_json\n else:\n print\"logout\", g.user[1]\n flash('You were logged out')\n session.pop('user_id', None)\n username = {'Username_logged_out': g.user[1]}\n return jsonify(URL=url_for('newpublic_timeline'), Username=username, Status_code=status.HTTP_200_OK)", "def make_new_session():\n session = Session.objects.create(uuid=str(uuid4()), container_id=None)\n return session.id", "def user_left(cls, sid):\n id = redis.hget('sid-id', sid)\n redis.hdel('sid-id', sid)\n redis.hdel('id-sid', id)\n return id.decode(\"utf-8\")", "def add_user(self, session, user_data: Dict) -> User:\n chat_id = user_data[\"chat_id\"]\n username = user_data[\"username\"]\n first_name = user_data[\"first_name\"]\n last_name = user_data[\"last_name\"]\n time_registered = user_data[\"time_registered\"]\n is_admin = False\n reminder_time = datetime.time(hour=21, tzinfo=TIME_ZONE)\n\n user = session.query(User).get(chat_id)\n if 
user:\n if user.username != username:\n user.username = username\n session.commit()\n if user.is_banned is True:\n user.is_banned = False\n session.commit()\n return user\n\n new_user = User(\n chat_id=chat_id,\n is_banned=False,\n username=username,\n first_name=first_name,\n last_name = last_name,\n time_registered = time_registered,\n is_admin = is_admin,\n reminder_time = reminder_time,\n )\n session.add(new_user)\n session.commit()\n return new_user", "def enter_contest(self, message, db_session):\n username = self.ts.get_user(message)\n user = db_session.query(db.User).filter(db.User.name == username).one_or_none()\n if user:\n print('user found')\n if user.entered_in_contest:\n self._add_to_whisper_queue(user.name, 'You\\'re already entered into the contest, you can\\'t enter again.')\n else:\n user.entered_in_contest = True\n self._add_to_whisper_queue(user.name, 'You\\'re entered into the contest!')\n else:\n print('user created')\n user = db.User(entered_in_contest=True, name=username)\n # user.name = username\n db_session.add(user)\n print(user.name)\n self._add_to_whisper_queue(username, 'You\\'re entered into the contest!')", "def load_session(self, user_id):\n ukey = self.r_key('session', user_id)\n return self.r_server.hgetall(ukey)", "def new_session(self):\n body = yield from self._fetch_json(URL_LOGIN, self._new_session_data)\n self.sma_sid = jmespath.search('result.sid', body)\n if self.sma_sid:\n return True\n\n msg = 'Could not start session, %s, got {}'.format(body)\n\n if body.get('err'):\n if body.get('err') == 503:\n _LOGGER.error(\"Max amount of sesions reached\")\n else:\n _LOGGER.error(msg, body.get('err'))\n else:\n _LOGGER.error(msg, \"Session ID expected [result.sid]\")\n return False", "def timeline():\n # if not g.user:\n # print \"IT IS NOT USER\"\n # return redirect(url_for('public_timeline'))\n #messages = query()\n # followed = mongo.db.users.find_one(\n # {'_id': session['user_id']}, {'follows': 1})\n # if followed is None:\n # followed = {'follows': []}\n # messages = mongo.db.message.find(\n # {'$or': [\n # {'author_id': session['user_id']},\n # {'author_id': {'$in': followed['follows']}}\n # ]}).sort('pub_date', -1)\n # print \"inside time msg\", messages\n # print \"inside timeline\", g.user['email']\n # print \"IT IS USER\"\n\n user_ID = before_request()\n if user_ID != None:\n user_ID = str(g.user['_id'])\n if not g.user:\n return redirect(url_for('public_timeline'))\n\n ############### REDIS SESSION CODE #####################\n\n if redis_obj.get(user_ID):\n print \"Data from REdis cache\"\n return render_template('timeline.html', messages=pickle.loads(redis_obj.get(user_ID)))\n else:\n messages = query()\n redis_obj.setex(session['user_id'], pickle.dumps(messages), 30)\n print \"Data from REdis cache\"\n return render_template('timeline.html', messages=messages)\n\n # return render_template('timeline.html', messages=messages)", "def get_unique_users(chat):\n \n unique_users = { }\n\n for message in chat:\n unique_users[message['user']] = message['privilege']\n\n return unique_users", "def before_request():\n if current_user.is_authenticated:\n current_user.last_seen = datetime.utcnow()\n db.session.commit()", "def insert_data_to_sd_table(mess_chat_id):\n connection = connection_to_db()\n cursor = connection.cursor()\n\n cursor.execute(\n \"INSERT INTO song_data (user_id) \"\n \"SELECT {0} WHERE NOT EXISTS \"\n \"(SELECT user_id FROM song_data \"\n \"WHERE user_id = {0});\".format(mess_chat_id)\n )\n\n connection.commit()", "def 
remote_addUsertoROSProxy(self, userID, key):\r\n # TODO: Should this be deferred to a separate thread due to flock,\r\n # which is a blocking call?\r\n with open(self._dbFile, \"a\") as bridgefile:\r\n fcntl.flock(bridgefile.fileno(), fcntl.LOCK_EX)\r\n bridgefile.write('{0}:{1}\\n'.format(userID, key))", "def addNewUser(self) -> str:\n userId = str(uuid.uuid4())\n\n if len(self.usersQueue):\n # Start timer or logic to change user\n self.timer.start()\n\n self.usersQueue.append(userId)\n return userId", "def _test_id(self):\n #Force the session timeout to always update with the site's preferences.\n new_timeout = self.timeout\n Slate.__init__(\n self\n , self.session_cookie # Use the cookie name to isolate session data\n , self.originalid\n , timeout=new_timeout\n )\n if self.is_expired():\n # If we're expired, we want a new id to prevent session fixation.\n Slate.__init__(self, self.session_cookie, None, timeout=new_timeout)\n log('Session {0} expired -> {1}'.format(self.originalid, self.id))", "def _check_session(self, request):\n if request.user.is_authenticated:\n current_session_key = request.session.session_key\n stored_session_key = request.user.logged_in_user.session_key\n\n if stored_session_key and stored_session_key != current_session_key:\n self.switch_session_data(request, current_session_key,\n stored_session_key)\n\n # update LoggedInUser table with relevant session key\n request.user.logged_in_user.session_key = current_session_key\n request.user.logged_in_user.save()", "def on_before_render(self, request):\n \n cookie_name = request.get_action_parameter(\"session_cookie_name\",\n \"gyro-session-uuid\")\n uuid = request.get_cookie(cookie_name)\n \n session = None\n \n if uuid:\n session = self.storage.get_session(uuid)\n else:\n uuid = generate_uuid()\n \n request.session_uuid = uuid\n \n if session is not None:\n request.session = session\n else:\n def set_session(r):\n if not r:\n r = {}\n \n request.session = r\n \n return plugin.run_hook(\"on_new_session\", request).add_callback(\n set_session)", "def __init__(self, dbpath, sid=None, validity=u'3 hours', ipmatch=False):\n\n self.sid = sid\n self.dbpath = dbpath\n self.validity = validity\n self.ipmatch = ipmatch\n self.data = None\n \n connection = self._open_db()\n cursor = connection.cursor()\n cursor.execute('SELECT * FROM sqlite_master \\\n WHERE type = \\'table\\' AND name = ?;',\n (u'sessions',))\n tablecount = cursor.fetchall()\n if len(tablecount) == 0:\n cursor.execute('CREATE TABLE sessions (id PRIMARY KEY, data, \\\n created_time, accessed_time, expire_time, remote_addr);')\n\n cursor.execute('DELETE FROM sessions \\\n WHERE expire_time < datetime(\\'now\\');')\n\n if isinstance(self.sid, basestring):\n cursor.execute('SELECT id FROM sessions WHERE id = ?;',\n (self.sid,))\n idcount = cursor.fetchall()\n if len(idcount) == 0:\n self._create_session_id()\n self._insert_session_record(cursor)\n else:\n if self.ipmatch:\n current_addr = os.environ.get('REMOTE_ADDR', u'')\n past_addr = self.get_remote_addr()\n if current_addr == past_addr:\n self._update_session_record(cursor)\n else:\n self._create_session_id()\n self._insert_session_record(cursor)\n else:\n self._update_session_record(cursor)\n else:\n self._create_session_id()\n self._insert_session_record(cursor)\n\n cursor.close()\n connection.commit()\n connection.close()", "def open_session(self, user, client_pid):\n if self._active_sessions.is_client_active(client_pid):\n self._active_sessions.increment_reference_count(client_pid)\n else:\n signals, 
controls = self.get_user_access(user)\n watch_id = self._watch_client(client_pid)\n self._active_sessions.add_client(client_pid, signals, controls, watch_id)", "def do_login(user):\n session[CURRENT_USER_KEY] = user.id", "def before_request():\n g.user = None\n if 'user_id' in session:\n g.user = User.query.get(session['user_id'])", "def set_session_recs(rec_num: int, username: str, followers: int, likes: int,\n img_path: str) -> None:\n session['user_rec' + str(rec_num+1)] = str(username)\n session['fols_rec' + str(rec_num+1)] = str(followers)\n session['likes_rec' + str(rec_num+1)] = str(likes)\n session['img_rec' + str(rec_num+1)] = img_path", "def store_session_record(self, obj_type, obj_id):\n self.builtin.log(\"Storing {} {} to session records\".format(obj_type, obj_id))\n self._session_records.append({\"type\": obj_type, \"id\": obj_id})", "def add_user_to_g():\n # access g in templates, g only lives for life of request\n if CURR_USER_KEY in session:\n g.user = User.query.get(session[CURR_USER_KEY])\n\n else:\n g.user = None", "def user_tweets(username, TTL=30):\n # profile_user = query_db('select * from user where username = ?',\n # [username], one=True)\n profile_user = userdetails_API_query(username)\n print \"profile \", profile_user\n if profile_user is None:\n abort(404)\n followed = False\n\n if g.user:\n followed = mongo.db.users.find_one(\n {'_id': g.user[0]}, {'follows': profile_user['_id']}) is not None\n # followed = query_db('''select 1 from follower where\n # follower.who_id = ? and follower.whom_id = ?''',\n # [g.user[0], profile_user['user_id']],\n # one=True) is not None\n # Create a hash key\n user_profile = \"\"\n hash = hashlib.sha224(user_profile).hexdigest()\n key = \"user_timeline_key\" + hash\n # print \"Created Key\\t : %s\" % key\n\n############### REDIS SESSION CODE #####################\n\n # Check if data is in cache.\n if (R_SERVER.get(key)):\n print \"** Messages returned from Redis Cache **\"\n return cPickle.loads(R_SERVER.get(key))\n\n else:\n print \"** Messages returned from MongoDB **\"\n messages = user_query(profile_user)\n data = []\n # print messages\n for row in messages:\n data.append({'user': row['username'], 'message': row['text'],\n 'pub_date': format_datetime(row['pub_date'])})\n # print data\n user_profile = jsonify(messages=data, Status_code=status.HTTP_200_OK)\n\n R_SERVER.set(key, cPickle.dumps(user_profile))\n R_SERVER.expire(key, TTL)\n return user_profile", "def session(self, sid):\n s = self.list\n if sid not in s:\n for k in s:\n if s[k]['uuid'] == sid:\n if s[k]['type'] == 'meterpreter':\n return MeterpreterSession(k, self.rpc, s)\n elif s[k]['type'] == 'shell':\n return ShellSession(k, self.rpc, s)\n raise KeyError('Session ID (%s) does not exist' % sid)\n if s[sid]['type'] == 'meterpreter':\n return MeterpreterSession(sid, self.rpc, s)\n elif s[sid]['type'] == 'shell':\n return ShellSession(sid, self.rpc, s)\n raise NotImplementedError('Could not determine session type: %s' % s[sid]['type'])", "def update_chats(self):\n res = requests.get(self.info_url)\n messages = res.json()['result']\n chats = set(m['message']['chat']['id'] for m in messages)\n for c in chats:\n if c not in self.chats:\n self.save_chat(c)\n return [c for c in chats]", "def add_user(self, user):\n\n try:\n logging.info(\"channel redis add_user\")\n \n data = user.to_json()\n key = \"%s:%s\" % (self.channel_id, user.username)\n \n logging.info(\"adding new user timestamp: %s\" % key)\n # add our username to a set orderes by timestamp to be able to quickly 
purge\n affected = self.redis_server.zadd(ENVIRONMENT['REDIS_PREFIX'] + \"users_timestamp\",key, user.timestamp)\n logging.info(\"added new user timestamp(%s): %s:%s\" % (affected, key, user.timestamp))\n except Exception, e:\n logging.info(\"ERROR adding user %s: %s\" % (user, e))", "def broadcast_to_session(self,session_id,header,msg, exclude = []):\n host = self.sessions[session_id][\"HOST\"][\"ID\"]\n self.send(header,host,msg)\n for key in self.sessions[session_id][\"USERS\"].keys():\n if key not in exclude:\n self.send(header,key,msg)", "def insert_item(self, token_object,\n new_session, session_time=timedelta(0)):\n if self.file_type == settings.APACHE_COMMON:\n url_obj = get_or_create(\n self.session, Uurl, url=token_object.resource_requested)\n elif self.file_type == settings.APACHE_COMBINED:\n url_obj = get_or_create(\n self.session, Uurl, url=token_object.resource_requested)\n elif self.file_type == settings.SQUID:\n url_obj = get_or_create(\n self.session, Uurl, url=token_object.url)\n\n # If this is a new session\n if new_session:\n # Create session object\n session_obj = Session(\n ip=token_object.ip_address, session_time=session_time)\n # Set start and end time\n session_obj.start_time = token_object.date_time\n session_obj.end_time = token_object.date_time\n # If new_session is False, new session may or may not be created\n # (depending upon the session_time)\n else:\n # Try to get session object\n session_obj = get_or_create(\n self.session, Session, ip=token_object.ip_address)\n # If the object is a new session\n if session_obj.session_time is timedelta(0):\n session_obj.start_time = token_object.date_time\n\n session_obj.session_time = session_time\n session_obj.end_time = token_object.date_time\n\n # Add url to session\n session_obj.session_urls.append(url_obj)\n self.session.add(session_obj)" ]
[ "0.6442786", "0.60261506", "0.5993311", "0.59842753", "0.59716135", "0.5773349", "0.5754616", "0.5753378", "0.5728176", "0.5712133", "0.5694755", "0.5632899", "0.56139004", "0.5612166", "0.5564319", "0.5548212", "0.55396765", "0.5534832", "0.5517975", "0.5509574", "0.548927", "0.5463966", "0.54474163", "0.54244804", "0.54227936", "0.5407589", "0.5389396", "0.5386568", "0.5384981", "0.53820574", "0.537453", "0.5372782", "0.5363202", "0.5353696", "0.5353696", "0.5353696", "0.5352338", "0.53462285", "0.53454953", "0.53249013", "0.5317697", "0.5317509", "0.5287201", "0.5268308", "0.5266003", "0.5264498", "0.5259895", "0.5259317", "0.5248616", "0.52479506", "0.52461725", "0.52349514", "0.5229127", "0.5217689", "0.5204892", "0.52043545", "0.51997817", "0.5198849", "0.5185952", "0.51747584", "0.51722336", "0.5170574", "0.5169622", "0.5157941", "0.51578385", "0.51577944", "0.5153592", "0.5140128", "0.513708", "0.51341623", "0.5132057", "0.51293135", "0.5115777", "0.51146", "0.51124203", "0.51106393", "0.51088697", "0.5107482", "0.51010317", "0.5100724", "0.5097862", "0.5075997", "0.506347", "0.5063008", "0.5055647", "0.5050946", "0.5044767", "0.50430936", "0.5037309", "0.5037166", "0.50282156", "0.5027729", "0.50245357", "0.5017284", "0.5016214", "0.50105864", "0.5009223", "0.50068223", "0.5006403", "0.5006289" ]
0.5635927
11
a user has been disconnected from the server. delete its sid
def user_left(cls, sid): id = redis.hget('sid-id', sid) redis.hdel('sid-id', sid) redis.hdel('id-sid', id) return id.decode("utf-8")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def disconnect():\n\n\tglob.tokens.deleteToken(glob.tokens.getTokenFromUserID(999))", "def connection_lost(self, exc):\n if isinstance(self.current, Session):\n self.current.removeUser(self)\n elif self.current == self:\n del super.clients[self]\n else:\n anon.remove(self)", "def connection_closed(self):\n if(SOCKET_TO_USERID.has_key(self.source)):\n if( VALIDATED_USERS.is_validated(SOCKET_TO_USERID[self.source])):\n VALIDATED_USERS.remove_user(self.source)\n tmp = incoming.disconnect_user(self.source)\n if(tmp):\n server.disconnect_from_server(tmp)\n elif(server.CONNECTIONS.has_key(self.source)):\n tmp = server.disconnect_from_server(self.source)\n incoming.disconnect_user(tmp)", "def disconnect_handler():\n print(\"---------------- DISCONNECTED ----------------\")\n\n phone_num = redis.get(request.sid)\n redis.delete(phone_num)\n redis.delete(request.sid)", "def session_end(self, user):\n self._transport.delete(\"/service/v3/sessions\", self._subject, username=user)", "async def logout(self):\n try:\n user = self.request.session.get(\"user\")\n chat = self.request.session.get(\"chat\")\n active_sockets = self.request.app.active_sockets\n active_sockets.get_chat(chat).del_user(user)\n\n self.request.session.pop(\"user\")\n self.request.user = None\n self.request.chat = None\n\n return {\n \"Type\": \"account\",\n \"Command\": \"logout\",\n \"Status\": \"success\"\n }\n except KeyError:\n return {\"Type\": \"account\", \"Command\": \"logout\", \"Status\": \"error\"}", "def handle_disconnect(self,message,conn):\n if self.connections[message[\"ID\"]][\"host\"] == True: #if user is host, delete all connections\n print(\"HOST DISCONNECTING\")\n host = message[\"ID\"]\n session_location = self.connections[message[\"ID\"]][\"session_id\"]\n for key in self.sessions[session_location][\"USERS\"].keys():\n self.disconnect(self.connections[key][\"CONN\"],message[\"ID\"],f\"[HOST <{host}>:<{self.connections[host]['display_name']}>] Unexpectedly Disconnected\") #close each connection\n self.delete_connection_entry(key) #delete the connection from the connections dictionary\n self.delete_session(session_location) # delete the session\n self.delete_connection_entry(message[\"ID\"])\n self.disconnect(conn,message[\"ID\"], \"You Disconnected\")\n else:\n session_location = self.connections[message[\"ID\"]][\"session_id\"]\n self.delete_session_entry(session_location,message[\"ID\"])\n self.broadcast_to_session(session_location, \"USER_DISCONNECT\",f\"[USER <{message['ID']}>:<{self.connections[message['ID']]['display_name']}>] Disconnected\", exclude=[message[\"ID\"]])\n self.delete_connection_entry(message[\"ID\"])\n self.broadcast_to_session(session_location, \"USERS\", json.dumps(self.sessions[session_location][\"USERS\"]))\n self.disconnect(conn,message[\"ID\"],\"You Disconnected\")", "def unregister(self, user_id: int, client_name: str) -> None:\n with self.app.app_context():\n user: User = User.query.get(user_id)\n\n if not self.contains(user_id, client_name):\n raise ValueError(f'User {user.username!r} has no associated client '\n f'named {client_name!r}')\n\n remote_addr = self.clients[user_id][client_name].protocol.remote_address\n self.clients[user_id][client_name].protocol.close()\n del self.clients[user_id][client_name]\n logger.info(f'Unregistered client {client_name!r} of user '\n f'{user.username!r} ({util.format_addr(remote_addr)})')", "def userQuit(self, user, quitmessage):\n sessions = self.findSessions(user)\n for ss in sessions:\n user = user.decode(ss.encoding)\n 
self.sendResponse(ss.removeNick(user))", "def del_user(self, username):\n pass", "def connectionLost(self,\n reason=twisted_error\n ):\n\n if self.connected_users and self.user_key in self.connected_users:\n self.log_to_debug(\n line=f\"DELETE CONNECTION WITH < {self.addr.host}:{self.addr.port} >\"\n f\" -> Reason: {reason.getErrorMessage()}\"\n )\n del self.connected_users[self.user_key]\n self.log_file.log_all(\n priority=3,\n string=f\"Connection lost with {self.addr.host}:{self.addr.port} \"\n f\"-> Reason: {reason.getErrorMessage()}\"\n )", "def on_disconnect(self, client, userdata, rc):\n\t\tprint (\"[{}] Client disconnected\".format(\n\t\t\tint(time.time())\n\t\t))", "def forget(self, uid):", "def delete_user():", "def on_disconnect():\n print(\"User disconnected!\")", "def disconnect_user(self, user):\n\t\tis_user_removed = False\n\t\tif user in self.users.all():\n\t\t\tself.users.remove(user)\n\t\t\tself.save()\n\t\t\tis_user_removed = True\n\t\treturn is_user_removed", "def on_disconnect(self, client, userdata, msg):\n self.log.warning(\"Disconnected: \" + str(msg))\n self.connected = False\n self.dconn += 1", "def delete_sql_login(user, server, userdata):\n global servers_to_remove\n betterprint(\"Removing LOGIN {} from server {}\".format(user, server))\n sql = \"DROP LOGIN [{}]\".format(user)\n try:\n betterprint(\"SQL: \" + sql)\n rows, userdata = execute_sql(sql, server, None, False, userdata)\n betterprint(\"LOGIN removal successful.\")\n\n if rows:\n servers_to_remove.append(server)\n return True, userdata\n except Exception as e:\n print (e)\n return False, userdata", "def sipserver_user_remove(self, user: str) -> None:\n self.remove_endpoint_from_sipserver(endpoint=user)", "def remove(self, session: \"pwncat.manager.Session\"):", "def unregister_user(self, userID: str):\n requests.post('https://' + self.serverIp + '/unregister/' + userID, verify=False)", "def remove_user(self):\n self.currentuser = None\n self.carlocked = False", "def on_disconnect():\n logger.info(f\"{request.sid} Disconnected\")", "def remove(self, user_id):\n pass", "def logout_user(session):\n del session['user']", "def view_removeConnection(self, user, tagA, tagB):\r\n key = int(md5(tagA).hexdigest(), 16) ^ int(md5(tagB).hexdigest(), 16)\r\n\r\n try:\r\n connection = user.connections.pop(key)\r\n except KeyError:\r\n raise InvalidRequest('Can not disconnect two unconnected '\r\n 'interfaces.')\r\n\r\n connection.dontNotifyOnDeath(user.connectionDied)\r\n connection.destroy()\r\n\r\n # TODO: Return some info about success/failure of request\r", "def do_logout():\n del session[CURRENT_USER_KEY]", "def destroy(self):\r\n for user in self._users.copy():\r\n user.destroy()\r\n\r\n assert len(self._users) == 0\r\n\r\n self._interface.unregisterConnection(self)\r\n self._interface = None\r\n\r\n self._protocol.unregisterConnection(self)\r\n self._protocol = None", "def handle_unexpected_disconnect(self,client_id, conn):\n try:\n if self.connections[client_id][\"host\"] == True: #if user is host, delete all connections\n session_location = self.connections[client_id][\"session_id\"]\n for key in self.sessions[session_location][\"USERS\"].keys():\n self.disconnect(self.connections[key][\"CONN\"],client_id,f\"[HOST <{client_id}>:<{self.connections[client_id]['display_name']}>] Unexpectedly Disconnected\") #close each connection\n self.delete_connection_entry(key) #delete the connection from the connections dictionary\n self.delete_session(session_location) # delete the session\n 
self.delete_connection_entry(client_id) # delete the original client entry from connections\n self.disconnect(conn,client_id,\"You Disconnected\")\n else:\n session_location = self.connections[client_id][\"session_id\"]\n self.delete_session_entry(session_location,client_id)\n self.broadcast_to_session(session_location, \"USER_DISCONNECT_UNEXPECTED\",f\"[USER <{client_id}>:<{self.connections[client_id]['display_name']}>] Unexpectedly Disconnected\", exclude=[client_id])\n self.delete_connection_entry(client_id)\n self.disconnect(conn,client_id,\"You Disconnected\")\n except:\n print(\"Something went wrong\")", "def __removeClient(self):\n client = self.sender()\n if (client in self.__clients):\n self.__clients.remove(client)\n \n print \"disconnect from\", self.__clientName(client)", "def disconnect():\n logging.info('Client disconnected')", "def on_disconnect( client, userdata, rc ):\n logging.info( \"Disconnected from Broker. Returned code: %s\\n\" %rc )\n client.connected_flag = False\n client.disconnect_flag = True", "def clean_up_old_session(user_name=None):\n cherrypy.log.error(\"cleaning all sessions for %s\" % user_name)\n if \"slycatauth\" in cherrypy.request.cookie:\n try:\n # cherrypy.log.error(\"found old session trying to delete it \")\n sid = cherrypy.request.cookie[\"slycatauth\"].value\n couchdb = slycat.web.server.database.couchdb.connect()\n session = couchdb.get(\"session\", sid)\n if session is not None:\n couchdb.delete(session)\n except:\n # if an exception was throw there is nothing to be done\n pass\n if user_name is not None:\n try:\n couchdb = slycat.web.server.database.couchdb.connect()\n sessions = [session for session in couchdb.scan(\"slycat/sessions\") if\n session[\"creator\"] == user_name]\n if sessions:\n #cherrypy.log.error(\"sessions found %s\" % user_name)\n for session in sessions:\n couchdb.delete(session)\n #cherrypy.log.error(\"sessions deleted %s\" % user_name)\n except:\n # if an exception was throw there is nothing to be done\n pass", "def player_disconnect(event_var):\r\n debug.write(\"[SourceRPG] Handling player_disconnect\", 1)\r\n userid = event_var['userid']\r\n gamethread.cancelDelayed('sourcerpg_reset_%s' % userid)\r\n if userid in players:\r\n debug.write(\"Remove player instance from players manager\", 1)\r\n players[userid]['lastconnected'] = int(time.time())\r\n debug.write(\"Calling object destructor...\", 1)\r\n del players[userid] # call's the destructor\r\n debug.write(\"[SourceRPG] player_disconnect handled\", 1)", "def delete_user():\n del globalopts.appdata[request.user]\n del globalopts.users[request.user]\n return \"\", 200", "def userLeft(self, user, channel):\n ss = self.findSessions(channel)[0]\n user = user.decode(ss.encoding)\n self.sendResponse(ss.removeNick(user))", "def remove_session(self) -> None:\n pass", "def delete_user():\n #TODO user delete\n pass", "def delete(bot, update):\n chatID = update.message.chat_id\n username = get_user_info(chatID)['PID']\n logger.info(\"Deleting user credentials for {}!\".format(username))\n Chat.query.filter(Chat.chatID == chatID).delete() # Delete the user's record referenced by their ChatID\n Misc.query.filter(Misc.chatID == chatID).delete()\n db_session.commit()\n messageContent = \"Your credentials have been deleted, {}\\nHope to see you back soon!\".format(username[3:-4].title())\n bot.sendMessage(chat_id=update.message.chat_id, text=messageContent)\n \n mp.track(username, 'User Left')\n mp.people_set(username, {'active': False })", "def connectionDied(self, connection):\r\n if 
self.connections:\r\n for uid, candidate in self.connections.iteritems():\r\n if candidate == connection:\r\n del self.connections[uid]\r\n break\r\n else:\r\n print('Received notification for dead Connection, '\r\n 'but User is already destroyed.')", "def disconnect_user(room: PublicChatRoom, user) -> bool:\n return room.disconnect_user(user)", "def del_user(self, name):\n del self.users[irc.strings.IRCFoldedCase(modules.trim_nick(name))]", "def cleanup(self,context,result):\n if self.do_cleanup:\n try:\n return_code, stdout, stderr= runProgram([context.gsec_path,\n \"-user\", context.user_name,\n \"-password\", context.user_password,\n \"-delete\", self.user_name],[])\n except:\n result.note_exception(cause=\"Resource cleanup: Can't remove user.\")\n result[\"user_name\"] = self.user_name\n return\n else:\n if return_code != 0:\n self.fail_and_annotate_streams(result, Result.ERROR,'GSEC','Delete user',\n stdout,stderr)", "def on_disconnect(self):\n print('Client disconnected!')", "def on_disconnect(unused_client, unused_userdata, rc):\n print(f\"on_disconnect: {error_str(rc)}\")\n print()\n\n global connected\n connected = False", "def kill_session(user):\n\n # Destroy cookie\n user.cookie = None\n user.cookie_expiration = datetime.now()\n\n # Commit\n db.session.add(user)\n db.session.commit()", "def ws_disconnect(message):\n language = message.channel_session['knocker']\n grLangUser = Group('knocker-{0}-{1}'.format(language, \n message.user.id))\n grLangUser.discard(message.reply_channel)", "async def disconnect(self, ctx):\n player = self.bot.lavalink.players.get(ctx.guild.id)\n if not player.is_connected:\n return await ctx.send(\"I'm not currently connected to a voice channel :no_entry:\")\n if not ctx.author.voice or (player.is_connected and player.connected_channel.id != ctx.author.voice.channel.id):\n return await ctx.send(\"You have to be in my voice channel to disconnect :no_entry:\")\n if player.fetch(\"sessionowner\") == ctx.author.id:\n player.queue.clear()\n await player.disconnect()\n player.delete(\"votes\")\n await ctx.send(\"Disconnected <:done:403285928233402378>\")\n else:\n await ctx.send(\"Only the session owner can disconnect the bot :no_entry:\")", "def onUserDeletion(event):\n client = getUtility(IAdminClient)\n xmpp_users = getUtility(IXMPPUsers)\n storage = getUtility(IPubSubStorage)\n\n principal_id = event.principal\n principal_jid = xmpp_users.getUserJID(principal_id)\n\n if principal_id in storage.leaf_nodes:\n storage.leaf_nodes.remove(principal_id)\n if principal_id in storage.publishers:\n del storage.publishers[principal_id]\n if principal_id in storage.node_items:\n del storage.node_items[principal_id]\n if principal_id in storage.collections['people']:\n storage.collections['people'].remove(principal_id)\n\n pass_storage = getUtility(IXMPPPasswordStorage)\n pass_storage.remove(principal_id)\n\n d = deletePrincipal(client, principal_jid)\n return d", "def on_disconnect(unused_client, unused_userdata, rc):\n print('on_disconnect', error_str(rc))\n status_light.off()", "def delete_session_entry(self,session_id,client_id):\n del self.sessions[session_id][\"USERS\"][client_id]", "def guiding_disconnect():\r\n try:\r\n app.guider.disconnect()\r\n return jsonify({\"status\": True})\r\n except Exception as e:\r\n return jsonify(\r\n {\"status\": False, \"error\": \"Failed disconnecting from guider: %s\" % e}\r\n )", "def disconnect(self, code):\n try:\n if not self.scope['user'].is_authenticated:\n logger.error('User in not authenticated')\n self.close()\n\n 
user = Profile.objects.get(user=self.scope['user'])\n group_name = user.group_name\n\n self.channel_layer.group_discard(group_name, self.channel_name)\n except Exception as e:\n logger.error(e)", "def connection_lost(self, exc):\n super().connection_lost(exc)\n\n if self.session is not None:\n # Free up the allocated ID.\n self.server.session_id_allocator.free(self.session.id)\n\n # Kill the session.\n self.session.close(SessionCloseErrorCode.SESSION_DIED)\n self.session = None\n\n self.server = None", "def disconnect(self):\n self.rpc.call(MsfRpcMethod.DbDisconnect)", "def leave(self):\n self.remove(\n self.subreddit._reddit.config.username or self.subreddit._reddit.user.me()\n )", "def fbdisconnect():\n\n facebook_id = login_session['facebook_id']\n url = 'https://graph.facebook.com/%s/permissions' % facebook_id\n h = httplib2.Http()\n result = h.request(url, 'DELETE')[1]\n del login_session['facebook_id']\n return \"you have been logged out\"", "def delete_session(db, useremail):\n db.cursor().execute('DELETE FROM sessions WHERE useremail IS ?', [useremail])\n db.commit()", "def user_logged_out(connection,user):\r\n with connection:\r\n return connection.execute(UPDATE_USER_LOGIN_STATUS_TO_FALSE,(user,))", "def logout_user():\n pass", "def logout(self):\n self.session.disconnect()", "def delete_sql_user(user, server, database):\n betterprint(\"Deleting {} from server {} and db {}\".format(user, server, database))\n\n sql = \"DROP USER [{}]\".format(user)\n\n try:\n betterprint(\"SQL: \" + sql)\n rows, userdata = execute_sql(sql, server, database)\n betterprint(\"USER removal successful.\")\n return True\n except Exception as e:\n print (e)\n return False", "def disconnect(self):\n\n self.connection.logout()", "def del_user(self, server, username, quiet=False):\n self._op_user(\"del\", server, {\"username\": username}, quiet)", "async def unregister(websocket):\n USERS.remove(websocket)\n GAME.new_game()\n await update_players()", "def delete_user(id):\n pass", "def user_disappears(self, user):\n pass", "def on_disconnect():\n print('User disconnected!')\n return 'disconnected'", "def delUser(self, id):\n del self.users[id]\n if id in self._nameCache:\n del self._nameCache[self._nameCache[id]]\n del self._nameCache[id]\n if id in self._hostmaskCache:\n for hostmask in self._hostmaskCache[id]:\n del self._hostmaskCache[hostmask]\n del self._hostmaskCache[id]\n self.flush()", "def remove_user(self, username):\n del self.user_table[username]", "def disconnect_client(self, session):\n cls, pending, connected = self._proxies[session.app_name]\n try:\n connected.remove(session)\n except ValueError:\n pass\n logger.info('Session closed %s %s' %(session.app_name, session.id))\n session.close()\n self.connections_changed(session.app_name)", "def logoff_session(site_id):\n log = current_app.log\n db = request.db\n Cred = db.tables.Cred\n user_id = SiteService.get_current_uid()\n cred = Cred.query.filter_by(cred_owner=user_id,\n site_id=site_id).first()\n if cred:\n with managed_session(request,\n message=\"Database error while deleting creds\",\n http_error_code=500) as session:\n session.delete(cred)\n log.info(\"Deleted session for user %u at site %u.\", user_id, site_id)\n return \"\"", "def sign_out(self) -> None:\n self.is_signed_in = False\n self.db.close()\n log(f\"User:{self.id} has signed out.\")", "def delete_user_account(connection,user):\r\n with connection:\r\n connection.execute(DELETE_SPECIFIC_USER,(user,))", "def on_removeuser(self, username):\n self.users.remove(username)\n print 
('%s left the room.' % username)", "def handle_exit_room_session(self, lobby_command, client_socket):\n user = self.clients[client_socket]['data'].decode('utf-8')\n for room in self.rooms:\n if user in room.room_attrbts['active']:\n room.room_attrbts['active'].remove(user)\n msg = f'User {user} is no longer active in room {room.name}.'\n print(msg)\n return\n msg = f'Room {room.name} not found or user {user} is not yet a member. NONACTIVE'\n self.log_and_send(client_socket, msg)\n return", "async def delete_player_status(user_id):\n await ex.conn.execute(\"DELETE FROM blackjack.currentstatus WHERE userid = $1\", user_id)", "async def del_user(conn: LDAPConnection, user: dict, mailman: Client) -> None:\n await conn.delete(user[\"dn\"])\n uid = user[\"attributes\"][\"uid\"][0]\n rmtree(user[\"attributes\"][\"homeDirectory\"][0])\n rmtree(f\"/webtree/{uid[:1]}/{uid}\")\n mailing_list = mailman.get_list(\"announce-redbrick\")\n mailing_list.unsubscribe(f\"{uid}@redbrick.dcu.ie\")", "def remove_client(self, username):\n try:\n self.clients.pop(username)\n except KeyError:\n print 'Client not in server.clients{}'", "def deauth(nick):\n global auth_list\n if nick in auth_list:\n a = auth_list.index(nick)\n del(auth_list[a])", "def on_client_disconnect(self, client):\r\n\t\tself.pre_client_disconnect.send(sender=client)\r\n\t\tself.connection_logger.info('Received client disconnection from %s:%u' % (client.address, client.port))\r\n\t\t# Iterate over anyone who had connected but did not authenticate\r\n\t\tif (client in self.pending_connection_list):\r\n\t\t\tself.pending_connection_list.remove(client)\r\n\t\t# Otherwise run over the list of people who had authenticated\r\n\t\telif (client in self.established_connection_list):\r\n\t\t\tplayer = self.world.find_player(id=client.id)\r\n\t\t\troom = self.world.find_room(id=player.location_id)\r\n\t\t\troom.broadcast(self.game_client_disconnect % player.display_name, player)\r\n\r\n\t\t\tself.established_connection_list.remove(client)", "async def user_logout_process(self, ctx: commands.Context):\n await ctx.cfg_member.szuruname.set(None)\n await ctx.cfg_member.szurutoken.set(None)\n await ctx.send(\n f\"{ctx.author.mention}: you have been logged out, I no longer have access to your account.\",\n reference=ctx.message,\n )", "def device_disconnect(self):\n pass", "def deleteUserById(SID, userId):\n return call(\"deleteUserById\", SID, userId)", "def disconnect(self, sid: Optional[str] = None) -> bool:\n\n # Fetch the client id\n sid = sid or self.get_sid()\n\n # If the client isn't connected, return False\n # to express an error.\n if not self.is_connected(sid):\n return False\n\n # Remove the client id from the list of connected clients.\n self.clients = [c for c in self.clients if c != sid]\n\n # Return True to express the succesfull disconnection.\n return True", "def disconnect():\n\n # Only disconnect a connected user.\n credentials = session.get('credentials')\n if credentials is None:\n response = make_response(json.dumps('Current user not connected.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Execute HTTP GET request to revoke current token.\n access_token = credentials.access_token\n url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' % access_token\n h = httplib2.Http()\n result = h.request(url, 'GET')[0]\n\n if result['status'] == '200':\n # Reset the user's session.\n del session['credentials']\n response = make_response(json.dumps('Successfully disconnected.'), 200)\n 
response.headers['Content-Type'] = 'application/json'\n return response\n else:\n # For whatever reason, the given token was invalid.\n response = make_response(\n json.dumps('Failed to revoke token for given user.', 400))\n response.headers['Content-Type'] = 'application/json'\n return response", "def disconnect(self) -> None:\n IxLoadUtils.deleteSession(self.connection, self.session_url)", "def onUserDeletion(event):\n request = getRequest()\n if not IProductLayer.providedBy(request):\n return\n\n client = getUtility(IAdminClient)\n xmpp_users = getUtility(IXMPPUsers)\n\n principal_id = event.principal\n principal_jid = xmpp_users.getUserJID(principal_id)\n\n pass_storage = getUtility(IXMPPPasswordStorage)\n pass_storage.remove(principal_id)\n\n d = users.deletePrincipal(client, principal_jid)\n return d", "def gdisconnect():\n # Verify that the nonce received is valid.\n if request.args.get('state') != login_session['state']:\n response = make_response(\n json.dumps({'error': 'Invalid state parameter'}), 401\n )\n response.headers['Content-Type'] = 'application/json'\n return response\n # Only disconnect a connected user.\n access_token = login_session.get('access_token')\n if access_token is None:\n response = make_response(\n json.dumps({'error': 'Current user not connected.'}), 404\n )\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Execute HTTP GET request to revoke current token.\n url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' % access_token\n h = httplib2.Http()\n result = h.request(url, 'GET')[0]\n\n if result['status'] == '200':\n # Reset the user's session\n del login_session['provider']\n del login_session['access_token']\n del login_session['gplus_id']\n del login_session['username']\n del login_session['email']\n del login_session['picture']\n # Our response will include a new nonce.\n state = get_new_state()\n login_session['state'] = state\n response = make_response(\n json.dumps({'success': 'User disconnected', 'nonce': login_session['state']}), 200\n )\n response.headers['Content-Type'] = 'application/json'\n return response\n elif result['status'] == '400':\n del login_session['provider']\n del login_session['access_token']\n del login_session['gplus_id']\n del login_session['username']\n del login_session['email']\n del login_session['picture']\n # Our response will include a new nonce.\n state = get_new_state()\n login_session['state'] = state\n response = make_response(\n json.dumps({'success': 'User was already disconnected', 'nonce': login_session['state']}), 200\n )\n response.headers['Content-Type'] = 'application/json'\n return response\n else:\n response = make_response(\n json.dumps(\"Error: \"+result['status']), 401\n )\n response.headers['Content-Type'] = 'application/json'\n return response", "def remove_user(self, user):\n\n data = user.to_json()\n key = \"%s:%s\" % (self.channel_id, user.username)\n\n logging.info(data)\n # remove our users timestamp\n affected = self.redis_server.zrem(ENVIRONMENT['REDIS_PREFIX'] + 'users_timestamp',key)\n logging.info(\"removed user timestamp(%d): %s\" % (affected, key))", "def unregister(self):\n if self.hub.is_connected:\n self._is_registered = False\n self.hub.unregister(self._private_key)\n self._hub_id = None\n self._public_id = None\n self._private_key = None\n else:\n raise SAMPClientError(\n \"Unable to unregister from the SAMP Hub. 
Hub proxy not connected.\"\n )", "def logout():\n body = request.json\n user_id = body.get('user_id')\n user = User.get(User.id == user_id).username\n clear_token(user)\n return HTTPResponse(status=200, body={\"message\":\"Log out succesful.\"})", "def on_disconnect(self, connection, event):\r\n\r\n print('[{}] Disconnected from {}' .format(event.type.upper(), event.source))\r\n print(\"{}\".format(event.arguments))\r\n\r\n res = self.cursor.execute(\"\"\"SELECT * FROM `IRC_servers` WHERE `Registred_users_userID` = %s AND `serverID` = %s;\"\"\", (self.userID, connection.serverID))\r\n if res != 0:\r\n result = self.cursor.fetchall()\r\n print(result)\r\n serverID_res = int(result[0][0])\r\n serverName_res = str(result[0][6])\r\n\r\n res = self.cursor.execute(\"\"\"UPDATE `IRC_servers` SET `isConnected` = %s WHERE `serverID` = %s;\"\"\", (0, serverID_res))\r\n\r\n print(\"RES: \",res)\r\n print(\"serverID = {}\".format(serverID_res))\r\n res = self.cursor.execute(\"\"\"INSERT INTO `IRC_other_messages` (IRC_servers_serverID,\r\n fromHostmask,\r\n messageBody,\r\n commandType,\r\n timeReceived)\r\n values (%s, %s, %s, %s, %s)\"\"\", (serverID_res, event.source,\r\n \"Disconnected from {0}...\".format(serverName_res), \"CLOUDCHAT_INFO\", datetime.datetime.utcnow()))\r\n self.db.commit()", "def del_user(id):\n user = User.query.get(id)\n\n db.session.delete(user)\n db.session.commit()\n\n return userSchema.jsonify(user)", "def deletesession():\n if middleware.linuxServerSessionId != None:\n middleware.ixn.linuxServerStopOperations(middleware.linuxServerSessionId)\n middleware.ixn.linuxServerDeleteSession(middleware.linuxServerSessionId)\n middleware.linuxServerSessionId = None\n middleware.sessionId = None\n else:\n print('\\nThere is currently no opened Linux sessions\\n')", "def disconnect():\n\n if 'provider' in login_session:\n if login_session['provider'] == 'google':\n gdisconnect()\n del login_session['gplus_id']\n if login_session['provider'] == 'facebook':\n fbdisconnect()\n del login_session['facebook_id']\n del login_session['access_token']\n del login_session['username']\n del login_session['email']\n del login_session['picture']\n del login_session['user_id']\n del login_session['provider']\n flash(\"You have successfully been logged out.\")\n return redirect(url_for('showCategories'))\n else:\n flash(\"You were not logged in\")\n return redirect(url_for('showCategories'))", "def deauthenticate(self, server, channel, nick, params):\n user = nick.split('!')[0]\n if not user in self.users:\n return '%s: You are not registered yet.' % user\n if not self.users[user]['authenticated_at']:\n return '%s: You are not authenticated.' % user\n info('%s was de-authenticated manually.' % user, plugin='authdefault')\n self.users[user]['authenticated_at'] = None\n return '%s: You are no longer authenticated.' 
% user", "def gdisconnect():\r\n # only disconnect a connected user\r\n credentials = login_session.get('credentials')\r\n if credentials is None:\r\n response = make_response(json.dumps(\r\n 'Current user not connected.'), 401)\r\n response.headers['Content-Type'] = 'application/json'\r\n return response\r\n # Execute HTTP GET request to revoke current token\r\n access_token = credentials.access_token\r\n url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' % access_token\r\n h = httplib2.Http()\r\n result = h.request(url, 'GET')[0]\r\n if result['status'] == '200':\r\n del login_session['access_token']\r\n del login_session['gplus_id']\r\n del login_session['username']\r\n del login_session['email']\r\n del login_session['picture']\r\n response = make_response(json.dumps('Successfully disconnected.'), 200)\r\n response.headers['Content-Type'] = 'application/json'\r\n return response\r\n else:\r\n response = make_response(json.dumps(\r\n 'Failed to revoke token for given user.', 400))\r\n response.headers['Content-Type'] = 'application/json'\r\n return response", "def remove_user_from_db(choice):\n client_detail_list = sqlite3.connect('../db/client_list.db')\n client_db = client_detail_list.cursor()\n client_db.execute(\"DELETE FROM clients WHERE nickname=?\", (choice,))\n client_detail_list.commit()\n client_detail_list.close()", "def delete_user(self, user):\n try:\n with dbm.open(self.dbm_path, 'c', 0o600) as db:\n del db[user.name]\n except KeyError as k:\n pass" ]
[ "0.72285557", "0.7040492", "0.68665254", "0.6458032", "0.64499587", "0.64076656", "0.6401039", "0.6399896", "0.6398915", "0.6398157", "0.6392838", "0.63727134", "0.6348618", "0.63431454", "0.6333374", "0.6315425", "0.62992054", "0.62812126", "0.6270461", "0.6263564", "0.6252692", "0.6247039", "0.62354255", "0.6231013", "0.62305564", "0.62199825", "0.6214799", "0.6175542", "0.61413115", "0.61396605", "0.61163616", "0.61150575", "0.6098318", "0.60894966", "0.60873497", "0.6053151", "0.60480785", "0.6046588", "0.603918", "0.6029977", "0.602709", "0.60268104", "0.6024911", "0.6019253", "0.6016993", "0.60167", "0.6013805", "0.6012914", "0.5987518", "0.5978445", "0.59731877", "0.5973039", "0.597118", "0.59670484", "0.5953703", "0.59456086", "0.59402", "0.5936945", "0.5934328", "0.5931518", "0.5916759", "0.59146965", "0.59096944", "0.59087574", "0.5906782", "0.5906579", "0.5906347", "0.5901952", "0.5893929", "0.58937746", "0.5886489", "0.5885784", "0.58814585", "0.5868287", "0.58677316", "0.5867316", "0.58641106", "0.5863526", "0.5859873", "0.58592594", "0.5854953", "0.5854173", "0.5851764", "0.5832125", "0.5830879", "0.582787", "0.58277005", "0.5824821", "0.58077586", "0.5805274", "0.5797353", "0.5792437", "0.5788813", "0.578833", "0.5785759", "0.5784581", "0.5780411", "0.57782626", "0.5776689", "0.5773622" ]
0.6578083
3
search for a user's socket id
def get_user_sid(cls, user_id): sid = redis.hget('id-sid', user_id) if not sid: return None return sid.decode("utf-8")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_user_socket(self, user):\n for client in self.clients:\n if user == client.get_name():\n return client.get_socket()", "def lookup_friend(self,username):\n if self.isBlank(username) or self.isValidLen(username):\n return False\n safe_input = (username,)\n try:\n vals = self.cur.execute(\"SELECT Client_IP, Client_Port FROM Users WHERE Username=?\" ,safe_input).fetchone()\n if vals:\n return vals[0],str(vals[1])\n else:\n return False\n except LookupError as e:\n return False", "def lookup_socket(self, address): # TODO: optimize me\n\n net_tuple = self.read_nodestate(0)\n for item in net_tuple:\n discovered_address = item[1]\n if address == discovered_address:\n return item[0]", "def fetch_current_user_id(s):", "def lookup_address(self, in_sock): # TODO: optimize me\n\n net_tuple = self.read_nodestate(0)\n for item in net_tuple:\n discovered_socket = item[0]\n if in_sock == discovered_socket:\n return item[1]", "def getUserId(self, s):\n if ircutils.isUserHostmask(s):\n try:\n return self._hostmaskCache[s]\n except KeyError:\n ids = {}\n for (id, user) in self.users.iteritems():\n x = user.checkHostmask(s)\n if x:\n ids[id] = x\n if len(ids) == 1:\n id = ids.keys()[0]\n self._hostmaskCache[s] = id\n try:\n self._hostmaskCache[id].add(s)\n except KeyError:\n self._hostmaskCache[id] = set([s])\n return id\n elif len(ids) == 0:\n raise KeyError, s\n else:\n log.error('Multiple matches found in user database. '\n 'Removing the offending hostmasks.')\n for (id, hostmask) in ids.iteritems():\n log.error('Removing %q from user %s.', hostmask, id)\n self.users[id].removeHostmask(hostmask)\n raise DuplicateHostmask, 'Ids %r matched.' % ids\n else: # Not a hostmask, must be a name.\n s = s.lower()\n try:\n return self._nameCache[s]\n except KeyError:\n for (id, user) in self.users.items():\n if s == user.name.lower():\n self._nameCache[s] = id\n self._nameCache[id] = s\n return id\n else:\n raise KeyError, s", "def handle_whoami(self, client_socket):\n user = self.clients[client_socket]['data'].decode('utf-8')\n print(f'User {user} queried their identity')\n msg = f'You are currently user {user}'\n self.log_and_send(client_socket, msg)", "def get_user_id(self):\r\n message = self.q(css='BODY').text[0].strip()\r\n match = re.search(r' user_id ([^$]+)$', message)\r\n return match.groups()[0] if match else None", "def findUserNum(self, kvl):\n \n for k, v in kvl.items():\n if k == \"YourUserNum\":\n cid = int(v[0])\n return cid\n return None", "def find_channel(channels, user):\n for x in channels:\n if 'is_member' in channels[x]:\n continue\n if channels[x][\"user\"] == user:\n return channels[x][\"id\"]\n return \"\"", "def findUser(username):\n connector = appEngine.connect()\n userId = connector.execute(\"SELECT user.userID FROM user WHERE userName=(?)\", username).fetchone()\n #selectInput = select([user]).where(user.column.userName == username)\n #db.execute(selectInput)\n return userId", "def get_connected_user():\n usernames = clients.keys()\n data = json.dumps(usernames)\n emit('on_client_list_received', data)", "def get_remote_addr(self):\n connection = self._open_db()\n cursor = connection.cursor()\n cursor.execute('SELECT remote_addr FROM sessions WHERE id = ?;', \\\n (self.sid,))\n remote_addr = cursor.fetchone()\n cursor.close()\n connection.close()\n return remote_addr[0]", "def find_port(addr, user):\n home = pwd.getpwuid(os.getuid()).pw_dir\n for name in os.listdir('%s/.ssh/' % home):\n if name.startswith('unixpipe_%s@%s_' % (user, addr,)):\n return int(name.split('_')[2])", 
"def user_id(self, user):\r\n headers = {\"Content-type\": \"application/x-www-form-urlencoded\", \"Accept\": \"text/plain\",\r\n 'Referer': 'http://' + self.domain + '/team_news.phtml', \"User-Agent\": user_agent}\r\n req = self.session.get('http://' + self.domain + '/standings.phtml', headers=headers).content\r\n soup = BeautifulSoup(req, \"html.parser\")\r\n for i in soup.find('table', cellpadding=2).find_all('tr'):\r\n try:\r\n if user == i.find_all('td')[2].text.encode('utf8'):\r\n return i.find('a')['href'].split('pid=')[1]\r\n except:\r\n continue\r\n return None", "def find_channel(channels, user):\n for x in channels:\n if 'is_member' in channels[x]:\n continue\n if \"user\" in channels[x] and channels[x][\"user\"] == user:\n return channels[x][\"id\"]\n\n return \"\"", "def find_conn(self, v):\n pass", "def get_user(self, user_id):\n\n i = self.gdb.nodes.indexes.get('users')\n if str(user_id).isalnum(): # numerical ID\n results = i.get('user_id', user_id) # always iterable\n else:\n results = i.get('screen_name', user_id) # always iterable\n\n if len(results) == 1:\n log.info('Found existing users, ID %s' % user_id)\n return results[0]\n else:\n log.info('No user in graph with ID %s' % user_id)\n return None", "def find(cls, host, user):\n cls.__check_parameters(host=host, user=user)\n if not hasattr(Connection, \"__pool__\"):\n return None\n cid = cls.generate_id(host, user)\n return Connection.__pool__.get(cid) # by default None is returned", "def search_user_by_id(self,id, cursor):\n sql = \"SELECT * FROM users WHERE userid = %s\"\n cursor.execute(sql, (id,))\n return cursor", "def _get_unknown_userid(self):\n cursor = self.conn.cursor()\n unknown_user_str = dbtypes.User.null\n cursor.execute(\"select id from users where uniqueid='%s'\" % unknown_user_str)\n return cursor.fetchone()[0]", "def _get_user_id(self, name):\n try:\n apiResponse = twitchAPI.twitchAPIGet(\"/users\", {\"login\": name}) #Try to get user id from API\n userID = apiResponse[\"data\"][0][\"id\"]\n except (KeyError, APIUnavailable):\n userID = input(\"Please enter the user id of the user: \")\n except IndexError: #If Twitch API does not return user id\n print(\"That user does not exist on Twitch.\")\n userID = False\n return(userID)", "def friend(tcp, udp, userId, data):\n\n # from server get address of potential friend\n tcp.sendMessage('SEARCH ' + data[0])\n address = tcp.receiveMessage().split()[-2:]\n address = (address[0], int(address[1]))\n\n # send friend request\n if address:\n udp.sendto('FRIEND ' + userId, address)\n print 'Sent friend request to ' + data[0]\n else: print 'Could not send friend request to ' + data[0]", "def get_user_id(username):\n # rv = query_db('select user_id from user where username = ?',\n # [username], one=True)\n # return rv[0] if rv else None\n # db = get_db()\n # print \"LOOKHERE UID\"\n rv = mongo.db.users.find_one({'username': username}, {'_id': []})\n print rv['_id']\n return rv['_id'] if rv else None", "def find_by_user(self, user):\n\t\tfor i in self.interrupts.keys():\n\t\t\tif self.interrupts[i].has_key(\"users\") and \\\n\t\t\t user in self.interrupts[i][\"users\"]:\n\t\t\t\treturn i\n\t\treturn None", "def find_by_port(self, port):\n for client in self.clients.values():\n if client.port == port:\n return client", "def get_IP(lst, usr):\r\n for element in lst:\r\n if element[0] == usr:\r\n return element[1]", "def __getUserIdByDCCConnection(self, c):\n try:\n UserId = self.__IpToUser[self.getIpStringByDCCConnection(c)]['userid']\n if UserId > 0:\n return UserId\n 
else:\n return NOT_AUTHED\n except KeyError:\n return NOT_AUTHED", "def find_subscriber(self, search):\n try :\n key_column = search[0] # get key column\n value = search[1] # get value\n customer_id = self.client.service.findSubscriber(self.username,self.password,\n key_column,value)\n except Exception, e :\n # TODO : user logger instead\n customer_id = 0\n\n return customer_id", "def get_nwsli(rpuid):\n rpuid = int(rpuid)\n for sid in NT.sts:\n if NT.sts[sid]['remote_id'] == rpuid:\n return sid\n return None", "def get_user(id):\n pass", "def get_client_by_socket(self, socket):\n candidate_connection_objects = [connection for connection in self if connection.socket() is socket]\n assert len(candidate_connection_objects) != 0, \"?? socket %s not found in list of client objects\" % socket\n assert len(\n candidate_connection_objects) == 1, \"?? socket %s appears in list of client objects multiple times\" % socket\n return candidate_connection_objects[0]", "def get_usr (conn, id):\n\n res = []\n\n try:\n csr = conn.cursor()\n\n cmd = \"SELECT * FROM {tbl} WHERE {col1} = {val1};\".\\\n format(tbl = _tbl_users,\n col1 = _tbl_users_col1, val1 = id)\n print(cmd)\n\n csr.execute(cmd)\n\n for row in csr:\n res.append(row)\n\n csr.close()\n\n except Exception as ex:\n print(\"Error - get_usr: {0}\".format(ex))\n rc_err = ex.args[0]\n return rc_err\n\n return rc_ok, res", "async def get_user_byid(request):\n user_id = request.match_info[\"user_id\"]\n try:\n user_id = int(user_id)\n except (ValueError, TypeError):\n return web.Response(text=\"Incorrect value for user_id\", status=400)\n\n currentuser = (\n request.cirrina.db_session.query(User)\n .filter(User.username == request.cirrina.web_session[\"username\"])\n .first()\n )\n\n if user_id == -1 or not currentuser.is_admin:\n user = currentuser\n else:\n user = request.cirrina.db_session.query(User).filter_by(id=user_id).first()\n\n if not user:\n return web.Response(status=404, text=\"User not found\")\n\n data = {\"username\": user.username, \"user_id\": user.id, \"is_admin\": user.is_admin}\n return web.json_response(data)", "def _LookupPeer(self, peer_id):\n key = self._GetServerKey(peer_id)\n values, placemark = self._dht.Get(key)\n if not values:\n raise NessieError('No peers returned for user id %r.' 
% peer_id)\n # NOTE(damonkohler): Need to accomodate for the possibility of multipe\n # values.\n value = self._Decrypt(values[0])\n host, port = value.split(':')\n port = int(port)\n return host, port", "async def lookup_friend_id(user: UserLookup,\n _: User = Depends(get_current_user),\n db: Session = Depends(get_db)):\n if user.email:\n return crud.lookup_friend_id_by_email(db, user.email)\n elif user.username:\n return crud.lookup_friend_id_by_username(db, user.username)\n else:\n raise HTTPInvalidUserQuery", "def get_username_from_connection(self, conn):\n dict_copy = self.all_clients\n for username in dict_copy.keys():\n if dict_copy[username] == conn:\n return username", "def search_user(message, search):\n found = []\n search = search.lower()\n for userid, user in iteritems(message._client.users):\n if search in user['name'].lower():\n found.append('{} ({})'.format(user['name'], userid))\n if len(found) == 0:\n message.reply('No user found by that key: {}.'.format(search))\n return\n message.reply('Users found: {}'.format(', '.join(found)))", "def lookup_netid(self, netid):\n self.setQuery(\"\"\"Select ?uid where {\n ?who <http://vivo.dartmouth.edu/ontology/netId> \"%s\" .\n ?who <http://vivo.dartmouth.edu/ontology/geiselId> ?uid .\n }\"\"\" % (netid))\n\n try:\n rval = self.query()\n try:\n g = rval.convert()\n except:\n pass\n return g['results']['bindings'][0]['uid']['value']\n except:\n return None", "def find_slack_user(self, slack_team_id, slack_user_id):\n\t\tif not isinstance(slack_team_id, str):\n\t\t\traise ValueError('slack_team_id must be a string, was %s' % (slack_team_id,))\n\t\tif not isinstance(slack_user_id, str):\n\t\t\traise ValueError('slack_team_id must be a string, was %s' % (slack_user_id,))\n\n\t\tcur = self.db.cursor()\n\t\ttry:\n\t\t\tsql = 'SELECT user_id FROM slack_user_001 WHERE slack_team_id = %s AND slack_user_id = %s'\n\t\t\tcur.execute(sql, (slack_team_id, slack_user_id))\n\t\t\tresults = cur.fetchall()\n\t\t\tif len(results) == 0:\n\t\t\t\treturn None\n\t\t\telif len(results) == 1:\n\t\t\t\treturn str(results[0][0])\n\t\t\telse:\n\t\t\t\traise WeirdStateError('Multiple users returned from slack_user which doesn\\'t make sense because that should have been a primary key')\n\t\tfinally:\n\t\t\tcur.close()", "def whois( self, mess, args):\n user = self.get_sender_username(mess)\n args = args.strip().replace(' ', '_')\n if user in self.users:\n self.log.info('%s queried whois %s.' 
% (user, args))\n if args in self.users.values():\n return filter(lambda u: self.users[u] == args, self.users)[0]\n else:\n return 'Nobody!'", "def identify_client(self,protocol):\n if protocol.resident:\n return protocol.peer\n #pdb.set_trace()", "def find_connection(self, id):\r\n\t\tfor player in self.established_connection_list:\r\n\t\t\tif (player.id == id):\r\n\t\t\t\treturn player", "def search_user(message, search):\n found = []\n search = search.lower()\n users = hf.get_users()\n for user in users:\n if search in user['name'].lower():\n found.append('{} ({})'.format(user['name'], user[\"id\"]))\n if len(found) == 0:\n message.reply('No user found by that key: {}.'.format(search))\n return\n message.reply('Users found: {}'.format(', '.join(found)))", "def nick(self):\n if(not SOCKET_TO_USERID.has_key(self.source)):\n new_userid = _char_list_to_string(random.sample(ALPHABET, USERID_LENGTH))\n while(USERID_TO_SOCKET.has_key(new_userid)):\n new_userid = _char_list_to_string(random.sample(ALPHABET, USERID_LENGTH))\n USERID_TO_SOCKET[new_userid] = self.source\n SOCKET_TO_USERID[self.source] = new_userid\n self.send()", "def get_uid(username):\n\t\tif username is None:\n\t\t\treturn\n\t\tcon = mdb.connect('localhost', 'root', 'sensepass', 'sensecambrowser')\n\t\twith con:\n\t\t\tquery = \"SELECT id from auth_user WHERE username=%s\" % (username)\n\t\t\tcur = con.cursor()\n\t\t\tcur.execute(query)\n\t\t\tdata = cur.fetchall()\n\t\t\tprint \"len(data)\"\n\t\t\tprint data\n\t\t\tif len(data) > 0:\n\t\t\t\treturn data[0]\n\t\t\treturn None", "def get_userid(node_name):\n url = XCATUrl().lsdef_node(''.join(['/', node_name]))\n info = xcat_request('GET', url)['info']\n\n with expect_invalid_xcat_resp_data():\n for s in info[0]:\n if s.__contains__('userid='):\n return s.strip().rpartition('=')[2]", "def get_identifier(self, request):\r\n return request.META.get('REMOTE_USER', 'nouser')", "def getUserUuid(userId):\n return searchForUser(userId)['uuid']", "def handler(packet):\r\n if packet.narg1 == query_id:\r\n self.remove_listener(FCTYPE.USERNAMELOOKUP, handler)\r\n if (not hasattr(packet, \"smessage\")) or not isinstance(packet.smessage, dict):\r\n future.set_result(None) # User doesn't exist\r\n else:\r\n future.set_result(packet.smessage)", "def query_user_id(conn):\n user_id = 0\n while (user_id != -1) is True:\n user_input = raw_input(\"Please enter a person ID: \")\n try:\n user_id = int(user_input)\n if user_id == -1:\n print \"User entered -1 - exiting\"\n break\n select_person_by_id(conn, user_id)\n select_pets_by_person(conn, user_id)\n except ValueError:\n print \"Please enter an integer\"", "def id(self) -> SocketID:\n _args: list[Arg] = []\n _ctx = self._select(\"id\", _args)\n return _ctx.execute_sync(SocketID)", "def select_person_by_id(conn, person_id):\n sql = \"\"\"SELECT * FROM person WHERE id=?\"\"\"\n cur = conn.cursor()\n try:\n cur.execute(sql, (person_id,))\n data = cur.fetchall()\n if data:\n userid = (data[0][0])\n print \"\\nQuerying for userID {}\\n\".format(userid)\n print sql_pp(cur, data)\n except OperationalError, msg:\n print \"SQL error {} while running our code\".format(msg)", "def lookup(self, user_id):\n raise NotImplementedError", "async def authorized_userid(self, identity):\r\n try:\r\n dct = json.loads(identity)\r\n async with self.db.execute(\r\n '''\r\n select count(*) from user WHERE username=? 
AND rowid=?\r\n ''', (dct['username'], dct['rowid'])\r\n ) as cursor:\r\n n = (await cursor.fetchone())[0]\r\n if n:\r\n return identity\r\n except Exception:\r\n pass\r\n return None", "def lookupUser_byID(self, user_id):\n sql = \"SELECT * FROM Users WHERE id='%s'\"\\\n % (user_id)\n res = self.execute(sql)\n reslist = res.fetchall()\n if reslist == []:\n return None\n else:\n return reslist[0]", "def find_user_by_username(self, username):\n user = None\n logging.info(\"channel finding %s in redis \" % username)\n key = \"%s:%s\" % (self.channel_id, username)\n # see if we have a timestamp in the room\n rank = self.redis_server.zrank(ENVIRONMENT['REDIS_PREFIX'] + \"users_timestamp\", key)\n logging.info(\"channel %s users_timestamp rank (%s): %s \" % (ENVIRONMENT['REDIS_PREFIX'], key, rank))\n if rank != None:\n # get our user from the chat server\n logging.info(\"found users_timestamp, fetching user\")\n user = find_user_by_username(username)\n\n if user != None:\n logging.info(\"found user by username (%s): %s\" % (key, username))\n return user\n else:\n logging.info(\"channel unable to find user by username (%s): '%s'\" % (key, username))\n return None", "def getSender(self):\n\n if self in WebSocketRouter.nodemap:\n return WebSocketRouter.nodemap[self]\n elif self not in WebSocketRouter.usermap:\n WebSocketRouter.usermap[self] = str(uuid4())\n debug(\"Added user py id: %s uuid: %s\" % \\\n (str(id(self)), WebSocketRouter.usermap[self]))\n return WebSocketRouter.usermap[self]", "def findUniqueUserID(userID):\n connector = appEngine.connect()\n userIdentifier = connector.execute(\"SELECT user.userID FROM user WHERE userName=(?)\", userID).fetchone()\n #userIdentifier = db.session.execute(\"SELECT user.userID FROM user WHERE userName=(?)\", userID)\n if type(userIdentifier) == type(None):\n return False # this means there is no user in the database yet\n else:\n return True # this means there is a user in the database", "async def getuserid(ctx, user=None):\n if user == None:\n await ctx.send(f\"Your user ID is `{ctx.message.author.id}`.\")\n elif user[:3] != \"<@!\":\n member = ctx.message.guild.get_member_named(user)\n await ctx.send(f\"The user ID of {user} is: `{member.id}`\")\n else:\n user = user.replace(\"<@!\", \"\").replace(\">\", \"\")\n await ctx.send(f\"The user ID of <@{user}> is `{user}`.\")", "def get_user_by_id(cur, id) -> str:\n cur.execute(f'''\n SELECT name FROM user WHERE id = {id} ''')\n return cur.fetchone()[0]", "def lookup(identifier):\n\n user = User.objects(id=identifier).first()\n \n if user:\n user.update(set__seen=datetime.utcnow()) # , set__host=request.remote_addr -- chicken-egg problem\n\n return user", "def find_by_id(self, uid):\n return self.mapper.find_by_id(uid)", "def get_user_id(body):\n pub_example = body.find_all('td', attrs={'class': 'gsc_a_t'})[0]\n user_text = pub_example.a['href']\n user_id = re.search('\\&user(.*?)\\&', user_text).group()[6:-1]\n return user_id", "def get_session_from_user(self, client_id):\n return self.connections[client_id][\"session_id\"]", "def find_remote_by_client_id(client_id):\n for remote in current_oauthclient.oauth.remote_apps.values():\n if remote.name == \"cern_openid\" and remote.consumer_key == client_id:\n return remote", "def get_node_by_server(self, ip, port, is_register=False):\n # print('List of nodes in stream ', self.get_server_address())\n # for node in self.nodes:\n # print(node.get_server_address(), node.is_register)\n node_address = (Node.parse_ip(ip), port)\n for node in self.nodes:\n if 
node.get_server_address() == node_address and node.is_register == is_register:\n return node\n return None", "def whoelse(sock, request):\n users = set()\n for s in connections.values():\n users.add(s['user'])\n response = { \"echo\": \"\\n\".join(users) }\n sock['conn'].sendall(json.dumps(response))", "def maybe_find_user(user_id):\n try:\n return find(user_id)\n except KeyError:\n return None", "def get_socket_hash(socket):\r\n return hash(socket.id_data.name + socket.node.name + socket.identifier)", "def is_user(id):\n return id.startswith('U')", "def get_primary_for(userid):", "def get_primary_for(userid):", "async def uid(message):\n return \"your user id is: {}\".format(message.user_id)", "def identify(cls, user_id):\n return cls.query.get(user_id)", "def socket(request):\n return request.param", "def _get_ids_from_ip(self, ip):\r\n try:\r\n # Does it look like an ip address?\r\n socket.inet_aton(ip)\r\n except socket.error:\r\n return []\r\n\r\n # Find the server via ip address. First try public ip, then private\r\n results = self.list_hardware(public_ip=ip, mask=\"id\")\r\n if results:\r\n return [result['id'] for result in results]\r\n\r\n results = self.list_hardware(private_ip=ip, mask=\"id\")\r\n if results:\r\n return [result['id'] for result in results]", "def userDocumentId(self, id: str) -> str:", "def login(user_ip):\n if user_ip in loginData['ip'].to_list():\n return loginData[loginData['ip'] == user_ip]['id'].item()\n else:\n user_id = loginData['id'].max() + 1\n loginData.loc[len(loginData)] = [user_id, user_ip]\n return user_id", "def _search_connection(self, host, login, passwd, conn):\n self._connections_lock.acquireRead()\n try:\n if (host in self._connections):\n connections = self._connections[host]\n\n for connection in connections:\n if (connection.login == login and connection.passwd == passwd and connection.type == conn):\n return connection\n finally:\n self._connections_lock.release()\n\n return None", "def find(self, uid):\n return self._root.find(uid)", "async def user_id(\n event,\n user_id: ('user_id', 'Get the id of an other user?', 'user') = None,\n):\n if user_id is None:\n user_id = event.user.id\n \n return str(user_id)", "def usersearch(q_user, page=0, splash=True, identify='forUsername'):\n\n user, _, term = (x.strip() for x in q_user.partition(\"/\"))\n if identify == 'forUsername':\n ret = channelfromname(user)\n if not ret: # Error\n return\n user, channel_id = ret\n\n else:\n channel_id = user\n\n # at this point, we know the channel id associated to a user name\n usersearch_id('/'.join([user, channel_id, term]), page, splash)", "def find_sis_user_id(search_term):\n path = 'v1/accounts/1/users'\n url = config.Canvas_base_api_url + path.format(\n account_id=config.Canvas_account_id) + '?search_term={search_term}'.format(search_term=search_term)\n headers = {'Authorization': 'Bearer {token}'.format(token=config.CanvasSISImportToken)}\n r = requests.get(url, headers=headers)\n rJson = r.json()\n try:\n sis_id = int(rJson[0]['sis_user_id'])\n except:\n sis_id = 'Unknown'\n return sis_id", "async def uid(message, user: ParamType.MIXER_USER):\n return \"@{} user id is: {}\".format(user.username, user.id)", "def ask_server_if_user_exists(self, sn):\n\n ### <------ Called from show_prompts\n print(\"//asking server to look up user...\")\n\n ### -------> Outbound to Server\n response = ServerOperations().is_user_here(sn)\n\n if response == True:\n print(f\"-=- Waiting for {sn} to accept file. 
Press A to abort.\")\n return True\n\n else:\n print(f\"{sn} not found. Try again.\")\n return False", "def submitter_netid(email,sid):\n netid_pattern = re.compile('[a-z]{1,4}[0-9]{1,5}')\n match = netid_pattern.search(sid)\n if match:\n return match.group(0)\n match = netid_pattern.search(email)\n if match:\n return match.group(0)\n raise ValueError(\"Could not find email address: %s\" % submitter )", "def _find_in_queue(self, nick):\n i = 0\n for user, msg in self._queue:\n if user == nick:\n return i\n i += 1\n return -1", "def id_check(self, message):\n matches = ID_SYNTAX.match(message)\n if matches:\n return matches.group(1)\n return None", "def identify_remote_router(remote_address):\n global DATA\n port = remote_address[1]\n for every_router in DATA[\"neighbor\"]:\n if every_router[2] is port:\n return every_router[0]", "def test_searchWildcardHigh(self):\n self.assertTrue(\n self.server.search_UID([b'1235:*'], self.seq, self.msg, (1234, 1)))", "def fuzzyMatchUserID(self, ip_addr, userID):\n self.cur.execute(\"SELECT userID FROM login WHERE event_success=1 AND ip_addr = (?)\", (ip_addr,))\n data=self.cur.fetchall()\n for id in data:\n if(levenshtein_ratio_and_distance(userID, id)>0.9):\n return 1\n return 0", "def __authUser(self, c, e):\n try:\n UserId = self.__database.getUserIdByBotKey(self.getParameterListByEvent(e)[0]);\n self.__IpToUser[self.getIpStringByDCCConnection(c)]['userid'] = int(UserId)\n return UserId\n except IndexError:\n return 0;", "def get_name_by_socket(self, socket):\n with self.register_lock:\n return self.socket_name[socket]", "def __find_matching_user(self, user):\n if not user.id in self.__users.keys():\n return user\n return self.__users[user.id]", "def with_id(cls, user_id, server, bucket=None):\n\t\tif ObjectId.is_valid(user_id):\n\t\t\tuser_id = ObjectId(user_id)\n\t\tres = cls.find_on({'type': 'user', '_id': user_id}, server, bucket)\n\t\tif res and len(res) > 0:\n\t\t\treturn res[0]\n\t\traise IDMException(\"no user with the given id “{}”\".format(user_id), 404)", "def find_supercomputer_id():\n hostname = socket.gethostname()\n if hostname.startswith('john') or hostname.startswith('farnarkle'):\n return 1\n elif hostname.startswith('mwa') or hostname.startswith('garrawarla'):\n return 2\n elif hostname.startswith('x86') or hostname.startswith('arm'):\n return 3\n else:\n logger.error('Unknown computer {}. Exiting'.format(hostname))\n sys.exit(1)", "def me(self): \n return self.users(\"identifier == $ApiUser\")", "def method_isloggedid(self, chat_id):\n with open('./package_login/logged.json') as f:\n data = json.load(f)\n\n find_it = False\n find_user = \"\"\n id_c = sha256(str(chat_id).rstrip().encode()).hexdigest()\n for x in data:\n if x['chat_id'] == id_c:\n find_it = True\n find_user = x\n break\n\n if find_it:\n if find_user['password'] != self.password:\n return 1\n else:\n return 0\n else:\n return 2", "async def get_user_id(conn, login=None, token=None):\n if login:\n query = db.users.select().where(db.users.c.login == login)\n user = await conn.fetch(query)\n if len(user) == 0:\n return None\n else:\n query = db.users.select().where(db.users.c.token == token)\n user = await conn.fetch(query)\n return user[0]['id']" ]
[ "0.6524464", "0.6177678", "0.6079417", "0.60470605", "0.5984913", "0.59413224", "0.5857293", "0.58146036", "0.5801265", "0.5664723", "0.5636931", "0.5636145", "0.5635148", "0.56256527", "0.56242967", "0.56242955", "0.56001824", "0.56000507", "0.55688393", "0.55479366", "0.5547746", "0.5501842", "0.5497427", "0.5485152", "0.5483139", "0.5482436", "0.5475664", "0.5473264", "0.54585505", "0.5455772", "0.5445724", "0.54421145", "0.5441488", "0.5433295", "0.54266864", "0.54069567", "0.5404658", "0.53965074", "0.53904784", "0.53762376", "0.53761846", "0.537001", "0.5356696", "0.53521246", "0.5342696", "0.53321165", "0.5331824", "0.5322941", "0.5319215", "0.5310758", "0.5308108", "0.5303131", "0.5302089", "0.52908695", "0.52778035", "0.52688104", "0.52507544", "0.5236621", "0.52322215", "0.5226349", "0.5218024", "0.52120847", "0.5207971", "0.5197834", "0.51951814", "0.51894027", "0.5183992", "0.517402", "0.51729965", "0.51716053", "0.5171237", "0.5166161", "0.5166161", "0.5163269", "0.51545984", "0.5145899", "0.5141013", "0.51321787", "0.5129768", "0.5128438", "0.512661", "0.5119264", "0.51094574", "0.5097918", "0.50959396", "0.509087", "0.50876796", "0.5085498", "0.50783414", "0.5075942", "0.50752985", "0.5069668", "0.50643027", "0.5056022", "0.50497645", "0.50485677", "0.5046768", "0.5033556", "0.5028608", "0.50272095" ]
0.527406
55
get a user id using its sid user has to be joined
def get_sid_id(cls, sid): id = redis.hget('sid-id', sid) if not id: return None return id.decode("utf-8")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_user(id):\n pass", "def get_user_id(self):\n return self.id_user", "def fetch_current_user_id(s):", "def get_id(self): \n\t\treturn (self.user_id)", "def get_user_id():\n user_id = session.get(\"user_id\")\n return user_id if user_id else None", "def get_one_user():", "def get_user_from_id(user_id):\n return Users.query.filter_by(id=user_id).first()", "def get_user_sid(cls, user_id):\n\n sid = redis.hget('id-sid', user_id)\n if not sid:\n return None\n return sid.decode(\"utf-8\")", "def get_primary_for(userid):", "def get_primary_for(userid):", "def get_user(self, user_id):\n\n i = self.gdb.nodes.indexes.get('users')\n if str(user_id).isalnum(): # numerical ID\n results = i.get('user_id', user_id) # always iterable\n else:\n results = i.get('screen_name', user_id) # always iterable\n\n if len(results) == 1:\n log.info('Found existing users, ID %s' % user_id)\n return results[0]\n else:\n log.info('No user in graph with ID %s' % user_id)\n return None", "def lookupUser_byID(self, user_id):\n sql = \"SELECT * FROM Users WHERE id='%s'\"\\\n % (user_id)\n res = self.execute(sql)\n reslist = res.fetchall()\n if reslist == []:\n return None\n else:\n return reslist[0]", "def get_user_id(self):\n raise NotImplementedError", "def get_user_by_id(self, user_id):\n query = \"SELECT * FROM users WHERE user_id = %s\"\n self.cursor.execute(query,[user_id])\n result = self.cursor.fetchone()\n return result", "def user_id(self):\n return self.status.user[\"id\"]", "def identify(cls, user_id):\n return cls.query.get(user_id)", "def get_user_id(username):\n # rv = query_db('select user_id from user where username = ?',\n # [username], one=True)\n # return rv[0] if rv else None\n # db = get_db()\n # print \"LOOKHERE UID\"\n rv = mongo.db.users.find_one({'username': username}, {'_id': []})\n print rv['_id']\n return rv['_id'] if rv else None", "def _get_user_id(username):\n user_id = select(u.user_id for u in UserInformationData if u.username == username).first()\n\n return user_id", "def get_single_user(self, id):\n for user in self.users:\n if user['id'] == id:\n return user", "def get_id(self):\n return self.user_id", "def get_userid_profile(db, user_id):\n return db['user'].find_one({'_id': user_id})", "def get_uid(username):\n\t\tif username is None:\n\t\t\treturn\n\t\tcon = mdb.connect('localhost', 'root', 'sensepass', 'sensecambrowser')\n\t\twith con:\n\t\t\tquery = \"SELECT id from auth_user WHERE username=%s\" % (username)\n\t\t\tcur = con.cursor()\n\t\t\tcur.execute(query)\n\t\t\tdata = cur.fetchall()\n\t\t\tprint \"len(data)\"\n\t\t\tprint data\n\t\t\tif len(data) > 0:\n\t\t\t\treturn data[0]\n\t\t\treturn None", "def with_id(cls, user_id, server, bucket=None):\n\t\tif ObjectId.is_valid(user_id):\n\t\t\tuser_id = ObjectId(user_id)\n\t\tres = cls.find_on({'type': 'user', '_id': user_id}, server, bucket)\n\t\tif res and len(res) > 0:\n\t\t\treturn res[0]\n\t\traise IDMException(\"no user with the given id “{}”\".format(user_id), 404)", "def get_user(self, user_id):\n oauth_user = OAuthioUser.objects.filter(user__id=user_id)\n if oauth_user.exists():\n return oauth_user.get().user", "def user_by_id(self, user_id):\n\n cur = self.db.cursor()\n cur.execute(\n \"\"\"SELECT user_id, username, password, phone, email, role\n FROM users WHERE user_id = %s\"\"\", (user_id, ))\n \n user_from_db = cur.fetchone()\n if cur.rowcount == 1: \n user_id, username, password, phone, email, role = user_from_db\n resp = dict(user_id=user_id, username=username, password=password, phone=phone, email=email, 
role=role)\n \n return resp\n return None", "def get_user_id(self, session, **kwargs):\n return None", "def get(userid):\n\n return ldapi.lookup(ld, 'uid', userid, cfg['ldap_users_base'])", "async def user_id(\n event,\n user_id: ('user_id', 'Get the id of an other user?', 'user') = None,\n):\n if user_id is None:\n user_id = event.user.id\n \n return str(user_id)", "def get_id(self):\r\n return self.username", "def get_user():\n try:\n userId = request.args.get('login_as')\n return users[int(userId)]\n except Exception:\n return None", "async def get_user_byid(request):\n user_id = request.match_info[\"user_id\"]\n try:\n user_id = int(user_id)\n except (ValueError, TypeError):\n return web.Response(text=\"Incorrect value for user_id\", status=400)\n\n currentuser = (\n request.cirrina.db_session.query(User)\n .filter(User.username == request.cirrina.web_session[\"username\"])\n .first()\n )\n\n if user_id == -1 or not currentuser.is_admin:\n user = currentuser\n else:\n user = request.cirrina.db_session.query(User).filter_by(id=user_id).first()\n\n if not user:\n return web.Response(status=404, text=\"User not found\")\n\n data = {\"username\": user.username, \"user_id\": user.id, \"is_admin\": user.is_admin}\n return web.json_response(data)", "def get_user_by_id(cls, userid):\n\n user = User.query.filter_by(user_id=userid).one()\n\n return user", "def get_user_id(username):\n rv = query_db('select user_id from user where username = ?',\n [username], one=True)\n return rv[0] if rv else None", "def get_user_id(username):\n rv = query_db('select user_id from user where username = ?',\n [username], one=True)\n return rv[0] if rv else None", "def get_user_id(username):\n rv = query_db('select user_id from user where username = ?',\n [username], one=True)\n return rv[0] if rv else None", "def _get_user_id(self, user: Optional[Dict[str, Any]]) -> Optional[str]:\n return user[\"id\"] if user and \"id\" in user else None", "def get_user(self, user_id):\n return None # noqa: WPS324", "def get_user_id(username):\n rv = query_db('select user_id from user where username = %s',\n [username], one=True)\n return rv[0] if rv else None", "def get_id(self) -> int:\n return self.user_id", "def getUserID(self):\n\t\treturn self.UserID", "def get_user_id(username):\n rv = query_db('SELECT user_id FROM users where username = ?', \n [username], one=True)\n return rv[0] if rv else None", "def user_by_id(user_id):\n user = User.query.filter(User.id == user_id).one_or_none()\n return user", "def get_user(self, user_id):\n uri = 'users/' + user_id\n return self.make_request(uri)", "def get_user_by_id(user_id):\n return User.query.get(user_id)", "def id2user(self):\n if self._id2user is None:\n self._id2user = {j: user for user, j in self.user2id.items()}\n return self._id2user", "def user_id(self, user):\r\n headers = {\"Content-type\": \"application/x-www-form-urlencoded\", \"Accept\": \"text/plain\",\r\n 'Referer': 'http://' + self.domain + '/team_news.phtml', \"User-Agent\": user_agent}\r\n req = self.session.get('http://' + self.domain + '/standings.phtml', headers=headers).content\r\n soup = BeautifulSoup(req, \"html.parser\")\r\n for i in soup.find('table', cellpadding=2).find_all('tr'):\r\n try:\r\n if user == i.find_all('td')[2].text.encode('utf8'):\r\n return i.find('a')['href'].split('pid=')[1]\r\n except:\r\n continue\r\n return None", "def user_left(cls, sid):\n id = redis.hget('sid-id', sid)\n redis.hdel('sid-id', sid)\n redis.hdel('id-sid', id)\n return id.decode(\"utf-8\")", "def 
get_user_id(username):\n rv = query_db('select user_id from users where username = ?',\n [username], one=True)\n return rv[0] if rv else None", "def get_user(self, user_id=None):\n raise NotImplementedError", "def lookup(self, user_id):\n return self.users.get(str(user_id))", "def get_user_by_id(self, id):\n\t\treturn self.users.get(id)", "def get_user_by_id(cur, id) -> str:\n cur.execute(f'''\n SELECT name FROM user WHERE id = {id} ''')\n return cur.fetchone()[0]", "def lookup(self, user_id):\n raise NotImplementedError", "def get_id(self):\n return self.username", "def get_id(self):\n return self.username", "def get_id(self):\n return self.username", "def __find_matching_user(self, user):\n if not user.id in self.__users.keys():\n return user\n return self.__users[user.id]", "def getUser(self, id):\n if not isinstance(id, int):\n # Must be a string. Get the UserId first.\n id = self.getUserId(id)\n u = self.users[id]\n while isinstance(u, int):\n id = u\n u = self.users[id]\n u.id = id\n return u", "def get_user_by_id(user_id):\n\n return User.query.get(user_id)", "def get_user_by_id(user_id):\n\n return User.query.get(user_id)", "def get_user_by_id(user_id):\n\n return User.query.get(user_id)", "def get_user_by_id(user_id):\n\n return User.query.get(user_id)", "def get_user_id(event, destination=None):\n ret = event.source.user_id\n if not ret:\n ret = destination\n\n return ret", "def user(self):\n u = self.user_info\n return self.user_model.get_by_id(u['user_id']) if u else None", "def user_id(self):\n return self._user_id", "def user_id(self):\n return self._user_id", "def user_id(self):\n return self._user_id", "def user_id(self):\n return self._user_id", "def user_id(self):\n return self._user_id", "def get_by_id(self):\n user_node = graph.find_one(\"User\",\n property_key=\"id\",\n property_value=self.id)\n return user_node", "def users_user_id_get(userId): # noqa: E501\n base.check_session()\n return _cleanuser(_finduser(userId))", "def get_accessible_user_id(self):\n ### DATABASE CODE GOES HERE\n return 1", "def getUserByuID(self, uID):\n cursor = self.conn.cursor()\n query = \"SELECT ufirstname, ulastname, udescription, urole, uclassification, email, pin \" \\\n \"FROM Users natural inner join Credential \" \\\n \"WHERE uID= %s;\"\n cursor.execute(query, (uID,))\n result = cursor.fetchone()\n return result", "def user(request, user_id):\n raise NotImplementedError", "def get_user_id(self, user):\n\n found_user = self.search(user)\n\n if found_user:\n return found_user[\"data\"][0][\"id\"]\n else:\n raise UserNotFound(\"User \" + user + \" not found.\")", "def get_user_id():\n csc_name = get_user_csc_name()\n if csc_name:\n return csc_name\n haka_id = get_user_haka_identifier()\n if haka_id:\n return haka_id\n return None", "def get_user_id(self, details, response):\n return response['uid']", "def get_userid():\n return _userid()", "def get_id(self, username):\n\n users_list = self.get_list()\n for user_info in users_list:\n if user_info['username'] == username:\n return user_info['id']\n # return None\n raise UserNotFoundException(\"User {0} not found\".format(username))", "async def getuserid(ctx, user=None):\n if user == None:\n await ctx.send(f\"Your user ID is `{ctx.message.author.id}`.\")\n elif user[:3] != \"<@!\":\n member = ctx.message.guild.get_member_named(user)\n await ctx.send(f\"The user ID of {user} is: `{member.id}`\")\n else:\n user = user.replace(\"<@!\", \"\").replace(\">\", \"\")\n await ctx.send(f\"The user ID of <@{user}> is `{user}`.\")", "def 
get_user(id):\n if (g.user.id == id):\n return jsonify(g.user.serialize)\n else:\n abort(400)", "def get_current_user_id():\n user = get_current_user()\n return user.pk if user and user.is_authenticated else None", "def _get_user_by_id(self, _id):\n user_resp = self._db.Users(database_pb2.UsersRequest(\n request_type=database_pb2.UsersRequest.FIND,\n match=database_pb2.UsersEntry(global_id=_id)))\n if user_resp.result_type != database_pb2.UsersResponse.OK:\n self._logger.warning(\n 'Could not find user: {}'.format(user_resp.error))\n return None\n if not len(user_resp.results):\n self._logger.warning('Could not find user.')\n return None\n return user_resp.results[0]", "def get_user_id(self, details, response):\n return response.get(\"sub\")", "def my_get_user(users_list, user_id):\n for user in users_list:\n if user.get(\"user_id\") == user_id:\n return user\n return None", "def load_user(uid):\n return User.query.get(uid)", "def __int__(self):\r\n return self.userid", "def get_user(self):\n return str(self.request.user.id)", "def GetUserId(odb, username, add_alt=0):\n\n print 'Unrecognized user %s.' % (username)\n alt = raw_input('Is this user in our DB (y/n)? ')\n if alt == 'y':\n main_name = raw_input('What is their primary username? ')\n user_id = odb.GetUserId(main_name)\n if user_id is None:\n user_id = GetUserId(odb, main_name)\n if add_alt:\n odb.AddAlt(user_id, username)\n return user_id\n\n else:\n return odb.AddUser(username)", "def getUserIdFromSteamId(self, steamId):\r\n self.execute(\"SELECT UserID FROM Player WHERE steamid=?\", steamId)\r\n value = self.cursor.fetchone()\r\n if value is None:\r\n return None\r\n return value[0]", "def select_user(user_id):\n return session.query(User).filter(User.id == user_id).first()", "def getUserUuid(userId):\n return searchForUser(userId)['uuid']", "def get(id):\n return User.query.filter_by(id=id).first()", "def user_id(self):\n return lamin_user_settings().id", "def user_id(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"user_id\")", "def _get_user_identifier(_khoros_object, _identifier, _where_clause, _allow_multiple, _display_warnings):\n _liql_query = f\"select {_identifier} from users where {_where_clause}\"\n _api_response = liql.perform_query(_khoros_object, liql_query=_liql_query, verify_success=True)\n _num_results = api.get_results_count(_api_response)\n if _num_results == 0:\n raise errors.exceptions.NotFoundResponseError\n elif _num_results > 1:\n _multiple_results_msg = \"Multiple results were retrieved when querying for the user in question.\"\n if _display_warnings:\n warnings.warn(_multiple_results_msg, RuntimeWarning)\n if not _allow_multiple:\n raise errors.exceptions.TooManyResultsError(_multiple_results_msg)\n _user_identifier = []\n for _user in _api_response['data']['items']:\n _item_val = int(_user[_identifier]) if _user[_identifier].isnumeric() else _user[_identifier]\n _user_identifier.append(_item_val)\n else:\n _item_val = _api_response['data']['items'][0][_identifier]\n _user_identifier = int(_item_val) if _item_val.isnumeric() else _item_val\n return _user_identifier", "def getByID(session, id):\n return session.query(User).filter(User.id == id).first()", "def get_user(self, user_id):\n return self.my_get_user(self.get_all_dbusers(), user_id)", "def get_user_from_uid(uid):\n id, tmp = uid.split('-')\n user = AuthUser.query.filter_by(id=id).first()\n if user and user.get_uid() == uid:\n return True\n return False", "def user_id(self) -> Optional[pulumi.Input[int]]:\n return 
pulumi.get(self, \"user_id\")", "def user_id(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"user_id\")" ]
[ "0.71183956", "0.67369145", "0.67288226", "0.6676", "0.66554093", "0.6615446", "0.6597501", "0.6586899", "0.6580105", "0.6580105", "0.65061647", "0.6459379", "0.6445892", "0.64429665", "0.6428113", "0.64264065", "0.6411913", "0.6411254", "0.64103955", "0.64056945", "0.6374203", "0.63714004", "0.63547635", "0.63262075", "0.6325693", "0.6285259", "0.62830365", "0.62652445", "0.6255471", "0.62512356", "0.6250988", "0.6242954", "0.6237616", "0.6237616", "0.6237616", "0.6236853", "0.622389", "0.6222563", "0.62140554", "0.6191207", "0.61907536", "0.6190299", "0.61828357", "0.6181813", "0.6177733", "0.6177358", "0.6167072", "0.61670375", "0.61666584", "0.6164145", "0.614403", "0.61358273", "0.61255264", "0.6118591", "0.6118591", "0.6118591", "0.61182636", "0.6096299", "0.60958797", "0.60958797", "0.60958797", "0.60958797", "0.6090674", "0.6072637", "0.60583687", "0.60583687", "0.60583687", "0.60583687", "0.60583687", "0.60472244", "0.60414267", "0.6037474", "0.60371435", "0.60369056", "0.60351723", "0.6032029", "0.60307306", "0.60300386", "0.6027955", "0.602105", "0.60205716", "0.6019124", "0.6017526", "0.6007023", "0.6005596", "0.6004986", "0.5994574", "0.59871215", "0.5984003", "0.5979182", "0.59787357", "0.5969603", "0.59681505", "0.5966635", "0.59653085", "0.5963374", "0.5958997", "0.59589934", "0.59554774", "0.59529203", "0.59529203" ]
0.0
-1
when a user sends a new message to the server
def new_msg(cls, sender_id, recipient_id, text): sender = User.find(id=sender_id) sender_sid = cls.get_user_sid(sender.id) if is_group(recipient_id): recipient_group = Group.find(id=recipient_id) if not recipient_group: raise Exception('recipient was not found') if not recipient_group.has_user(sender): raise Exception('user is not a member of this group') cls._broadcast_group(sender, sender_sid, recipient_group, text) elif is_user(recipient_id): recipient = User.find(id=recipient_id) if not sender.is_friends(recipient): raise Exception('user is not friends with recipient') if recipient.blocked(sender): raise Exception('recipient has blocked you') if not recipient: raise Exception('recipient was not found') cls._broadcast_user(sender, sender_sid, recipient, text) else: raise Exception('bad recipient id')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_message(data):\n pass", "def message_handler(msg):\n logging.info(\"Message Text: %s\" % msg['msg'])\n\n message_entry = Message(request.sid, msg['room'], msg['msg'], msg['time'])\n if msg['msg'] != \"User has connected!\":\n logging.info(\"About to add to DB\")\n db.session.add(message_entry)\n db.session.commit()\n logging.info(\"Added to DB\")\n send(msg['msg'], room=msg['room'])", "def on_message(self, message):\n #print(f\"This message was sent: {message}\") # Writes to the console window (server side)\n self.write_message(f\"This message was sent: {message}\") # Writes message to sender", "def on_message(self, message):\n print \"Client %s received a message : %s\" % (self.id, message)", "def on_message(self, message):\n print \"Client %s received a message : %s\" % (self.id, message)\n self.write_message(\"Conn!\")", "def handle_message(self, message):", "def callback_botmessage(self, message):\n pass", "def callback_botmessage(self, message):\n pass", "def callback_botmessage(self, message):\n pass", "def on_me_joined(self, raw_msg, **kwargs):", "def ServerSyncReceived(self,message):", "def receive_message(self, message):", "def process_chatter(self, msg):\n # note, nothing in here is ROS specific, it's just python code that\n # runs when new info appears\n\n print msg.data # print the recieved message\n\n self.msgs_recieved += 1 # increase msg count\n self.msgs_recieved %= 500 # mod 500 so we don't get enormous numbers\n self.msg = \"%d messages recieved\" % self.msgs_recieved # set message", "def message_callback(self, message):\n pass", "async def on_message(self, message: \"steam.Message\") -> None:", "def callback_message(self, message):\n pass", "def callback_message(self, message):\n pass", "def websock_message(self, user, client, message):\n service = client.service\n self.websock_handlers[service]['new_message'](user, client, message)\n return", "def handleMessage(msg):", "def receiveMessage(self, user, message):\n pass", "def receive_message(self, message):\r\n return", "def _handle_message(self, msg):\n self.event('message', msg)", "def handle_message(self, msg):\n pass", "def _on_update(self, message):\n LOG.info('user {0} ### message.body {1}'.format(self.client_id, message))\n if isinstance(message.body, six.string_types):\n LOG.info('user {0} message obj is a string!'.format(self.client_id))\n try:\n body = json.loads(message.body)\n except ValueError as e:\n LOG.error(\"user {0} Exception {1}\".format(self.client_id, e))\n LOG.error(\"user {0} Exception {1}\".format(self.client_id, message.body))\n LOG.error(\"user {0} Exception {1}\".format(self.client_id, message))\n try:\n if body:\n # 消息计数加一,用于统计是否是关闭连接前的最后一条消息\n \"\"\"\n with (yield self.msg_lock_2.acquire(timeout=10)):\n body['seq'] = self.incr_msg_send()\n # 将消息写入到redis online 队列中\n self.write_online_msg(body)\n LOG.info(\"user {0} body is {1}\".format(self.client_id, body))\n \"\"\"\n\n self._send_msg()\n \"\"\"\n # 开始ping,检查websocket是否可以连接到手机\n notify_pong = ioloop.IOLoop.current().time()\n # ping 自己\n self.ping(str(datetime.now()))\n yield tornado.gen.sleep(self.msg_ping_time)\n self.notify_pong = self.last_pong\n\n if self.notify_pong >= notify_pong:\n # 消息优先级:队列消息 > 当前接收的消息 > redis中的消息\n # 检查队列中是否有未收到回复的消息,重新发送\n self.send_online_msg()\n self.r.set(\"user:{0}:online\".format(self.client_id), \"True\")\n else:\n # timeout, 说明网络不好\n self.msg_ping_time += 0.5\n LOG.info(\"user {0} is offline, WebSocket ping timeout after 1s.\".format(self.client_id))\n if self.msg_ping_time > 3:\n 
self.r.set(\"user:{0}:online\".format(self.client_id), \"False\")\n # 从前面到消息到目前为止网络到现在仍然不好,将消息写回到redis中\n self.force_close()\n \"\"\"\n\n except(WebSocketClosedError, AttributeError):\n LOG.info('user {0} WebSocketClosedError when on_update'.format(self.client_id))\n self.force_close()\n\n # self.write_message(message.body)\n # if self.client_id == body['client_id']:\n # print(\"entered!\")\n # self.write_message(message.body)", "def on_message(self, message):\n self.write_message(u\"%s\" % message)", "def recieved_message(json, methods=['GET', 'POST']):\n json['username'] = session['username']\n socketio.emit('server message', json)\n message = Message(\n user_id = session['user_id'],\n room_id = json[\"room_id\"],\n sendTime = datetime.now(),\n content = json[\"content\"]\n )\n db.session.add(message)\n db.session.commit()", "def onMessage(self, message):\n raise NotImplementedError", "def on_message(\n self, client: mqtt.Client, userdata: typing.Any, msg: mqtt.MQTTMessage\n ) -> None:\n self.msgs.append(msg)", "def handle(self, message):", "def new_message(self, room, mess):\n pass", "def on_message(self,ws,message):\n pass", "def new_message(self, message):\n self.message_counter += 1\n self.message_buffer.append(str(message))\n self.event_loop()", "def message(message):\n\troom = session.get('room')\n\tprint('%s : message : %s' % (session, message['message']))\n\temit('_message', {'user_name': session.get('name'), 'message' : message['message']}, room=room, include_self=False)", "def receive(self, message):", "async def chat_message(self, event):\n message = event['message']\n await self.send_json({\n 'message': message\n })", "async def chat_message(self, event):\n if self.user and not self.user.is_authenticated:\n return\n\n user_id = event['user_id']\n message = event['message']\n created_at = event['created_at']\n publisher_full_name = event['publisher_full_name']\n\n await self.send(text_data=json.dumps({\n 'user_id': user_id,\n 'created_at': created_at,\n 'message': \"{}\".format(message),\n 'publisher_full_name': publisher_full_name,\n }))", "def lastMessageReceived():", "async def chat_message(self, event):\n\n print(\"PublicChatConsumer\", \"chat_message from user\", event[\"user_id\"])\n await self.send_json({\n \"msg_type\": MSG_TYPE_MESSAGE,\n \"profile_image\": event[\"profile_image\"],\n \"username\": event[\"username\"],\n \"user_id\": event[\"user_id\"],\n \"message\": event[\"message\"],\n \"natural_timestamp\": humanize_or_normal(timezone.now())\n })", "def on_message(self, data):\n req = json.loads(data)\n self.serve(req)", "def on_message(self, data):\n req = json.loads(data)\n self.serve(req)", "def on_message(self, message):\n log.debug(\"Protocol got message {message}\", message=message)\n if message['type'] == \"change\":\n self.handler.process_packet(message['packet'])\n self.send_packet()\n elif message['type'] == \"chat\":\n self.on_chat_message(message)\n elif message['type'] == \"action\":\n self.on_action(message)\n else:\n log.warn(\"Unrecognized message type {type}\", type=message['type'])", "def client(self,message):\n self.message = message\n self.run()", "async def on_message(message):\n response = None # will save the response from the bot\n if message.author == client.user:\n return # the message was sent by the bot\n if message.type is discord.MessageType.new_member:\n response = \"Welcome {}\".format(message.author) # a new member joined the server. 
Welcome him.\n else:\n # A message was send by the user.\n msg = message.content.lower()\n response = db.handle_msg(msg)\n\n if response:\n # bot sends response to the Discord API and the response is show\n # on the channel from your Discord server that triggered this method.\n embed = discord.Embed(description=response)\n await message.channel.send(embed=embed)", "async def chat_message(self, event):\n await self.send(\n {'type': \"websocket.send\",\n 'text': event['response_data']}\n )", "def on_message(self, msg):\n self.log.info(msg)", "def client_message_handler(self, message, client):\n LOG.debug(f\"Разбираем сообщение: {message}\")\n if (\n s.KEY_ACTION in message\n and message[s.KEY_ACTION] == s.ACTION_PRESENCE\n and s.KEY_TIME in message\n and s.KEY_USER in message\n ):\n if message[s.KEY_USER][s.KEY_ACCOUNT_NAME] not in self.names.keys():\n self.names[message[s.KEY_USER][s.KEY_ACCOUNT_NAME]] = client\n MSG.send(client, s.RESPONSE_200)\n else:\n response = s.RESPONSE_400\n response[s.KEY_ERROR] = \"Имя пользователя уже занято.\"\n MSG.send(client, response)\n self.clients.remove(client)\n client.close()\n return\n # Если это сообщение, то добавляем его в очередь сообщений.\n # Ответ не требуется.\n elif (\n s.KEY_ACTION in message\n and message[s.KEY_ACTION] == s.ACTION_MESSAGE\n and s.KEY_TIME in message\n and s.KEY_TO in message\n and s.KEY_FROM in message\n and s.KEY_MESSAGE in message\n ):\n self.messages.append(message)\n return\n # Если клиент выходит\n elif (\n s.KEY_ACTION in message\n and message[s.KEY_ACTION] == s.ACTION_EXIT\n and s.KEY_ACCOUNT_NAME in message\n ):\n self.clients.remove(self.names[message[s.KEY_ACCOUNT_NAME]])\n self.names[message[s.KEY_ACCOUNT_NAME]].close()\n del self.names[message[s.KEY_ACCOUNT_NAME]]\n return\n # Иначе отдаём Bad request\n else:\n response = s.RESPONSE_400\n response[s.KEY_ERROR] = \"Запрос не корректен\"\n MSG.send(client, response)\n return", "def respond_to_message(self):\n\n MessageEventHandler(self.state, self.meta_data, self.message_data).handle_events(events=self.events)\n data = Converter(self.state).get_messages(meta_data=self.meta_data, message_data=self.message_data)\n\n outgoing_messages = data.get(\"messages\", [])\n events_to_publish = data.get(\"publish_events\", [])\n\n agent_messages = [message[\"message\"] for message in outgoing_messages if message[\"sending_to\"] == \"AGENT\"]\n user_messages = [message[\"message\"] for message in outgoing_messages if message[\"sending_to\"] == \"USER\"]\n\n agent_response = Util.send_messages(messages=agent_messages, sending_to=\"AGENT\")\n user_response = Util.send_messages(messages=user_messages, sending_to=\"USER\")\n\n if agent_response or user_response:\n\n Util.update_state(meta_data=self.meta_data, state=self.state)\n Util.log_events(meta_data=self.meta_data, state=self.state, events=events_to_publish)\n\n return 1", "async def new_message(self, message):\n user = self.scope['user']\n response_data = {\n 'message': message,\n 'username': user.get_full_name()\n }\n await self.create_chat_message(user, message)\n await self.channel_layer.group_send(\n self.conversation_name,\n {\n 'type': 'chat_message',\n 'response_data': json.dumps(response_data)\n }\n )", "def handle_my_custom_event(json, methods=['GET', 'POST']):\n data = dict(json)\n if \"name\" in data:\n db = DataBase()\n db.save_message(data[\"name\"], data[\"message\"])\n\n socketio.emit('message response', json)", "def on_message(self, _, message):\n with self.message_lock:\n 
self.messages.append(Message.deserialize(message))\n self.new_message_available.set()\n super().on_message(_, message)", "def message_new(\n self,\n event: Dict[str, Any]\n ) -> NoReturn:\n event = event[\"object\"][\"message\"]\n msg = event[\"text\"].lstrip(\"/\")\n peer_id = event[\"peer_id\"]\n from_id = event[\"from_id\"]\n msg_id = event[\"conversation_message_id\"]\n\n if peer_id in self.messages_to_delete:\n peer = CHAT_ID_OFFSET + config.USERBOT_CHATS[peer_id]\n new_messages_to_delete = []\n ids = []\n\n for item in self.messages_to_delete[peer_id]:\n if item['date'] > datetime.now():\n new_messages_to_delete.append(item)\n else:\n ids.append(item['id'])\n\n if new_messages_to_delete:\n self.messages_to_delete[peer_id] = new_messages_to_delete\n else:\n self.messages_to_delete.pop(peer_id)\n\n if ids:\n self.userbot.delete_messages(ids, peer)\n\n user = self.data.get_user(from_id, self) if from_id > 0 else None\n\n messages = self.get_messages(event)\n selected_message = messages[0] if len(messages) == 1 else None\n selected_user = (\n self.data.get_user(selected_message['from_id'], self)\n if selected_message and selected_message['from_id'] > 0 else None)\n\n try:\n self.commands.process(\n msg, peer_id, from_id, messages, msg_id,\n user, selected_user)\n except Exception as e:\n print(e)", "def on_bot_message():\n handle_bot_message(request.get_json())\n return \"ok\"", "async def on_chat_message(self, chat_message):\n pass", "def handle(self, message):\n print(\"You received a message:\")\n print(message)\n # Overwrite this function to do something with the message!", "def on_message(self, event):\n self.response = event.message\n self.connection.container.yield_() # Wake up the wait() loop to handle the message.", "async def on_message(self, message):\n\t\t# If message was sent in a guild\n\t\tif isinstance(message.channel, discord.TextChannel):\n\t\t\tguild = message.channel.guild\n\t\t\tleaderboard = self.leaderboards[str(guild.id)]\n\n\t\t\tif not message.author.bot:\n\t\t\t\t# Check message author\n\t\t\t\tif str(message.author.id) not in leaderboard[\"messageLeaderboard\"]:\n\t\t\t\t\tleaderboard[\"messageLeaderboard\"][str(message.author.id)] = 1\n\t\t\t\telse:\n\t\t\t\t\tleaderboard[\"messageLeaderboard\"][str(message.author.id)] += 1\n\n\t\t\t\t# Check for quotes\n\t\t\t\tif str(message.channel.id) == leaderboard[\"quotesChannel\"]:\n\t\t\t\t\tfor user in message.mentions:\n\t\t\t\t\t\tif str(user.id) not in leaderboard[\"quoteLeaderboard\"]:\n\t\t\t\t\t\t\tleaderboard[\"quoteLeaderboard\"][str(user.id)] = 1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tleaderboard[\"quoteLeaderboard\"][str(user.id)] += 1\n\n\t\t\t\t# Check for emojis\n\t\t\t\tfor emoji in self.bot.emojis:\n\t\t\t\t\temojiName = \"<:\" + emoji.name + \":\" + str(emoji.id) + \">\"\n\t\t\t\t\tfor index in range(0, message.content.count(emojiName)):\n\t\t\t\t\t\tleaderboard[\"emojiLeaderboard\"][str(emoji.id)] += 1\n\n\n\t\t\tleaderboard[\"lastUpdate\"] = message.created_at.isoformat()\n\t\t\tawait self.update_state()", "def callback_message(self, message):\n return \"hi bro\"", "def _on_message(self, client, userdata, msg):\n # print 'receiving message'\n epoch_time = self._get_epoch_time()\n time_string = time.strftime(\"%a, %d %b %Y %H:%M:%S +0000\", time.gmtime())\n if not self.file.closed:\n self.file.write(str(epoch_time) + ',' + time_string + \",\" + msg.topic + \",\" + str(msg.payload) + '\\n')", "async def new_post_message_listener(self, message: discord.Message) -> None:\n if not 
_channel.is_help_forum_post(message.channel):\n return\n\n await _message.notify_session_participants(message)\n\n if not message.author.bot and message.author.id != message.channel.owner_id:\n await _caches.posts_with_non_claimant_messages.set(message.channel.id, \"sentinel\")", "async def chat_message(self, event):\n await self.send_json(\n return_value(\n ACTION_MESSAGE,\n event['label'],\n event['username'],\n MSG_MESSAGE,\n event['message']\n )\n )", "def send_message(self, message):\n pass", "def on_message(client1, userdata, message):\n print(\"message received \" ,str(message.payload.decode(\"utf-8\")))", "def _send_message(self, e: Event):\n\n message = self.message_text.get(\"1.0\", 'end-1c').replace('\\n', \"\")\n\n if len(message) > 0:\n self.add_message_to_chat('you: ' + message)\n self._clear_message_text()\n self.connection_socket.send(bytes('them: ' + message, 'utf-8'))", "def send(self, event, message):\n pass", "def handle_messages(self):\n\n #Get the time at which the code started running\n current_time = datetime.datetime.now()\n\n #get all messages between now and the time where a message was last received\n messages = self.client.messages.list(\n date_sent_before = datetime.datetime.now()+ datetime.timedelta(hours = TIMEDIFFERENCE),\n date_sent_after = self.last_message_timing + datetime.timedelta(hours = TIMEDIFFERENCE)\n )\n\n #Iterate through all the new messages\n for record in messages:\n #If it is not from the Twilio Client\n if record.from_ != 'whatsapp:+14155238886':\n #Then update the timing of the last message to the current time\n self.last_message_timing = current_time\n #If the message sent is the '?' that seeks to get the number\n #of people in the queue\n if record.body == '?':\n #Get the data about people from firebase\n people_data = self.firebase.get_data('people_count')\n #Get the number of people queueing\n no_of_people = people_data['people_count']\n #Create a message from the API to tell the person\n #asking the number of people in the queue\n message = self.client.messages.create(\n body='The number of the people in the queue is {}'.format(no_of_people),\n from_='whatsapp:{sender_number}'.format(**self.config),\n to=record.from_\n )", "def post(self, request):\n # get message from request\n message = request.POST.get('message')\n # create message\n\n ChatMessage.objects.create(\n message=message, _from=request.user, _to=request.user.client.trainer.user)\n # return response\n return HttpResponse('success')", "def onMessage(self, msg):\n log.msg(str(msg))", "def callback_message( self, conn, mess):\n\n jid = mess.getFrom()\n props = mess.getProperties()\n text = mess.getBody()\n username = self.get_sender_username(mess)\n\n if username not in self.users.keys() + self.invited.keys():\n self.log.info(\"Ignored message from %s.\" % username)\n return\n\n self.log.debug(\"*** props = %s\" % props)\n self.log.debug(\"*** jid = %s\" % jid)\n self.log.debug(\"*** username = %s\" % username)\n self.log.debug(\"*** type = %s\" % type)\n self.log.debug(\"*** text = %s\" % text)\n\n # Ignore messages from before we joined\n if xmpp.NS_DELAY in props: return\n\n # If a message format is not supported (eg. 
encrypted), txt will be None\n if not text: return\n\n # Remember the last-talked-in thread for replies\n self._JabberBot__threads[jid] = mess.getThread()\n\n if ' ' in text:\n command, args = text.split(' ', 1)\n else:\n command, args = text, ''\n cmd = command\n self.log.debug(\"*** cmd = %s\" % cmd)\n\n # parse operators, commands, etc and if not, dump the message to the chat\n if self.apply_operator(mess, args):\n return\n\n if self.replace_text(username, mess):\n return\n\n if self.commands.has_key(cmd) and cmd != 'help':\n try:\n reply = self.commands[cmd](mess, args)\n except Exception, e:\n reply = traceback.format_exc(e)\n self.log.exception('An error happened while processing a message (\"%s\") from %s: %s\"' % (text, jid, reply))\n else:\n # In private chat, it's okay for the bot to always respond.\n # In group chat, the bot should silently ignore commands it\n # doesn't understand or aren't handled by unknown_command().\n default_reply = 'Unknown command: \"%s\". Type \"help\" for available commands.<b>blubb!</b>' % cmd\n if type == \"groupchat\": default_reply = None\n reply = self.unknown_command( mess, cmd, args)\n if reply is None:\n reply = default_reply\n\n if reply:\n self.send_simple_reply(mess,reply)\n\n self.log_to_mini_log(username, text)", "def receive_message(self, context, message):\r\n pass", "def receive_message(self):\r\n try:\r\n if self.is_connected:\r\n # Receive the messages.\r\n self.message_recv = self.server_connection.recv(1024)\r\n \r\n # Check if the message is not null.\r\n if self.message_recv != b\"\":\r\n\r\n # Decrypt the messages.\r\n self.message_recv = pickle.loads(self.message_recv)\r\n\r\n # Server request to update the online users list.\r\n if self.message_recv[0] == \"Update User\":\r\n self.updt_user = True\r\n self.data_user[\"Online_User\"] = self.message_recv[1]\r\n\r\n # Server request to exit the server.\r\n elif self.message_recv[0] == \"Exit Server\":\r\n self.new_msg = True\r\n self.message_recv[0] = [\"System\", \"Système\"]\r\n \r\n self.is_stopped = True\r\n self.is_connected = False\r\n\r\n else:\r\n self.new_msg = True\r\n\r\n # Avoid an error when shutting down the server.\r\n except ConnectionAbortedError as e:\r\n print(e)", "async def post(self):\n await self.handle_request(self.messages_new_api, 1)", "async def chat_message(self, event):\n\t\t# Send a message down to the client\n\t\tprint(\"DocumentChatConsumer: chat_message from user #\" + str(event))\n\t\ttimestamp = calculate_timestamp(timezone.now())\n\t\tawait self.send_json(\n\t\t\t{\n\t\t\t\t\"msg_type\": MSG_TYPE_MESSAGE,\n\t\t\t\t\"annotationId\": event['annotationId'],\n\t\t\t\t\"username\": event[\"username\"],\n\t\t\t\t\"user_id\": event[\"user_id\"],\n\t\t\t\t\"xfdfString\": event[\"message\"],\n\t\t\t\t\"natural_timestamp\": timestamp,\n\t\t\t},\n\t\t)", "def onMessageBegin(self, isBinary):", "def receive(self, msg):\n pass", "def handle_new_email(self):\n email = self.__email_handler.get_most_recent_email()\n message = self.__email_handler.get_email_body(email['id'])\n sender = self.__email_handler.get_email_sender(email['id'])\n\n if 'COMMAND' in message:\n\n if 'test' in message:\n print(f'{self.__source} Test Command Recieved')\n\n if 'new password' in message:\n print(f'{self.__source} New Password Command Recieved')\n # create new password\n # store new password\n # reset current password via screenscraper\n # notify users of change\n self.__email_handler.send(\n f'Password has been reset: {self.__password}')\n\n if 'get password' in message:\n 
print(f'{self.__source} Get Password Command Recieved')\n self.__email_handler.send('test', recipients=[sender])\n\n if 'add user' in message:\n print(f'{self.__source} Add User Command Recieved')\n self.__recipients.append(sender)\n # send email with message explaing email use\n\n if 'help' in message:\n print(f'{self.__source} Help Command Recieved')\n # send email with message explaing email use", "def post(self, request):\n # get message from request\n message = request.POST.get('message')\n # create message\n ChatMessage.objects.create(\n message=message, _from=request.user, _to=User.objects.get(pk=request.POST.get('user_id')))\n # return response\n return HttpResponse('success')", "def onMessageEnd(self):", "def on_message(ws, message):\n ts = time.time()\n st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')\n message_dict = message_to_dict(message)\n print('[' + st + '] Event in channel: ' + message_dict['channel'] +\n '. Created by user: ' + message_dict['user'] + '. Event Type: ' +\n str(message_dict['type']) + '.')\n handle_response(message_dict)", "def message_received(self, message):\n \n # Routing\n if self.route_message(message) == True:\n return\n \n # Handlers?\n if len(self._handlers) > 0:\n for handler in self._handlers:\n handler(message)\n \n # Storage?\n else:\n timestamp = 0\n t = datetime.datetime.today()\n timestamp = t.microsecond/1000 + t.second*1000 + \\\n t.minute*60*1000 + t.hour*60*60*1000 + t.day*24*60*60*1000\n while timestamp > 4294967295: timestamp -= 4294967295\n self._messages.put(tuple([timestamp,message]))", "def handle_msg(msg):\n if comm._msg_callback:\n comm._msg_callback(msg)", "def after_send(self):", "def onMessage(self):\n \"\"\"\n Validates that the received message is from a student and then broadcasts the message to the rest of the class.\n\n @param self: self is the instance of this object.\n @param message: the message that is received\n @param student: the student that sent the message\n \"\"\"\n pass", "def on_your_host(self, connection, event):\r\n print(event)\r\n print(event.arguments)\r\n\r\n if(len(event.arguments) != 0):\r\n message = event.arguments[0]\r\n else:\r\n message = str(event.arguments)\r\n\r\n res = self.cursor.execute(\"\"\"SELECT * FROM `IRC_servers` WHERE `Registred_users_userID` = %s AND `serverID` = %s;\"\"\", (self.userID, connection.serverID))\r\n if res != 0:\r\n result = self.cursor.fetchall()\r\n serverID_res = int(result[0][0])\r\n print(\"serverID = {}\".format(serverID_res))\r\n\r\n if serverID_res == int(connection.serverID): # pokud se získané ID z databáze rovná tomu, které v sobě\r\n # uchovává connection, redundantní check, ale just4safety\r\n res = self.cursor.execute(\"\"\"INSERT INTO `IRC_other_messages` (IRC_servers_serverID,\r\n fromHostmask,\r\n messageBody,\r\n commandType,\r\n timeReceived)\r\n values (%s, %s, %s, %s, %s)\"\"\", (serverID_res, event.source, message, event.type.upper(),\r\n datetime.datetime.utcnow()))\r\n\r\n\r\n self.db.commit()", "def on_welcome(self, raw_msg, server, port, nickname, **kwargs):", "async def handleMessage(self, message: discord.Message):\n # Ignore on DMs.\n if not isinstance(message.channel, discord.TextChannel):\n return\n\n # ignore bot messages\n if message.author.bot:\n return\n\n await self.saveMessageTimestamp(message, datetime.now().timestamp())", "def add_new_message():\n print \"test\"\n\n msg = request.args.get('text')\n # req_data= request.get_json()\n # text = req_data['text']\n print \"msg data:\", msg\n print \"user id: \", 
g.user\n if 'user_id' == \"\":\n abort(status.HTTP_401_UNAUTHORIZED)\n\n if msg:\n print msg\n # db = get_db()\n # db.execute('''insert into message (author_id, text, pub_date)\n # values (?, ?, ?)''', (g.user[0], msg,\n # int(time.time())))\n # db.commit()\n data = mongo.db.message.insert(\n {'author_id': g.user[0], 'username': g.user[1], 'email': g.user[2], 'text': msg, 'pub_date': int(time.time())})\n print \"API UPDATE DATA\", data\n flash('Your message was recorded')\n # username = g.user\n # data=[]\n username = g.user[1]\n print \"username\", username\n # data.append({'username':row[2]})\n # print data\n\n Jobj = {'user_id': g.user[0], 'username': username, 'msg': msg}\n ############### REDIS cache invalidate #####################\n R_SERVER.delete(user_timeline_key)\n return jsonify(messages=Jobj, Status_code=status.HTTP_200_OK)", "async def message(self, ctx, *, message:str):\r\n serverid = ctx.message.server.id\r\n self.adkillr[serverid]['message'] = message\r\n dataIO.save_json(\"data/adkillr/adkillr.json\", self.adkillr)\r\n await self.bot.say(\"Message set!\")", "def listen(client, main):\n\n @client.event\n async def on_message_edit(old, message):\n main.message_handler(message, True)", "def send(self, msg):\n pass", "def send(self, msg):\n pass", "def send(self, msg):\n pass", "def handle_message(self, msg):\n self.messages.append({\n 'type': msg.category,\n 'module': msg.module,\n 'obj': msg.obj,\n 'line': msg.line,\n 'column': msg.column,\n 'path': msg.path,\n 'symbol': msg.symbol,\n 'message': msg.msg,\n 'message-id': msg.msg_id,\n })", "def receive_message():\n\n msg_data = flask.request.get_json(silent=True, force=True)\n\n name = msg_data['name']\n picture_url = msg_data['picture_url']\n text = msg_data['text']\n time_sent = msg_data['time_sent']\n settings_key = ndb.Key(settings.Settings, msg_data['settings'])\n\n logging.info(\"Recording entry {name: %s, text: %s, time_sent: %d}\",\n name, text, time_sent)\n\n new_sm = stored_message.StoredMessage(name=name,\n picture_url=picture_url,\n response_triggered=False,\n text=text,\n time_sent=datetime.fromtimestamp(\n time_sent),\n settings=settings_key)\n new_sm.put()\n\n return SUCCESS", "def on_msg(self, callback):\n self._msg_callback = callback", "def on_message(self, userdata, message):\n logging.debug(f\"Message arrived from {message.topic}\")\n self.process(userdata, message)", "def on_message(self, json_state):\n global receiving_message\n receiving_message = True\n global current_state\n current_state = json.JSONDecoder(object_pairs_hook=OrderedDict).decode(json_state)\n global n_messages\n\n if not n_messages: #first message ever\n new_state = self.initialize_state(current_state)\n else:\n new_state = self.on_state_change(current_state)\n\n n_messages += 1\n if new_state: #if you return a new state send it back\n receiving_message = False", "def send(self, msg):\n self.message('Me', msg)", "def process(self, msg):\n print \"HANDLER: received a msg: %s\" % msg", "async def send(self, message):", "def send_message():\n incoming = request.get_json()\n message = Message(\n user_id = session['user_id'],\n room_id = incoming[\"room_id\"],\n sendTime = datetime.now(),\n content = incoming[\"content\"]\n )\n db.session.add(message)\n db.session.commit()\n return jsonify(\n content = incoming[\"content\"]\n )", "def update(self, msg):\n pass" ]
[ "0.752636", "0.74791414", "0.7469889", "0.73891926", "0.7261544", "0.71904874", "0.7182423", "0.7182423", "0.7182423", "0.7179466", "0.71528774", "0.714717", "0.7144339", "0.71373135", "0.7136156", "0.71050507", "0.71050507", "0.70408386", "0.7027871", "0.7018298", "0.6988954", "0.696009", "0.6944155", "0.6941223", "0.6918332", "0.6910909", "0.69107234", "0.6901692", "0.6901668", "0.69013333", "0.6892886", "0.6866676", "0.68214476", "0.6785759", "0.6768321", "0.6766212", "0.6757133", "0.6747367", "0.67456156", "0.67456156", "0.67242473", "0.67031044", "0.6694881", "0.668035", "0.6679666", "0.66737115", "0.6670358", "0.66654927", "0.66614586", "0.66558087", "0.66549367", "0.6643572", "0.6622169", "0.6612386", "0.6610201", "0.65994734", "0.65989405", "0.659445", "0.6583968", "0.6583359", "0.65533507", "0.6553098", "0.6552194", "0.6551769", "0.65444434", "0.65405387", "0.65312696", "0.6530423", "0.65135103", "0.65063626", "0.65032536", "0.6502945", "0.6499595", "0.6487254", "0.6484484", "0.6474435", "0.64723116", "0.645374", "0.6446918", "0.64462703", "0.64457816", "0.64435714", "0.64418834", "0.6435465", "0.64306813", "0.64228964", "0.64218485", "0.6415187", "0.64103734", "0.64103734", "0.64103734", "0.64096105", "0.6409289", "0.64063704", "0.6401724", "0.6400061", "0.63991916", "0.6396102", "0.6379097", "0.6377678", "0.6371959" ]
0.0
-1
broadcast a new user joining the group
def user_joined_group(cls, group, user):
    text = "{} joined the group chat".format(user.username)
    cls._broadcast_group(group, None, group, text)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def notify_new_user(self, user):\n # join to default group\n g = self.root.get('community-general')\n if g:\n self.join_group(user, g)", "def join_server(self, data, user):\n # User will spawn in one of following rooms\n user.room = choice((\"100\", \"300\", \"800\", \"804\"))\n user.send([\"js\", \"-1\", \"1\", \"1\", \"0\", \"0\"])\n self.add(user)", "def on_joinuser(self, data):\n user_data = {\n 'un': data[3], # nick\n 'ml': data[4], # mod level\n 'st': data[5], # status related\n 'id': data[6], # ezcapechat user id\n 'su': data[7] # ?\n }\n if data[3] == self.users.client.nick:\n self.users.add_client_data(user_data)\n else:\n _user = self.users.add(data[3], user_data)\n print ('%s Joined the room.' % _user.nick)\n\n #BOT\n if (_user.nick.lower() in self.autogreet):\n self.send_public(\"%s, %s\" % (_user.nick, self.autogreet[_user.nick.lower()]))", "def on_join(data):\r\n\r\n username = data[\"username\"]\r\n room = data[\"room\"]\r\n join_room(room)\r\n\r\n # Broadcast that new user has joined\r\n send({\"msg\": username + \" has joined the \" + room + \" room.\"}, room=room)", "def _broadcast_group(cls, sender, sender_sid, group, text):\n # todo make this method async\n for recipient in group.get_users():\n if recipient == sender:\n continue\n cls._broadcast_user(sender, sender_sid, recipient, text, group.id)", "def join(self):\n channel = self.data[0]\n user_pseudonym = VALIDATED_USERS.get_pseudonym(SOCKET_TO_USERID.get(self.source, None))\n\n if user_pseudonym and self.target:\n target_server = self.target[1]\n if(BANHANDLER.is_banned_from_channel(user_pseudonym, target_server, channel)):\n self.source[0].send(\":orcbot!~@localhost PRIVMSG \"+SOCKET_TO_USERID[self.source]+\" :You're banned from \"+channel+\"\\r\\n\")\n elif(self.target):\n self.message = self.message +\"\\r\\n\"\n self.target[0].sendall(self.message)\n self.send()", "def userJoined(self, user, channel):\n self.dispatch('population', 'userJoined', user, channel)", "def user_left_group(cls, group, user):\n text = \"{} left the group chat\".format(user.username)\n cls._broadcast_group(group, None, group, text)", "def join_player(self, data, user):\n self.remove(user)\n\n user.room = \"100\"\n user.x = \"0\"\n user.y = \"0\"\n user.frame = \"0\"\n\n self.add(user)", "def AddMemberToGroup(group_id,user_id):\r\n Group.AddMemberToGroup(group_id,user_id)", "def action_add_to_group(self, kwargs):\n user = kwargs[\"user\"]\n group = kwargs[\"group\"]\n\n if self.engine.add_user_to_group(user, group):\n info(f\"User {user} sucessfully added to {group}\")\n else:\n error(f\"Unable to add {user} to {group}, check privileges or dn\")", "def join(self, gid, group_fields=None):\n kwargs = {}\n if group_fields:\n kwargs['data'] = group_fields\n r = self.put(\"/user/groups/{gid:d}\".format(gid=gid), **kwargs )\n if r.status_code == 204:\n return { \"status\" : True, \"message\" : \"\"}\n return { \"status\" : False, \"message\" : r.json() }", "def broadcast_to_users(self, text: str, sending_group):\n if sending_group == \"global\":\n for user in self.__users.values():\n user.send_message(f\"broadcast from the server: {text}\")\n print(\"in broadcast to users global\")\n elif sending_group.isdigit():\n sending_group = int(sending_group)\n for user in self.__users.values():\n for station in user.stations:\n if station.line_number == sending_group:\n user.send_message(f\"broadcast from the server: {text}\")\n print(f\"in broadcast to users line{sending_group}\")", "def __send_broadcast_to_users(self, sending_group=\"global\"):\n\n 
if sending_group == \"global\":\n data = self.__global_broadcast_entry.get()\n self.__global_broadcast_entry.delete(0, 'end')\n print(f\"broad casting data: {data}\")\n self.__telegram_controller.broadcast_to_users(data, sending_group = \"global\")\n\n elif sending_group == \"line\":\n line = self.__line_number_broadcast_entry.get()\n if len(line) >0 and line.isnumeric():\n data = self.__line_text_broadcast_entry.get()\n self.__line_text_broadcast_entry.delete(0, 'end')\n self.__line_number_broadcast_entry.delete(0, 'end')\n self.__telegram_controller.broadcast_to_users(data, sending_group=line)\n else:\n print(f\"line number must be a number, {line}\")\n else:\n print(f\"{sending_group} is an invalid sending group\")", "def join_group(self, user, group, force=0):\n if not force and not group.can_join(user):\n raise NotEnoughPrivileges\n \n group.add_member(user)\n user.add_to_group(get_usergroup_database().get_usergroup(group.get_user_id()))\n if hasattr(user, 'karma_activity_credit'):\n # groups can join groups, and groups don't have karma_activity_credit\n user.karma_activity_credit()\n \n self._flush_user_data_caches(user)", "def userJoined(self, user, channel):\n # Send messasge to Server bot.\n self.data_in(text=\"\", type=\"joined\", user=\"server\", channel=channel,\n nicklist=[user])", "def add_user_to_group(user, group):\n Command.run(['usermod', '-a', '-G', user, group])", "def add_new_member(self, event):\n body = event['body']\n body = json.loads(body)\n\n required_fields = ['group_id', 'new_user_id']\n for f in required_fields:\n if f not in body:\n return get_bad_request('POST body missing field {}'.format(f))\n\n group_id = body['group_id']\n new_user_id = body['new_user_id']\n \n user = self.mealShareUsers.get_user_cognito_data(event)\n current_user = user['user_id']\n \n # Requesting user must already be a member\n if not self.mealShareGroups.is_user_in_group(current_user, str(group_id)):\n return {\n 'statusCode': 401,\n 'statusMessage': 'User {} is not a member of the group ID {} and can not add a person to it'.format(current_user, group_id),\n 'group_id': group_id,\n 'new_user_id': new_user_id\n }\n \n # Check if adding was successful\n success = self.mealShareGroups.add_user_to_group(new_user_id, group_id)\n if success:\n return {\n 'statusCode': 200,\n 'statusMessage': 'Successfully added {} to group {}'.format(new_user_id, group_id),\n 'group_id': group_id,\n 'new_user_id': new_user_id\n }\n else:\n return {\n 'statusCode': 500,\n 'statusMessage': 'FAILED to add user {} to group {} by {}'.format(new_user_id, group_id, current_user),\n 'group_id': group_id,\n 'new_user_id': new_user_id\n }", "def userJoined(self, user, channel):\n ss = self.findSessions(channel)[0]\n user = user.decode(ss.encoding)\n r = ss.addNick(user)\n self.sendResponse(r)", "def add_member(self, user):\n user_in = user.get_groups()\n for group in user_in:\n if self.usergroup_node == group.usergroup_node:\n print('user is already a member')\n return False\n membership = Relationship(user.get(), 'in', self.usergroup_node)\n graph.create(membership)\n return self.usergroup_node", "async def react_join(a: Message):\n if a.action.member_id == club_id:\n await a.answer(r_register_help)\n stats.jincr()", "def new_user(self, socket, name):\r\n for i in self.matches.values(): # Si hay creadas se buscan disponibles\r\n if i.available_to_join:\r\n i.add_new_client(socket, name)\r\n return\r\n # Si no se encontraron disponibles entonces se crea una\r\n new_match = Match(self.send_function)\r\n 
self.matches[new_match.match_id] = new_match\r\n new_match.add_new_client(socket, name)", "def joined(message):\n\tglobal GLOBAL_NUM_USERS\n\tGLOBAL_NUM_USERS = GLOBAL_NUM_USERS + 1\n\tprint(message)\n\tsession['name'] = message['name']\n\tsession['room'] = message['room']\n\troom = session.get('room')\n\tjoin_room(room)\n\tprint('%s : joined' % session)\n\temit('_joined', {'user_name': session.get('name'), 'num_users' : GLOBAL_NUM_USERS}, room=room)", "def join_room(self, data, user):\n # Filters out | to prevent string injection\n data[\"args\"] = [i.replace(\"|\", \"\") for i in data[\"args\"]]\n\n self.remove(user)\n\n user.room = data[\"args\"][1]\n user.x = data[\"args\"][2]\n user.y = data[\"args\"][3]\n user.frame = \"0\"\n\n self.add(user)", "async def join(self, gid):\n\t\tif self.group != None:\n\t\t\tif self.group.gid == gid:\n\t\t\t\traise exceptions.ClientError('IN_GROUP')\n\n\t\tif gid and not utilities.validate_string(gid):\n\t\t\traise exceptions.ClientError('INVALID_STRING')\n\n\t\tif gid:\n\t\t\tgroup = Group.register(gid)\n\t\telse:\n\t\t\ttries = 0\n\t\t\twhile 1:\n\t\t\t\tif tries >= 5:\n\t\t\t\t\traise exceptions.ClientError('INVALID_GROUP')\n\t\t\t\tgid = utilities.random_string(16)\n\t\t\t\tgroup = Group.register(gid)\n\t\t\t\tif len(group.members) == 0:\n\t\t\t\t\tbreak\n\t\t\t\ttries += 1\n\n\t\tif group.in_game:\n\t\t\traise exceptions.ClientError('IN_GAME')\n\n\t\tawait group.add(self)", "def add_member_command(client: MsGraphClient, args: dict) -> tuple[str, dict, dict]:\n group_id = str(args.get('group_id'))\n user_id = str(args.get('user_id'))\n required_properties = {\n \"@odata.id\": f'https://graph.microsoft.com/v1.0/users/{user_id}'}\n client.add_member(group_id, required_properties)\n\n human_readable = f'User {user_id} was added to the Group {group_id} successfully.'\n return human_readable, NO_OUTPUTS, NO_OUTPUTS", "def joinedChannel(self, channel, users):\n pass", "def add_user_to_group(self, login, group):\n return self.request('put',\n '/groups/{}/users/{}'.format(group, login),\n msg='adding user {} to group {}'.format(login, group)\n )", "def add_to_group(user: User, group: Group) -> Result:\n if user.pw_name in group.gr_mem:\n return Result(State.unchanged)\n command([\"/usr/sbin/addgroup\", user.pw_name, group.gr_name])\n group.gr_mem.append(user.pw_name)\n return Result(State.success)", "def invite(self,roomName,user):\n\n self.sendCommand(roomName +\" /invite\",user)", "def add_user_to_group(backend, details, response, user, is_new=False, *args, **kwargs):\n \n if is_new:\n google_apps_add_group_task.apply_async([GAPPS_GROUP_NAME, user.email])", "def whisper(self,name):\n\n self.sendCommand(\"global /join\",name+self.userName+\" private\")\n self.master.after(300,self.sendCommand,name+self.userName+\" /invite\",name)", "async def join_room(self, room_id):\n print(\"PublicChatConsumer\", \"join_room\", self.scope[\"user\"])\n if self.scope[\"user\"].is_authenticated:\n try:\n room: PublicChatRoom = await get_room_or_error(room_id)\n except ClientError as e:\n await self.handle_client_error(e)\n else:\n # Add user to the room\n await connect_user(room, self.scope[\"user\"])\n\n # Set the room_id with the current room\n self.room_id = room_id\n\n # Add user to the group\n await self.channel_layer.group_add(\n room.group_name,\n self.channel_name\n )\n\n # Send acknowledgement to client\n await self.send_json({\n \"join\": str(room_id),\n \"username\": self.scope[\"user\"].username\n })\n\n # Send the total number of connected users to client\n 
connected_users_count = await get_connected_users_count(room)\n await self.channel_layer.group_send(\n room.group_name,\n {\n \"type\": \"connected.users.count\",\n \"connected_users_count\": connected_users_count\n }\n )", "def on_join(data):\n print(str(data))\n if models.Leaderboard.query.filter_by(\n username=data['user']).first() is None:\n add_user(data['user'])\n users, scores = calculate_scores()\n socketio.emit('leaderboard_info', {'users': users, 'scores': scores})", "def lobbyUserJoin(self, __userID):\n\n\t\t# Make sure the user is not already in mp lobby\n\t\tif (__userID not in self.usersInLobby):\n\t\t\t# We don't need to join #lobby, client will automatically send a packet for it\n\t\t\tself.usersInLobby.append(__userID)", "async def join(self, ctx, invite : discord.Invite):\r\n if ctx.message.author.id == \"481270883701358602\":\r\n await self.client.accept_invite(invite)\r\n await self.client.say(\"Joined the server.\")\r\n else:\r\n await self.client.say(\"**Owner only command.**\")", "def subscribe(self, group, user, reason=GroupSubscriptionReason.unknown):\n try:\n with transaction.atomic():\n self.create(\n user=user,\n group=group,\n project=group.project,\n is_active=True,\n reason=reason,\n )\n except IntegrityError:\n pass", "def on_join(data):\n quiz = store.get_quiz_by_user_id(data['user_id'])\n room = quiz.quiz_id\n\n # get and clean the users (no score)\n users = store.get_users_by_id(store.get_quiz_by_id(room).users)\n users_cleaned = [user.name for user in users]\n\n # emit the new users the to the room\n if room is not None:\n join_room(room)\n emit(\"current_players\", {\"users\": users_cleaned}, room=room)", "def on_join(data):\n username = request.sid\n room = data\n join_room(room)\n logging.info(username + ' has entered the room.')\n send(username + ' has entered the room.', room=room)", "def add(self, user):\n int_id = user.get_int_id(self.rooms)\n self.rooms[user.room][\"users\"].append(user)\n\n # Games\n if self.rooms[user.room][\"isGame\"] == \"true\":\n user.send([\"jg\", int_id, user.room])\n # Rooms\n else:\n user.send([\"jr\", int_id, user.room, self.get_strings(user.room)])\n self.packet.send_room([\"ap\", int_id, user.get_string()], user.room)", "async def on_member_join(member):\r\n pass", "def join_farm(self, request, pk):\n farm = self.get_object()\n user = request.user\n farm.add_member(user)\n return Response({}, status=status.HTTP_202_ACCEPTED)", "def handle(self, user):\n\n if not self.group_users:\n return\n for group_name, users in self.group_users.items():\n if user.username in users:\n group = self.get_group_from_db(group_name)\n user.groups.add(group)\n logger.info('Added {} to {}'.format(user.username, group_name))", "async def on_member_join(member):\n if boterate.has_member(member):\n boterate.update_member(member)\n else:\n boterate.insert_user(member)", "def join_room(self, client, room):\n if room.verify_if_is_invited(client):\n room.add_member(client)\n self.send_message('Te has unido a la sala {}'.format(room.get_name()), client)\n else:\n self.send_message('No estas invitado a la sala.', client)", "def notify(cls, user_id, message):\n # Find the subscription group for user.\n group = None if user_id is None else f\"user_{user_id}\"\n cls.broadcast(group=group, payload=message)", "def _broadcast_message_to_users(self, message):\n self.logger.info(f\"Broadcasting message `{message}`\")\n for id, name in self.users.items():\n time.sleep(.1) # Telegram servers does not let you send more than 30 messages per second\n try:\n 
self.updater.bot.sendMessage(int(id), message)\n\n except BaseException as e:\n traceback.print_exc()\n self.logger.info(f'Failed to broadcast message to {name} due to {e}')", "async def user_joined_button(self, payload: discord.RawReactionActionEvent) -> None:\n\n self.bits = flip_action_bits(LoggingActions.USER_JOINED, self.bits)\n await self.update_embed()", "def join_group(self, group: InterphoneGroup) -> None:\n self._group = group\n group.register(self)", "def handle_groupchat_invite(self, inv):\n logging.debug(\"MUC invite to %s from %s: %s\", inv['to'], inv[\"from\"], inv)\n if inv['from'] not in self.rooms.keys():\n self.xmpp.event(\"groupchat_invite\", inv)", "def broadcast(update: Update, context: CallbackContext) -> None:\n \n if str(update.message.chat_id) == str(ADMIN_CONVERSATION_ID):\n update_string = update.message.text[11:]\n logger.info(\"Admin did a broadcast of \" + str(update_string))\n users_list = users_table.all()\n for user in users_list:\n if user['subscribed'] == \"True\":\n try:\n context.bot.send_message(user['user'], parse_mode='HTML', text=update_string)\n logger.info(\"Broadcasted message to user \" + str(user['user']))\n except:\n e = sys.exc_info()[0]\n logger.info(str(e))\n logger.info(\"Got an exception sending message to \" + str(user['user']))", "def joined(self, channel):\n log.msg(\"[I have joined %s]\" % channel)\n self.msg(channel, \"user1: bonbon\")", "def irc_JOIN(self, prefix, params):\n user = re.match(self.user_regex, prefix)\n channel = params[0]\n\n self.logger.debug(\n \"%s!%s@%s joined %s\" %\n (user.group(1), user.group(2), user.group(3), channel)\n )\n\n self.event_manager.fire(\"irc.join\", user, channel)", "def start(self, update, context):\n # add or update to the sqlite table.\n chat = update.message.chat\n user_tuple = self.db_manager.create_user_tuple(chat.id)\n self.db_manager.add_new_user(user_tuple)\n self.logger.info(\n 'A new user with username: %s and chat_id: %s subscribed to the list.' % (chat.username, chat.id)\n )\n update.message.reply_text('Welcome! 
You have successfully subscribed to real time notification for a six '\n 'scored in IPL 2019 cricket match.')", "def broadcast(self, writer, message):\r\n for user in self.connection_pool:\r\n if user != writer:\r\n # We don't need to also broadcast to the user sending the message\r\n user.write(f\"{message}\\n\".encode())", "def broadcast(msg):\r\n for user in clients:\r\n msg_client(msg, user)", "async def group(ctx, *, new_group=None):\n if ctx.message.channel.name.lower() not in bot_channels:\n return\n\n # Can't be group-less\n if new_group is None:\n new_group = random.choice(changeable_groups)\n new_group = new_group.lower()\n author = ctx.message.author\n member_roles = author.roles\n server_roles = ctx.message.server.roles\n\n member_allowed = discord.utils.find(lambda r: r.name.lower() == required_role, member_roles)\n\n if not member_allowed:\n need_citizen = \"You must be a member of the {0} role to join a color group\"\n await amor_manager.say(need_citizen.format(required_role.title()))\n return\n\n if new_group in changeable_groups:\n # Remove the old group the user was in\n new_roles = [r for r in member_roles if not r.name.lower() in changeable_groups]\n # Get the proper object for the user's new group\n role = discord.utils.find(lambda r: r.name.lower() == new_group, server_roles)\n if role is not None:\n new_roles.append(role)\n await(amor_manager.replace_roles(author, *new_roles))\n await amor_manager.say('{0} moved to group {1}'.format(author.name, new_group))\n else:\n suggest = random.choice(changeable_groups)\n cant_join = \"`{0}` is not a color group you're allowed to join. Why not try `{1}`\"\n await amor_manager.say(cant_join.format(new_group, suggest))", "def joined(message):\n #room = session.get('room')\n room='abc'\n join_room(room)\n #emit('status', {'msg': session.get('name') + ' has entered the room.' + message['msg']}, room=room)\n emit('status', {'msg': 'Yao has entered the room.'}, room=room)\n #emit('status', {'msg': 'Yao has entered the room.'}, room='room1')", "def invite(self, mess, args):\n user = self.get_sender_username(mess)\n if user in self.users:\n self.send(args, '%s invited you to join %s. Say \",help\" to see how to join.' % (user, CHANNEL))\n self.invited['%s@%s' %(xmpp.JID(args).getNode(), xmpp.JID(args).getDomain())] = ''\n self.log.info( '%s invited %s.' 
% (user, args))\n self.save_state()\n self.message_queue.append('_%s invited %s_' % (self.users[user], args))", "def listen_channel_moderator_add(self, broadcaster_user_id: str, callback: CALLBACK_TYPE) -> str:\n return self._subscribe('channel.moderator.add',\n '1',\n {'broadcaster_user_id': broadcaster_user_id},\n callback)", "def flash_broadcast(self,params):\n text = params['text']\n if self.participant:\n self.service.sendParticipants(self.name,'msg',{\"text\":text,\"sender\":self.name})\n else:\n self.notLoggedIn()", "def add_to_download_group(self, user):\r\n user.groups.add(self.dl_grp)", "def irc_INVITE(self, prefix, (user, channel)):\n self.join(channel)", "def on_me_joined(self, raw_msg, **kwargs):", "def userJoin(self, __userID):\n\n\t\tif (__userID not in self.connectedUsers):\n\t\t\tself.connectedUsers.append(__userID)", "def join(self, message, db_session):\n username = self.ts.get_user(message)\n user = db_session.query(db.User).filter(db.User.name == username).one_or_none()\n if not user:\n user = db.User(name=username)\n db_session.add(user)\n try:\n self.player_queue.push(username, user.times_played)\n self._add_to_whisper_queue(username, \"You've joined the queue.\")\n user.times_played += 1\n except RuntimeError:\n self._add_to_whisper_queue(username, \"You're already in the queue and can't join again.\")\n\n # queue_snapshot = copy.deepcopy(self.player_queue.queue)\n # self.command_queue.appendleft(('_insert_into_player_queue_spreadsheet',\n # {'username': username, 'times_played':user.times_played, 'player_queue': queue_snapshot}))", "def joined(self, channel):\n log.info(\"Joined %s.\", channel)\n # ask for the current list of users in the channel\n self.dispatch('presence', 'joined', channel)", "def test_new_user_subscriptions(self):\n r = self.app.get('/admin/groups/')\n dev_holder = r.html.find(\n 'table', {'id': 'usergroup_admin'}).findAll('tr')[2]\n developer_id = dev_holder['data-group']\n with audits('add user test-user to Developer'):\n self.app.post('/admin/groups/add_user', params={\n 'role_id': developer_id,\n 'username': 'test-user'})\n p_nbhd = M.Neighborhood.query.get(name='Projects')\n p = M.Project.query.get(shortname='test', neighborhood_id=p_nbhd._id)\n uid = M.User.by_username('test-user')._id\n for ac in p.app_configs:\n sub = M.Mailbox.subscribed(\n user_id=uid, project_id=p._id, app_config_id=ac._id)\n assert not sub, 'New user subscribed to app %s' % ac", "def new_user(cls, user):\r\n pass", "def add_to_groups(self, username, groups):\n pass", "def register(self, user):\n if not self.get():\n user_node = user.get() # transform user object to user node object\n usergroup_node = Node(\"Usergroup\",\n groupname=self.groupname,\n id=uuid4().hex)\n graph.create(usergroup_node)\n ownership = Relationship(user_node, 'owns', usergroup_node)\n membership = Relationship(user_node, 'in', usergroup_node)\n graph.create(ownership)\n graph.create(membership)\n self.usergroup_node = usergroup_node\n self.id = usergroup_node['id']\n return usergroup_node\n return self", "def new_user(cls, user):\n pass", "def connect(self):\n try:\n if not self.scope['user'].is_authenticated:\n logger.error('User in not authenticated')\n self.close()\n\n user = Profile.objects.get_authenticated(self.scope['user'])\n group_name = user.group_name\n\n async_to_sync(self.channel_layer.group_add)(\n group_name,\n self.channel_name,\n )\n\n self.accept()\n except Exception as e:\n logger.error(e)\n self.close()", "def joined(message):\n room = session.get('room')\n id = 
message[\"id\"]\n date = message[\"date\"]\n mid = session.get('mid')\n join_room(room)\n\n\n \"\"\"Load chat history and broadcast to the user that just joined\"\"\"\n #load history\n #save to mongo\n client = MongoClient(\"mongodb+srv://save_info:[email protected]/chat?retryWrites=true&w=majority\")\n\n #sort by descending order of creation\n\n #get chatroom info\n\n db = client.get_database(\"chat_rooms\")\n chat = db[\"chat_rooms\"]\n query = {\"roomId\": room, \"mid\" : {\"$ne\": int(mid)}}\n chat_rooms = db[\"chat_rooms\"]\n chat_room = list(chat_rooms.find(query, {'_id': False}).limit(1))[0]\n partner_name = chat_room[\"name\"]\n partner_mid = chat_room[\"mid\"]\n\n query = {\"roomId\": room, \"mid\" : int(mid)}\n chat_room = list(chat_rooms.find(query, {'_id': False}).limit(1))[0]\n chat_link = chat_room[\"chat_link\"]\n\n emit('details', {'roomId': room, \"name\": session.get(\"name\"), \"mid\": int(mid), \"chat_link\":chat_link, \"partner_name\": partner_name, \"partner_mid\": partner_mid}, room=id)\n\n db = client.get_database(\"chat\")\n chat = db[\"chat\"]\n query = {\"roomId\": room}\n\n chatHistory = list(chat.find(query, {'_id': False}).limit(50))\n if len(chatHistory) > 0:\n #load chat history\n\n emit('chatHistory', {'chatHistory': chatHistory}, room=id)\n\n\n #delete all scheduled notifications to this user in this chatroom\n scheduler = db[\"scheduler\"]\n query = {\"receiver\": int(mid)}\n scheduler.delete_many(query)", "def add_user_to_group(self,username,groupname):\n\n if not self.check_prereqs():\n raise StopIteration\n\n db = self.env.get_db_cnx()\n cursor = db.cursor()\n\n query=self.create_query(self.sql_add_user_to_group_query,{'username':username,'groupname':groupname,'username_field':self.sql_username_field,'groupname_field':self.sql_groupname_field})\n self.log.debug(\"sqlflexibleauthstore: add_user_to_group: %s\" % (query,))\n\n cursor.execute(query)\n if cursor.rowcount > 0:\n db.commit()\n return True\n return False", "def new_user():\n pass", "def joined(self, channel):\n # Return user list to Server bot.\n self.get_nicklist()", "def invite_user(session, invitee):\n session.invite_event.clear()\n key = b64encode(messaging.common.pkc_encrypt(\n session.get_channel_key(), session.get_encryption_cert(invitee))).decode()\n msg = {\n kk.typ: kk.add_user,\n kk.inviter: session.user,\n kk.invitee: invitee,\n kk.chid: session.chan,\n kk.chkey: key,\n }\n msg[kk.signature] = b64encode(\n messaging.common.create_msg_sig(session, msg)).decode()\n messaging.common.send_msg(session.sock, msg, key=session.symkey)", "def joined(message):\n room = session.get('room')\n join_room(room)\n emit('status', {'msg': session.get('name') + ' has entered the room.'}, room=room)", "def joined(message):\n room = session.get('room')\n join_room(room)\n emit('status', {'msg': session.get('name') + ' has entered the room.'}, room=room)", "def add_to_group(_request, group_id, email):\n group = models.UserGroup.get_by_id(int(group_id))\n user_key = models.UserProfile.load(email).key()\n if group.users is None:\n group.users = []\n logging.warning('Group \"%s\" had a None users list', group.name)\n group.users.append(user_key)\n group.put()\n\n url = urlresolvers.reverse('views.admin.edit_user', args=[email])\n return http.HttpResponseRedirect(url)", "def joinMUC(self, room, nick, maxhistory=\"0\", password='', wait=False, pstatus=None, pshow=None, pfrom=None):\n stanza = self.xmpp.makePresence(pto=\"%s/%s\" % (room, nick), pstatus=pstatus, pshow=pshow, pfrom=pfrom)\n x = 
ET.Element('{http://jabber.org/protocol/muc}x')\n if password:\n passelement = ET.Element('{http://jabber.org/protocol/muc}password')\n passelement.text = password\n x.append(passelement)\n if maxhistory:\n history = ET.Element('{http://jabber.org/protocol/muc}history')\n if maxhistory == \"0\":\n history.attrib['maxchars'] = maxhistory\n else:\n history.attrib['maxstanzas'] = maxhistory\n x.append(history)\n stanza.append(x)\n if not wait:\n self.xmpp.send(stanza)\n else:\n #wait for our own room presence back\n expect = ET.Element(\"{%s}presence\" % self.xmpp.default_ns, {'from':\"%s/%s\" % (room, nick)})\n self.xmpp.send(stanza, expect)\n self.rooms[room] = {}\n self.ourNicks[room] = nick", "def broadcastUserRatingAvg(sContext, uRRDDTrain):\n userRatingAvgList = uRRDDTrain.map(lambda x: calcUserMeanRating(x)).collect()\n userRatingAvgDict = {}\n for (user, avgscore) in userRatingAvgList:\n userRatingAvgDict[user] = avgscore\n uRatingAvgBC = sContext.broadcast(userRatingAvgDict)# broadcast\n return uRatingAvgBC", "def start(self, update, context):\n\n telegram_user = update.message.from_user\n\n # Add new User if not exist\n if not self.db.get_user(telegram_id=telegram_user.id):\n message = (\n \"Hello! I don't think we've met before! I am an RSS News Bot and would like to help you to \"\n \"receive your favourite news in the future! Let me first set up a few things before we start... \"\n )\n update.message.reply_text(message)\n\n self.db.add_user(\n telegram_id=telegram_user.id,\n username=telegram_user.username,\n firstname=telegram_user.first_name,\n lastname=telegram_user.last_name,\n language_code=telegram_user.language_code,\n is_bot=telegram_user.is_bot,\n is_active=1,\n )\n self.db.update_user(telegram_id=telegram_user.id, is_active=1)\n message = \"You will now receive news! Use /help if you need some tips!\"\n update.message.reply_text(message)", "def addme(update: 'Update', context: 'CallbackContext'):\n user_id = update.effective_user.id\n chat_id = update.effective_chat.id\n chats = get_chat_ids(DB)\n\n if chat_id not in chats:\n update.message.reply_text('Did not work. Run this command inside the Ko-Lab group.')\n else:\n if add_member_id(DB, user_id): \n update.message.reply_text('I have added you to the whitelist. 
You can now send commands from outside the Ko-Lab chat.')\n else:\n update.message.reply_text('You are already on the whitelist.')", "def test__GuildJoinRequest__user():\n user_id = 202305170040\n user = User.precreate(user_id, name = 'Koishi')\n \n event = GuildJoinRequest(\n user = user,\n )\n \n output = event.user_id\n vampytest.assert_instance(output, int)\n vampytest.assert_eq(output, user_id)", "def _subscribe_users(self):\n group = self.env.ref('odoo_magento2_ept.group_connector_manager')\n if not group:\n return\n companies = self.mapped('company_id')\n domain = [('groups_id', '=', group.id)]\n if companies:\n domain.append(('company_id', 'child_of', companies.ids))\n users = self.env['res.users'].search(domain)\n self.message_subscribe_users(user_ids=users.ids)", "def test_join_after_invite(self) -> None:\n\n self._perform_background_initial_update()\n\n u1 = self.register_user(\"u1\", \"pass\")\n u1token = self.login(\"u1\", \"pass\")\n r1 = self.helper.create_room_as(u1, tok=u1token)\n\n u2 = self.register_user(\"u2\", \"pass\")\n u2token = self.login(\"u2\", \"pass\")\n\n self.helper.invite(r1, u1, u2, tok=u1token)\n\n r1stats_ante = self._get_current_stats(\"room\", r1)\n assert r1stats_ante is not None\n\n self.helper.join(r1, u2, tok=u2token)\n\n r1stats_post = self._get_current_stats(\"room\", r1)\n assert r1stats_post is not None\n\n self.assertEqual(\n r1stats_post[\"current_state_events\"] - r1stats_ante[\"current_state_events\"],\n 0,\n )\n self.assertEqual(\n r1stats_post[\"joined_members\"] - r1stats_ante[\"joined_members\"], +1\n )\n self.assertEqual(\n r1stats_post[\"invited_members\"] - r1stats_ante[\"invited_members\"], -1\n )", "def joined(message):\n room = session.get('room')\n join_room(room)\n emit('status', {'msg': session.get('name') + ' joined'}, room=room)", "def join(user, tag, stop_time):\n def work():\n member = Member.get(user)\n member.remove_tag(tag)\n member.tags.append(tag)\n member.stop_times.append(stop_time)\n member.put()\n db.run_in_transaction(work)", "def addUserToGroup(self, user, group):\n return self.pm_getUserManager().addUserToGroup(self._unbox(user), self._unbox(group))", "def invite(self, room, jid, reason='', mfrom=''):\n msg = self.xmpp.makeMessage(room)\n msg['from'] = mfrom\n x = ET.Element('{http://jabber.org/protocol/muc#user}x')\n invite = ET.Element('{http://jabber.org/protocol/muc#user}invite', {'to': jid})\n if reason:\n rxml = ET.Element('{http://jabber.org/protocol/muc#user}reason')\n rxml.text = reason\n invite.append(rxml)\n x.append(invite)\n msg.append(x)\n self.xmpp.send(msg)", "def handle_join_room(self, lobby_command, client_socket):\n user = self.clients[client_socket]['data'].decode('utf-8')\n words = lobby_command.split()\n roomname = words[1]\n print(f\"Handling join room {roomname} for {user}\")\n for _room in self.rooms:\n if _room.name == roomname:\n print(\"Requested roomname found..\")\n if user in _room.room_attrbts['members']:\n msg = f\"Client {user} is already a member of room {_room.name}\"\n self.log_and_send(client_socket, msg)\n return\n else:\n _room.room_attrbts['members'].add(user)\n msg = f\"{user} successfully joined membership of room {roomname}\"\n self.log_and_send(client_socket, msg)\n return\n msg = f'Client {user} passed invalid room. 
Could not join room {roomname}'\n self.log_and_send(client_socket, msg)\n return", "def _broadcast_user(cls, sender, sender_sid, recipient, text, chat_id=None):\n # todo make this method async\n recipient_sid = cls.get_user_sid(recipient.id)\n if not recipient_sid:\n cls._cache_msg(sender.id, recipient.id, text, chat_id)\n return\n data = {'sender_id': sender.id, 'recipient_id': recipient.id,\n 'text': text, 'chat_id': chat_id or 'private', 'time': time()}\n app.socketio.emit('message', data, room=recipient_sid)", "async def async_join_players(self, group_members):\n\n _LOGGER.debug(\n \"%s wants to add the following entities %s\",\n self.entity_id,\n str(group_members),\n )\n\n \"\"\"Make sure self.zone is or becomes master.\"\"\"\n await self.coordinator.data.zone_master(self.zone_id, True)\n\n entities = [\n entity\n for entity in self._casatunes_entities()\n if entity.entity_id in group_members\n ]\n\n for client in entities:\n if client != self:\n await self.coordinator.data.zone_join(self.zone_id, client.zone_id)\n\n await self.coordinator.async_refresh()\n await self.sync_master()", "def sendSubcribe(self):\n user_subcribe = {\"channel\": \"/meta/subscribe\",\n \"clientId\": self.client_id,\n \"subscription\": \"/user/{}\".format(self.user_id),\n \"id\": str(self.id),\n \"ext\": {\"access_token\": self.token,\n \"timestamp\": time.time()}\n }\n self.sendMessage(json.dumps(user_subcribe).encode('utf-8'))\n # Increment sending id, see GroupMe push api\n self.id += 1", "def add_user_group(self, groupname, ls_user):\n data = {\"groupname\": groupname, \"add_users\": ls_user}\n headers = {\"user-agent\": self.u_agent}\n req_url = self.normalize_admin_url(u\"groups/{}\".format(groupname))\n res = requests.put(\n req_url,\n headers=headers,\n auth=self.auth,\n data=json.dumps(data),\n verify=False,\n )\n if res.status_code in [200, 201, 206]:\n return Response(0, res)\n else:\n return Response(res.status_code, res)", "async def join_group(self, group_id, captcha_token: TwoCaptcha = None):\n\n data = {}\n\n a = await self.request.just_request(url=f'https://groups.roblox.com/v1/groups/{group_id}/users', data=data,\n method='post')\n json_text = await a.json()\n if a.status == 403:\n if json_text['errors'][0]['message'] == \"You must pass the captcha test before joining this group.\":\n et = await captcha_token.solve(public_key=f'63E4117F-E727-42B4-6DAA-C8448E9B137F')\n data = {\n \"captchaToken\": f\"{et}\",\n \"captchaProvider\": \"PROVIDER_ARKOSE_LABS\"}\n b = await self.request.just_request(url=f'https://groups.roblox.com/v1/groups/{group_id}/users',\n data=data, method='post')\n jj = await b.json()\n return jj\n else:\n return json_text", "def test_user_is_group_moderator(self):\n thread = self.create_thread()\n user = self.create_user()\n thread.group.owners.add(user)\n message = thread.first_message\n message.status = 'pending'\n message.save()\n self.assertTrue(message.visible_to_user(user))", "def create_member(org_id, group_id, target_group_ids, sex, first_name, last_name, title_name, email):\n user = get_user_by_email(email)\n # --- falls e-mail schon existiert wird nichts unternommen\n if user != None:\n if org_id > 0: # nur bei Schulen wird die Schulnummer vorangestellt\n prefix = '%i_' % org_id\n else:\n prefix = ''\n user = User()\n username = get_username(prefix, first_name, last_name)\n user.username = username\n user.sex = sex\n user.first_name = first_name\n user.last_name = last_name\n user.email = email\n user.title = title_name\n user.is_staff = False\n user.is_active = 
True\n user.is_superuser = False\n user.date_joined = datetime.datetime.now()\n password = generate_passwd()\n user.set_password(password)\n user.save()\n set_user_org(org_id, user)\n send_password(email, username, password)\n set_user_group(user, get_group_by_id(group_id))\n for group in target_group_ids:\n set_user_group(user, get_group_by_id(group))\n transaction.commit()" ]
[ "0.66464144", "0.6612628", "0.65133005", "0.65117073", "0.64265233", "0.6303679", "0.6245498", "0.6235882", "0.6231058", "0.6172051", "0.61579597", "0.61176723", "0.60869974", "0.6015817", "0.6015379", "0.601436", "0.5992443", "0.59782267", "0.59572095", "0.59416044", "0.5917493", "0.5907165", "0.58975923", "0.58950806", "0.58592874", "0.58423334", "0.5827844", "0.5783919", "0.5748963", "0.5726046", "0.5719526", "0.5716758", "0.5695806", "0.5690688", "0.56807214", "0.5676846", "0.56737", "0.5669539", "0.56499845", "0.56495374", "0.564773", "0.5642806", "0.5627659", "0.56216466", "0.5619186", "0.5612769", "0.56099236", "0.56039876", "0.5595521", "0.5594905", "0.55851954", "0.5578682", "0.55685264", "0.5555213", "0.55483717", "0.55367047", "0.55145603", "0.5497497", "0.54931843", "0.54795694", "0.5461964", "0.5452606", "0.54337806", "0.54336035", "0.54196143", "0.54107314", "0.5408022", "0.5403927", "0.5403494", "0.5391653", "0.5388102", "0.53835195", "0.5362073", "0.53530675", "0.5350924", "0.5350754", "0.53501403", "0.53465056", "0.5337057", "0.5337057", "0.53322864", "0.5329869", "0.53227305", "0.53209865", "0.5316116", "0.530624", "0.5302738", "0.52918303", "0.52868766", "0.5279422", "0.5276929", "0.526942", "0.5267907", "0.5265066", "0.52643895", "0.5261965", "0.5257473", "0.5251469", "0.52504194", "0.5243689" ]
0.71382284
0
broadcast a user leaving the group
def user_left_group(cls, group, user):
    text = "{} left the group chat".format(user.username)
    cls._broadcast_group(group, None, group, text)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def leave_room(self, label):\n user = self.user\n room = await self.get_room(label)\n\n await self.channel_layer.group_send(\n room.group_name,\n {\n 'type': 'chat.leave',\n 'label': label,\n 'username': user.username,\n 'title': room.name,\n }\n )\n # Remove that we're in the room\n self.rooms.discard(label)\n\n # Remove client from the group so he no longer get room messages\n await self.channel_layer.group_discard(\n room.group_name,\n self.channel_name\n )\n\n await self.send_json(\n return_value(\n ACTION_LEAVE, room.label, TO_ME, MSG_LEAVE, NO_MESSAGE\n )\n )", "def leave_group(self):\n\t\tself.sendMessage(ID_CTRL + \"LEAVE\", True)\n\t\tself.joinstate = 0\n\t\tself.createstate = 0\n\t\tself.__key = None", "def leave_group():\n incoming = request.get_json()\n Participant.delete_participant_with_user_id_and_room_id(session['user_id'], incoming['room_id'])\n return jsonify(results = incoming['room_id'])", "async def leave(self):\n\t\tif self.group == None:\n\t\t\traise exceptions.ClientError('NO_GROUP')\n\n\t\tawait self.group.remove(self)\n\n\t\tself.group = None", "def on_leave(data):\r\n\r\n username = data['username']\r\n room = data['room']\r\n leave_room(room)\r\n send({\"msg\": username + \" has left the room\"}, room=room)", "def user_joined_group(cls, group, user):\n text = \"{} joined the group chat\".format(user.username)\n cls._broadcast_group(group, None, group, text)", "def leave(self, user):\n membership = self.check_membership(user)\n if membership is not None and membership.role != 'O':\n if membership.role == 'B':\n membership.role = 'LB'\n else:\n membership.role = 'L'\n membership.save()", "def decline_invitation(self, user, group):\n if group.is_invited(user):\n group.remove_invitation(user)", "def on_leave(data):\n username = request.sid\n room = data\n leave_room(room)\n logging.info(username + ' has left the room.')\n send(username + ' has left the room.', room=room)", "def action_remove_from_group(self, kwargs):\n user = kwargs[\"user\"]\n group = kwargs[\"group\"]\n\n if self.engine.remove_user_from_group(user, group):\n info(f\"User {user} sucessfully removed from {group}\")\n else:\n error(f\"Unable to remove {user} from {group}, check privileges or dn\")", "def leave(self, message, db_session):\n username = self.ts.get_user(message)\n user = db_session.query(db.User).filter(db.User.name == username).one_or_none()\n if not user:\n user = db.User(name=username)\n db_session.add(user)\n for tup in self.player_queue.queue:\n if tup[0] == username:\n self.player_queue.queue.remove(tup)\n self._add_to_whisper_queue(username, \"You've left the queue.\")\n user.times_played -= 1\n break\n else:\n self._add_to_whisper_queue(username, \"You're not in the queue and must join before leaving.\")", "def handle_leave_room(self, lobby_command, client_socket):\n user = self.clients[client_socket]['data'].decode('utf-8')\n words = lobby_command.split()\n roomname = words[1]\n print(f\"Handling leave room {roomname} for {user}\")\n for _room in self.rooms:\n if _room.name == roomname:\n print(\"Requested roomname found..\")\n if user not in _room.room_attrbts['members']:\n msg = f\"Client {user} is already NOT a member of room {_room.name}\"\n self.log_and_send(client_socket, msg)\n return\n else:\n _room.room_attrbts['members'].remove(user)\n msg = f\"User {user} successfully removed from room {roomname}\"\n self.log_and_send(client_socket, msg)\n return\n msg = f'Client {user} passed invalid room. 
Could not join room {roomname}'\n self.log_and_send(client_socket, msg)\n return", "def leave(msg: telebot.types.Message):\n if utils.in_menu(msg.from_user):\n bot.reply_to(\n msg,\n 'This command outside of game is useless.'\n )\n return\n\n game, user, opponent = utils.get_game_user_opponent(msg.from_user)\n if not game or not user:\n # todo log something\n return\n\n user.state = states.USER_IN_MENU\n user.losses += 1\n utils.update_user(user)\n bot.send_message(\n user.user_id,\n 'You surrendered.'\n )\n\n if opponent:\n opponent.state = states.USER_IN_MENU\n opponent.wins += 1\n utils.update_user(opponent)\n bot.send_message(\n opponent.user_id,\n 'Your opponent surrendered'\n )\n\n field = json.loads(game.field)\n sig = 1 if user == game.user1 else 2\n\n # changes users emojis to poop\n for i in range(len(field)):\n for j in range(len(field[i])):\n if field[i][j] == sig:\n field[i][j] = 4\n\n if opponent:\n utils.send_updated_field(bot, field, game, opponent)\n Game.delete_by_id(game.id)", "def userLeft(self, user, channel):\n ss = self.findSessions(channel)[0]\n user = user.decode(ss.encoding)\n self.sendResponse(ss.removeNick(user))", "def leaveMUC(self, room, nick, msg='', pfrom=None):\n if msg:\n self.xmpp.sendPresence(pshow='unavailable', pto=\"%s/%s\" % (room, nick), pstatus=msg, pfrom=pfrom)\n else:\n self.xmpp.sendPresence(pshow='unavailable', pto=\"%s/%s\" % (room, nick), pfrom=pfrom)\n del self.rooms[room]", "def delete_group(user):\n return 'do some magic!'", "async def chat_leave(self, event):\n await self.send_json(\n return_value(\n ACTION_WENT_OFFLINE,\n event['label'],\n event['username'],\n MSG_LEAVE,\n NO_MESSAGE\n )\n )", "def on_leave(self, event):\n self.pre_check(event)\n self.remove_player(event.guild.id)", "def unfollow_group(request, pk):\n group = get_object_or_404(Group, id=pk)\n\n # Check user is not member of the group\n if not group.members.filter(id=request.user.id).exists():\n actions.unfollow(request.user, group, send_action=False)\n request.user.userprofile.follow_groups.remove(group)\n messages.warning(\n request,\n 'Successed, you are not following this Group anymore.')\n # the group members can choose not follow the group anymore, but still\n # been the member\n else:\n actions.unfollow(request.user, group, send_action=False)\n messages.warning(\n request,\n 'Successed, you are not following this Group anymore. 
But you are still the one of the members of this group.')\n\n return redirect('groups:groups-detail', pk)", "def leave(ctx, network):\n return _leave(ctx.obj['client'], network)", "async def tod_leave(self, ctx, *args):\n try:\n self.players.remove(ctx.author)\n role = discord.utils.get(ctx.guild.roles, name=\"Player\")\n await ctx.author.remove_roles(role)\n except ValueError:\n pass\n message = f\"{ctx.author.mention} has been removed from the game!\"\n await ctx.send(message)", "async def leave_room(self, room_id):\n print(\"PublicChatConsumer\", \"leave_room\")\n if self.scope[\"user\"].is_authenticated:\n try:\n room: PublicChatRoom = await get_room_or_error(room_id)\n except ClientError as e:\n await self.handle_client_error(e)\n else:\n # Remove user from room users\n await disconnect_user(room, self.scope[\"user\"])\n\n # Set room_id to None\n self.room_id = None\n\n # Remove user from the group\n await self.channel_layer.group_discard(\n room.group_name,\n self.channel_name\n )\n\n # Send the total number of connected users to the client\n connected_users_count = await get_connected_users_count(room)\n await self.channel_layer.group_send(\n room.group_name,\n {\n \"type\": \"connected.users.count\",\n \"connected_users_count\": connected_users_count\n }\n )", "def command_leavemessage(self, user, nick, channel, rest):\n\n params = rest.split(' ')\n if len(params) < 2:\n self.reply(\n channel,\n nick,\n self.formatDoc(\n \"Usage: {command_prefix}leavemessage target_nick message\"\n )\n )\n return False\n\n target = params[0]\n message = ' '.join(params[1:])\n\n with self.getDbSession() as db_session:\n db_session.add(\n Message(\n user=user,\n nick=nick,\n message_time=datetime.datetime.now(),\n to_nick=target,\n channel=channel,\n message=message\n )\n )\n\n self.reply(channel, nick, 'Message saved for %s' % target)", "async def async_unjoin_me(self):\n if self._multiroom_wifidirect:\n for dev in self._multiroom_group:\n for device in self.hass.data[DOMAIN].entities:\n if device._is_master: ## TODO!!!\n cmd = \"multiroom:SlaveKickout:{0}\".format(self._slave_ip)\n value = await self._master.async_call_linkplay_httpapi(cmd, None)\n self._master._position_updated_at = utcnow()\n\n else:\n cmd = \"multiroom:Ungroup\"\n value = await self.async_call_linkplay_httpapi(cmd, None)\n\n if value == \"OK\":\n if self._master is not None:\n await self._master.async_remove_from_group(self)\n # await self._master.async_schedule_update_ha_state(True)\n self._multiroom_unjoinat = utcnow()\n self._master = None\n self._is_master = False\n self._slave_mode = False\n self._slave_ip = None\n self._multiroom_group = []\n # await self.async_schedule_update_ha_state(True)\n\n else:\n _LOGGER.warning(\"Failed to unjoin_me from multiroom. 
\" \"Device: %s, Got response: %s\", self.entity_id, value)", "async def leaveserver(self, ctx, guild: int):\n guild = self.bot.get_guild(guild)\n await guild.leave()\n embed = discord.Embed(title=f\"left {guild.name} owned by: {guild.owner.name}\")\n embed.set_author(name=ctx.author.nick if ctx.author.nick else ctx.author.name, icon_url=ctx.author.avatar_url)\n await ctx.message.delete()\n await ctx.send(embed=embed)", "def leave(self, *args, **kwargs):\n return self.bot.leave_chat(self.id, *args, **kwargs)", "def left(message):\n\tglobal GLOBAL_NUM_USERS\n\tGLOBAL_NUM_USERS = GLOBAL_NUM_USERS - 1\n\troom = session.get('room')\n\tleave_room(room)\n\tprint('%s : left' % session)\n\temit('_left', {'user_name': session.get('name'), 'num_users' : GLOBAL_NUM_USERS}, room=room)", "def remove(self, user):\n if user != self.head:\n user.group = None\n user.save()\n self.players.remove(user)", "def userLeft(self, user, channel):\n self.dispatch('population', 'userLeft', user, channel)", "def user_disappears(self, user):\n pass", "def mutate(self, info, user_id):\n del info\n assert self is None, \"Root `self` expected to be `None`!\"\n\n OnChatMessageSent.unsubscribe(group=f\"user_{user_id}\")\n\n return KickOutUser(success=True)", "def leave(self):\n self.subreddit._reddit.post(\n API_PATH[\"leavecontributor\"], data={\"id\": self.subreddit.fullname}\n )", "def leave(self, dest):\n targetSS, handle = dest\n connector, args = handle\n conn = connector(args)\n tr_id = get_tr_id()\n\n leave_msg = SSAP_MESSAGE_TEMPLATE % (str(self.node_id), str(targetSS),\n \"LEAVE\", str(tr_id), \"\")\n conn.connect()\n conn.send(leave_msg)\n # print \"Sent leave msg\"\n cnf = conn.receive()\n conn.close()\n if \"status\" in cnf and cnf[\"status\"] == M3_SUCCESS:\n tmp = [x for x in self.member_of if x != targetSS]\n self.member_of = tmp\n return True\n elif \"status\" in cnf:\n tmp = [x for x in self.member_of if x != targetSS]\n self.member_of = tmp\n raise SIBError(cnf[\"status\"])\n else:\n tmp = [x for x in self.member_of if x != targetSS]\n self.member_of = tmp\n raise SIBError(M3_SIB_ERROR)", "def __on_group_deleted(self, logger, *args):", "def test_04_self_unshare_group(self):\n meowers = self.meowers\n cat = self.cat\n dog = self.dog\n cat.uaccess.share_group_with_user(meowers, dog, PrivilegeCodes.CHANGE)\n self.assertTrue(dog in meowers.gaccess.edit_users)\n self.assertTrue(dog in meowers.gaccess.members)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_group_unshare_users(meowers)))\n dog.uaccess.unshare_group_with_user(meowers, dog)\n self.assertFalse(dog in meowers.gaccess.edit_users)\n self.assertFalse(dog in meowers.gaccess.members)\n self.assertTrue(\n is_equal_to_as_set(\n [], dog.uaccess.get_group_unshare_users(meowers)))", "async def leave_room(self, room_id):\n # Зарегистрированный пользователь находится в нашей области благодаря аутентификации ASGI middleware\n room = await get_room_or_error(room_id, self.scope[\"user\"])\n # Отправить сообщение, если оно включено\n if settings.NOTIFY_USERS_ON_ENTER_OR_LEAVE_ROOMS:\n await self.channel_layer.group_send(\n room.group_name,\n {\n \"type\": \"chat.leave\",\n \"room_id\": room_id,\n \"username\": self.scope[\"user\"].first_name,\n }\n )\n # Remove that we're in the room\n self.rooms.discard(room_id)\n # Remove them from the group so they no longer get room messages\n await self.channel_layer.group_discard(\n room.group_name,\n self.channel_name,\n )\n # Instruct their client to finish closing the room\n await self.send_json({\n 
\"leave\": str(room.id),\n })", "def disconnect(self, code):\n try:\n if not self.scope['user'].is_authenticated:\n logger.error('User in not authenticated')\n self.close()\n\n user = Profile.objects.get(user=self.scope['user'])\n group_name = user.group_name\n\n self.channel_layer.group_discard(group_name, self.channel_name)\n except Exception as e:\n logger.error(e)", "async def quit_room(self, label):\n room = await self.get_room(label)\n if not room.is_ready:\n await self.exit_room(room)\n self.rooms.discard(label)\n await self.channel_layer.discard(\n room.group_name,\n self.channel_name\n )\n await self.channel_layer.group_send(\n room.group_name,\n {\n 'type': 'chat.quit',\n 'label': label,\n 'username': self.user.username,\n 'title': room.name,\n }\n )\n else:\n self.send_json(\n return_value(\n ACTION_DENIED,\n label,\n TO_ME,\n MSG_LEAVE,\n NO_MESSAGE\n )\n )", "async def leave(ctx):\n if ctx.message.channel.name.lower() not in tod_channels:\n return\n\n room = ctx.message.channel.name.lower()\n if room not in tod_games:\n await amor_manager.say(\"Truth Or Dare not in progress in {}\".format(room))\n else:\n player = ctx.message.author.name\n if player.lower() not in list(tod_games[room]['participants'].keys()):\n await amor_manager.say(\"{}, you cannot leave the game if you have not joined\".format(player))\n elif player == tod_games[room]['host']:\n await amor_manager.say(\"{}, you cannot leave the game you're the host\".format(player))\n else:\n del tod_games[room]['participants'][player.lower()]\n await amor_manager.say(\"{} has left Truth or Dare.\".format(player))", "def join_player(self, data, user):\n self.remove(user)\n\n user.room = \"100\"\n user.x = \"0\"\n user.y = \"0\"\n user.frame = \"0\"\n\n self.add(user)", "def do_leave(self):\n res = self.entity.do_leave(self.context)\n if res:\n return self.RES_OK, 'Node successfully left cluster.'\n else:\n return self.RES_ERROR, 'Node failed in leaving cluster.'", "def update_user_backward(apps, schema_editor):\n Group.objects.all().delete()", "def remove_from_group(_request, group_id, email):\n group = models.UserGroup.get_by_id(int(group_id))\n user_key = models.UserProfile.load(email).key()\n if group.users is None:\n group.users = []\n logging.warning('Group \"%s\" had a None users list' % group.name)\n group.users.remove(user_key)\n group.put()\n\n url = urlresolvers.reverse('views.admin.edit_user', args=[email])\n return http.HttpResponseRedirect(url)", "def on_removeuser(self, username):\n self.users.remove(username)\n print ('%s left the room.' % username)", "def handle_exit_room_session(self, lobby_command, client_socket):\n user = self.clients[client_socket]['data'].decode('utf-8')\n for room in self.rooms:\n if user in room.room_attrbts['active']:\n room.room_attrbts['active'].remove(user)\n msg = f'User {user} is no longer active in room {room.name}.'\n print(msg)\n return\n msg = f'Room {room.name} not found or user {user} is not yet a member. 
NONACTIVE'\n self.log_and_send(client_socket, msg)\n return", "def test_persistent_group_peer_dropped(dev):\n form(dev[0], dev[1], reverse_init=True)\n invite_from_cli(dev[0], dev[1])\n\n logger.info(\"Remove group on the GO and try to invite from the client\")\n dev[0].global_request(\"REMOVE_NETWORK all\")\n invite(dev[1], dev[0])\n ev = dev[1].wait_global_event([\"P2P-INVITATION-RESULT\"], timeout=10)\n if ev is None:\n raise Exception(\"No invitation result seen\")\n if \"status=8\" not in ev:\n raise Exception(\"Unexpected invitation result: \" + ev)\n networks = dev[1].list_networks(p2p=True)\n if len(networks) > 0:\n raise Exception(\"Unexpected network block on client\")\n\n logger.info(\"Verify that a new group can be formed\")\n form(dev[0], dev[1], reverse_init=True)", "def uninvite_org_or_player(self, event):\n org, pc = self.get_org_or_dompc(self.lhs)\n if event:\n if org:\n if org not in event.orgs.all():\n raise self.CalCmdError(\"That organization is not invited.\")\n event.remove_org(org)\n else:\n if pc not in event.dompcs.all():\n raise self.CalCmdError(\"They are not invited.\")\n event.remove_guest(pc)\n else:\n proj = self.project\n if org:\n if org.id not in proj[\"org_invites\"]:\n raise self.CalCmdError(\"That organization is not invited.\")\n proj[\"org_invites\"].remove(org.id)\n else:\n if pc.id in proj[\"hosts\"] or pc.id in proj[\"gms\"]:\n raise self.CalCmdError(\"Remove them as a host or gm first.\")\n if pc.id not in proj[\"invites\"]:\n raise self.CalCmdError(\"They are not invited.\")\n proj[\"invites\"].remove(pc.id)\n self.msg(\"{wRemoved {c%s{w's invitation.\" % (pc or org))", "def remove_from_group(user: User, group: Group) -> Result:\n if user.pw_name not in group.gr_mem:\n return Result(State.unchanged)\n command([\"/usr/sbin/deluser\", user.pw_name, group.gr_name])\n group.gr_mem.remove(user.pw_name)\n return Result(State.success)", "def __unregister(self, args = []):\n\n try:\n self.__cm.send(p.T_LEAVE,[])\n reply = self.__cm.receive()\n if (reply.type != p.T_ACK):\n raise Exception, \"Unregistering from server was not successfull. 
Disconnecting anyway!\"\n \n except Exception,e:\n self.__handleError('Leave', e)", "def remove(self, user):\n self.packet.send_room([\"rp\", user.get_int_id(self.rooms),\n user.data.id], user.room)\n self.rooms[user.room][\"users\"].remove(user)", "def test_persistent_group_peer_dropped2(dev):\n form(dev[0], dev[1])\n invite_from_go(dev[0], dev[1])\n\n logger.info(\"Remove group on the client and try to invite from the GO\")\n dev[1].global_request(\"REMOVE_NETWORK all\")\n invite(dev[0], dev[1])\n ev = dev[0].wait_global_event([\"P2P-INVITATION-RESULT\"], timeout=10)\n if ev is None:\n raise Exception(\"No invitation result seen\")\n if \"status=8\" not in ev:\n raise Exception(\"Unexpected invitation result: \" + ev)\n networks = dev[1].list_networks(p2p=True)\n if len(networks) > 0:\n raise Exception(\"Unexpected network block on client\")\n\n logger.info(\"Verify that a new group can be formed\")\n form(dev[0], dev[1])", "def remove_member_command(client: MsGraphClient, args: dict) -> tuple[str, dict, dict]:\n group_id = str(args.get('group_id'))\n user_id = str(args.get('user_id'))\n client.remove_member(group_id, user_id)\n\n human_readable = f'User {user_id} was removed from the Group \"{group_id}\" successfully.'\n return human_readable, NO_OUTPUTS, NO_OUTPUTS", "def test_logged_user_in_group_can_leave(self):\n\n logged_user = utils.create_user_and_authenticate(self)\n self.group.users.add(logged_user)\n expected_url = reverse('my_groups_view')\n\n utils.test_can_access(self, self.url,\n post_redirect_url=expected_url)\n\n self.assertNotIn(logged_user, self.group.users.all())\n self.assertNotIn(self.group, logged_user.joined_groups.all())", "def remove_from_earth(sender, instance, **kwargs):\n\tgrplst = instance.groups_as_string.split(\", \")\n\tmail = instance.associated_user.email\n\t#loop over list\n\tfor grp in grplst:\n\t\trequests.delete(\"https://api.mailgun.net/v3/lists/{}@arenbergorkest.be/members/{}\".format(grp,mail),auth=('api', settings.MAILGUN_API_KEY))", "async def leave(ctx, *, check=\"\"):\r\n # if botv.isAdmin(ctx.message.author) and check == \"now, bot\":\r\n # if necessary, save checks can go here; check presently commented out because botv can\r\n # fail to initialize in testing\r\n await bot.say(\"Allan, please add dialogue!\")\r\n quit()", "def __kick_passenger(self, user, reason):\n\n try:\n if user.id not in self.__users.keys():\n print(\"the person you're trying to delete doesn't exist.\")\n return\n\n if reason == \"kicked all passengers by an admin\": # the ususal case, made a standart message so users won't be nervous\n user.send_message(\n f\"Hello {user.name.split(' ')[0]}, your request has been removed.\\n\"\n f\"Simply place another one if it's still relevant.\\n\\nBest regards, Bus4U team\")\n\n else: # in case of something spacial\n print(f\"reason '{reason}'\")\n user.send_message(\n f\"hello {user.name.split(' ')[0]}, it looks like you've been kicked out of the system for: {reason}\")\n del self.__users[user.id]\n except Exception as e:\n print(\"Some Error accrued\")", "def IgmpLeave(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n payload = {}\n for i in range(len(args)):\n payload[\"Arg%s\" % (i + 1)] = args[i]\n for item in kwargs.items():\n payload[item[0]] = item[1]\n return self._execute(\"igmpLeave\", payload=payload, response_object=None)", "def ws_disconnect(message):\n language = message.channel_session['knocker']\n grLangUser = Group('knocker-{0}-{1}'.format(language, \n message.user.id))\n 
grLangUser.discard(message.reply_channel)", "def userLeft(self, user, channel):\n # Send messasge to Server bot.\n self.data_in(text=\"\", type=\"left\", user=\"server\", channel=channel,\n nicklist=[user])", "def leave_union(self):\n if self.union is None:\n return f'{self.username} is not a member of any guild'\n\n if self.union.has_member(self):\n union_name = self.union.name\n self.union = None\n self.save()\n return f'{self.username} has been removed from {union_name}'", "def remove_from_group(self, org, contact, group):\n pass", "def test_05_self_can_downgrade_group(self):\n meowers = self.meowers\n cat = self.cat\n dog = self.dog\n cat.uaccess.share_group_with_user(meowers, dog, PrivilegeCodes.CHANGE)\n self.assertTrue(dog in meowers.gaccess.edit_users)\n self.assertTrue(dog in meowers.gaccess.members)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_group_unshare_users(meowers)))\n dog.uaccess.share_group_with_user(meowers, dog, PrivilegeCodes.VIEW)\n self.assertFalse(dog in meowers.gaccess.edit_users)\n self.assertTrue(dog in meowers.gaccess.members)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_group_unshare_users(meowers)))", "async def leave_room(self, document_id):\n\t\tprint(\"DocumentChatConsumer: leave_room\")\n\t\tis_auth = is_authenticated(self.scope[\"user\"])\n\t\troom = await get_room_or_error(document_id)\n\n\t\t# Remove user from \"users\" list\n\t\tif is_auth:\n\t\t\tawait disconnect_user(room, self.scope[\"user\"])\n\n\t\t# Remove that we're in the room\n\t\tself.document_id = None\n\t\t# Remove them from the group so they no longer get room messages\n\t\tawait self.channel_layer.group_discard(\n\t\t\troom.group_name,\n\t\t\tself.channel_name,\n\t\t)\n\n\t\t# send the new user count to the room\n\t\tnum_connected_users = get_num_connected_users(room)\n\t\tawait self.channel_layer.group_send(\n\t\troom.group_name,\n\t\t\t{\n\t\t\t\t\"type\": \"connected.user.count\",\n\t\t\t\t\"connected_user_count\": num_connected_users,\n\t\t\t}\n\t\t)", "def do_del_group(dbsync, group):\n pass", "def test_leave(self):\n client = self.mock_client(\n [\n defer.succeed(Mock(error_code=0)),\n ]\n )\n coord = self.make_coordinator(client)\n coord.coordinator_broker = Mock()\n coord.member_id = \"m1\"\n coord.generation_id = \"g1\"\n de = coord.send_leave_group_request()\n self.successResultOf(de)\n self.assertEqual(coord.member_id, \"\")\n self.assertIsNone(coord.generation_id)", "def test_deluser(self):\n self.run_function(\"group.add\", [self._group], gid=self._gid)\n self.run_function(\"user.add\", [self._user])\n self.run_function(\"group.adduser\", [self._group, self._user])\n self.assertTrue(self.run_function(\"group.deluser\", [self._group, self._user]))\n group_info = self.run_function(\"group.info\", [self._group])\n self.assertNotIn(self._user, str(group_info[\"members\"]))", "async def nogroup(ctx):\n if ctx.message.channel.name.lower() not in bot_channels:\n return\n\n author = ctx.message.author\n roles = author.roles\n for role in roles:\n if role.name.lower() in changeable_groups:\n roles.remove(role)\n await amor_manager.replace_roles(author, *roles)\n await amor_manager.say('{0} removed from color groups'.format(author.name))", "def _broadcast_group(cls, sender, sender_sid, group, text):\n # todo make this method async\n for recipient in group.get_users():\n if recipient == sender:\n continue\n cls._broadcast_user(sender, sender_sid, recipient, text, group.id)", "def disconnect_user_group(self, id_user:int, id_group:int) -> 
set:\n try:\n check_value = self.cursor.execute(f\"SELECT COUNT(id_user) FROM {table_users_groups} WHERE id_group={id_group};\").fetchone()\n check_value = check_value[0] if check_value else 0\n if check_value:\n self.cursor.execute(f\"DELETE FROM {table_users_groups} WHERE id_user={id_user} AND id_group={id_group};\")\n self.connection.commit() \n if check_value == 1:\n return True, True, True\n return True, False, True\n else:\n return True, False, False\n except Exception as e:\n msg = f'We have problems with the connection deletion between user and group. Mistake: {e}'\n self.proceed_error(msg)\n return False, False, False", "def unblock(self):\n data = {'container': self._reddit.user.me().fullname,\n 'name': str(self), 'type': 'enemy'}\n url = API_PATH['unfriend'].format(subreddit='all')\n # PRAW5 REMOVE (return statement)\n return self._reddit.post(url, data=data)", "def leave_event(self, event_id):\n event = Event.objects.get(id=event_id)\n self.event_attending.remove(event)\n self.save()\n event.save()", "def unsubscribe( self, mess, args):\n user = self.get_sender_username(mess)\n if not user in self.users:\n return 'You are not subscribed!'\n else:\n user = self.users.pop(user)\n self.message_queue.append('_%s has left the channel_' % user)\n self.log.info( '%s unsubscribed from the broadcast.' % user)\n self.save_state()\n return 'You are now unsubscribed.'", "def delusers(self, args):\n\n if len(args) < 2:\n print(self.addusers.__doc__)\n return\n\n gname = args[0]\n users = args[1:]\n\n g = sr.group(gname)\n\n if not g.in_db:\n print(\"Group '%s' not found.\" % ( gname ))\n return\n\n not_members = g.user_rm( users )\n g.save()\n\n for uname in not_members:\n print(\"Unable to remove non-member '%s' from '%s'\" % ( gname, uname ))", "def rm_user_group(self, groupname, ls_user):\n data = {\"groupname\": groupname, \"rm_users\": ls_user}\n headers = {\"user-agent\": self.u_agent}\n req_url = self.normalize_admin_url(u\"groups/{}\".format(groupname))\n res = requests.put(\n req_url,\n headers=headers,\n auth=self.auth,\n data=json.dumps(data),\n verify=False,\n )\n if res.status_code in [200, 206]:\n return Response(0, res)\n else:\n return Response(res.status_code, res)", "def broadcast(self, msg, mtype = 'message', back = True):\n for p in DixitConnection.participants:\n if back or (DixitConnection.participants[p] != self):\n DixitConnection.participants[p].emit(mtype, msg)", "async def removeuser(self, ctx, user: discord.Member):\n\n if check_key(user.id):\n delete_key(user.id)\n await self.bot.say(\"{}, you are way out of this league.\".format(user.mention))\n else:\n await self.bot.say(\"That user does not exist in this league.\")", "def test_users_can_leave_a_group(self):\n USER = \"alice\"\n USER_ID = self.USERS[USER][\"id\"]\n url = reverse(\n 'communities:membership-detail',\n kwargs={\n 'community_id': self.GROUP_ID,\n 'user_id': USER_ID,\n }\n )\n self.login_as(\"alice\")\n with self.assertNumQueries(5):\n response = self.client.delete(url)\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(response.data, None)\n \n self.assertEqual(Membership.objects.filter(\n community_id=self.GROUP_ID, user_id=USER_ID).count(), 0)", "def on_leave(data):\n logger.info(f\"Leaving: {data}\")\n to = data[\"to\"]\n if to in TO_OPTIONS.keys():\n leave_room(to)\n logger.info(f\"Rooms: {rooms()}\")\n else:\n logger.warning(f\"{to} not in TO_OPTIONS\")", "def leave(bot, event, conversation_id=None, *args):\n\n arglist = list(args)\n\n if conversation_id 
== \"quietly\":\n arglist.append(\"quietly\")\n conversation_id = False\n\n if not conversation_id:\n conversation_id = event.conv_id\n\n yield from command.run(bot, event, *[\"convleave\", \"id:\" + conversation_id, \" \".join(arglist)])", "async def async_remove_from_group(self, device):\n if device.entity_id in self._multiroom_group:\n self._multiroom_group.remove(device.entity_id)\n# await self.async_schedule_update_ha_state(True)\n\n if len(self._multiroom_group) <= 1:\n self._multiroom_group = []\n self._is_master = False\n self._slave_list = None\n\n for member in self._multiroom_group:\n for player in self.hass.data[DOMAIN].entities:\n if player.entity_id == member and player.entity_id != self.entity_id:\n await player.async_set_multiroom_group(self._multiroom_group)", "async def unregister(self):\n\t\tif self.group != None:\n\t\t\tif self.group.in_game:\n\t\t\t\tfor team in self.group.game.teams:\n\t\t\t\t\tif self in team:\n\t\t\t\t\t\tself.group.game.teams.remove(team)\n\t\t\t\t\t\tbreak\n\n\t\t\tawait self.group.remove(self)\n\n\t\tshared.users.remove(self)", "def IgmpLeave(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n payload = { \"Arg1\": self.href }\n for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]\n for item in kwargs.items(): payload[item[0]] = item[1]\n return self._execute('igmpLeave', payload=payload, response_object=None)", "def disconnect_user(room: PublicChatRoom, user) -> bool:\n return room.disconnect_user(user)", "def test_channel_leave_normal_case_owner():\n \n clear()\n leaver = auth_register('[email protected]', '123abc!@#', 'first', 'last') \n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n userchannel_id = channels_create(user['token'], 'userchannel', True)\n channel_join(leaver['token'], userchannel_id['channel_id'])\n channel_addowner(leaver['token'], userchannel_id['channel_id'], leaver['u_id'])\n channel_leave(leaver['token'], userchannel_id['channel_id'])\n randChannel_details = channel_details(user['token'], userchannel_id['channel_id'])\n assert(randChannel_details['owner_members'] == [\n {\n 'u_id' : user['u_id'],\n 'name_first' : 'first',\n 'name_last' : 'last',\n 'profile_img_url': ''\n }\n ])", "def user_logged_out(self, sender, request, user, **kwargs):", "def _leave(self, *args):\n if not self.game:\n raise ServerException('not playing a game')\n self.game.leave(self)\n self.game = self.player = None", "def leave(self):\n self.remove(\n self.subreddit._reddit.config.username or self.subreddit._reddit.user.me()\n )", "def left(message):\n room = session.get('room')\n leave_room(room)\n emit('status', {'msg': session.get('name') + ' s\\'est déconnecté.'}, room=room)", "def test_persistent_group_peer_dropped3(dev):\n form(dev[0], dev[1], reverse_init=True)\n invite_from_cli(dev[0], dev[1])\n\n logger.info(\"Remove group on the GO and try to invite from the client\")\n dev[0].global_request(\"REMOVE_NETWORK all\")\n invite(dev[1], dev[0], use_listen=False)\n ev = dev[1].wait_global_event([\"P2P-INVITATION-RESULT\"], timeout=10)\n if ev is None:\n raise Exception(\"No invitation result seen\")\n if \"status=8\" not in ev:\n raise Exception(\"Unexpected invitation result: \" + ev)\n networks = dev[1].list_networks(p2p=True)\n if len(networks) > 0:\n raise Exception(\"Unexpected network block on client\")\n\n time.sleep(0.2)\n logger.info(\"Verify that a new group can be formed\")\n form(dev[0], dev[1], reverse_init=True, r_listen=False)", "def on_station_user_invite_rejected(self, func):\n 
self._set_event_handler(\"stations\")\n self._events.on_station_user_invite_rejected(func)", "def leave_farm(self, request, pk):\n farm = self.get_object()\n user = request.user\n farm.remove_member(user)\n return Response({}, status=status.HTTP_204_NO_CONTENT)", "def remove_RSVP(eid, gid):\n check_admin()\n\n guestList = GuestList.query.filter_by(event_id=eid).all()\n for guest in guestList:\n print(\"guest.guest_id: \" + str(guest.guest_id))\n print(\"gid: \" + str(gid))\n if guest.guest_id == gid:\n guest.is_attending=False\n db.session.commit()\n \n flash('You have successfully set a user as not attending.')\n\n # redirect to the events page\n return redirect(url_for('admin.event_RSVPlist', id=eid))\n\n return render_template(title=\"Removed RSVP\")", "def left(message):\n room = session.get('room')\n leave_room(room)\n emit('status', {'msg': session.get('name') + ' has left the room.'}, room=room)", "def left(message):\n room = session.get('room')\n leave_room(room)\n emit('status', {'msg': session.get('name') + ' has left the room.'}, room=room)", "def left(message):\n room = session.get('room')\n leave_room(room)\n emit('status', {'msg': session.get('name') + ' has left the room.'}, room=room)", "def left(message):\n room = session.get('room')\n leave_room(room)\n emit('status', {'msg': session.get('name') + ' has left the room.'}, room=room)", "def userJoined(self, user, channel):\n self.dispatch('population', 'userJoined', user, channel)", "def unban(self):\n\n if self.get_permissions()['banned']:\n member_group = Group.query.filter(\n Group.admin == False,\n Group.super_mod == False,\n Group.mod == False,\n Group.guest == False,\n Group.banned == False\n ).first()\n\n self.primary_group_id = member_group.id\n self.save()\n return True\n return False", "def fleave(var, wrapper, message):\n\n for person in re.split(\" +\", message):\n person = person.strip()\n if not person:\n continue\n\n target, _ = users.complete_match(person, get_players())\n dead_target = None\n if var.PHASE in var.GAME_PHASES:\n dead_target, _ = users.complete_match(person, var.DEADCHAT_PLAYERS)\n if target is not None:\n if wrapper.target is not channels.Main:\n wrapper.pm(messages[\"fquit_fail\"])\n return\n\n msg = [messages[\"fquit_success\"].format(wrapper.source, target)]\n if get_main_role(target) != \"person\" and var.ROLE_REVEAL in (\"on\", \"team\"):\n msg.append(messages[\"fquit_goodbye\"].format(get_reveal_role(target)))\n if var.PHASE == \"join\":\n player_count = len(list_players()) - 1\n to_say = \"new_player_count\"\n if not player_count:\n to_say = \"no_players_remaining\"\n msg.append(messages[to_say].format(player_count))\n\n wrapper.send(*msg)\n\n if var.PHASE != \"join\":\n if target.nick in var.PLAYERS:\n var.DCED_PLAYERS[target.nick] = var.PLAYERS.pop(target.nick)\n\n add_dying(var, target, \"bot\", \"fquit\", death_triggers=False)\n kill_players(var)\n\n elif dead_target is not None:\n leave_deadchat(var, dead_target, force=wrapper.source)\n if wrapper.source not in var.DEADCHAT_PLAYERS:\n wrapper.pm(messages[\"admin_fleave_deadchat\"].format(dead_target))\n\n else:\n wrapper.send(messages[\"not_playing\"].format(person))\n return", "def _unlisten(self):\n users = fileIO.load_json(\"users.json\")\n print(\"The list of users is: \")\n for i in users:\n print(users[i][\"name\"])\n name = False\n while not name: #Loop until valid name given\n name = input(\"Please enter the user that you would no longer like to be listening to events for: \")\n userID = self._get_user_id(name)\n if not 
userID:\n name = False\n #Output\n command = \"unlisten {0}\".format(userID)\n return(command)" ]
[ "0.68877256", "0.6836113", "0.6618386", "0.6293848", "0.62480164", "0.6209803", "0.61928344", "0.61679953", "0.61453825", "0.6137914", "0.61103255", "0.6071488", "0.59881574", "0.59806395", "0.5946936", "0.592674", "0.5914093", "0.59117216", "0.59084827", "0.5901252", "0.58909315", "0.5847027", "0.5838013", "0.58291817", "0.5789759", "0.5788652", "0.5755686", "0.5749129", "0.57347345", "0.57271326", "0.57018375", "0.56832784", "0.5674653", "0.5671523", "0.5669292", "0.566431", "0.5663076", "0.5658219", "0.56241125", "0.5598875", "0.5589783", "0.5552525", "0.5544389", "0.55314666", "0.552495", "0.551495", "0.5510654", "0.5506964", "0.55049926", "0.5500635", "0.5499185", "0.5488445", "0.5480651", "0.54792833", "0.5473814", "0.54522544", "0.5445833", "0.54441637", "0.54287505", "0.54040694", "0.53865874", "0.5382276", "0.5382076", "0.5362116", "0.5356092", "0.53454864", "0.53440547", "0.534299", "0.53347176", "0.5333024", "0.5330011", "0.53288674", "0.5314667", "0.53046924", "0.52988833", "0.5291637", "0.5284726", "0.52809006", "0.52778643", "0.5274708", "0.5273506", "0.52702904", "0.52659917", "0.5252198", "0.5244006", "0.523188", "0.5223638", "0.52156085", "0.52006364", "0.52004755", "0.51958054", "0.51921797", "0.5173822", "0.5173822", "0.5173822", "0.5173822", "0.5163806", "0.5162209", "0.51592606", "0.5146975" ]
0.7276704
0
broadcast a new message to a group chat
def _broadcast_group(cls, sender, sender_sid, group, text): # todo make this method async for recipient in group.get_users(): if recipient == sender: continue cls._broadcast_user(sender, sender_sid, recipient, text, group.id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def handle_groupchat_message(self, msg):\n self.xmpp.event('groupchat_message', msg)\n self.xmpp.event(\"muc::%s::message\" % msg['from'].bare, msg)", "def sendMessage(self, message):\n\t\tm = domish.Element((None, 'message'))\n\t\tm['from'] = self.jid\n\t\tm['to'] = self.room\n\t\tm['type'] = 'groupchat'\n\t\tm.addElement('body', content = message)\n\t\tself.xmlstream.send(m)", "def new_chat_message(cls, chatroom, text, sender):\n cls.broadcast(\n group=chatroom,\n payload={\"chatroom\": chatroom, \"text\": text, \"sender\": sender},\n )", "def broadcast(msg, prefix=\"\",ChatRoom=None): # prefix is for name identification. \n if not ChatRoom == None :\n for sock,name in ChatRooms[ChatRoom]:\n sock.send(bytes(prefix, \"utf8\")+msg)", "def send_group_message(self, data):\n return self.__json_call('chat.postMessage', data)", "def broadcast(self, message):\n self._send('broadcast', message)", "def broadcast(self, name, msg, color='yellow'):\n name = str(name)\n msg = str(msg)\n self.output.broadcasts.append({\n 'name': name,\n 'msg': msg,\n 'color': str(color),\n 'botname': self._botname,\n 'botowner': self._botowner,\n })\n self._set_lastsaid('[BROADCAST] {0}: {1}'.format(name, msg))", "def broadcast(msg):\r\n for user in clients:\r\n msg_client(msg, user)", "def chat(request):\n message = '{}: {}'.format(request.form['user'], request.form['message'])\n if message:\n ChatNamespace.broadcast('message', message)\n return Response()", "async def new_message(self, message):\n user = self.scope['user']\n response_data = {\n 'message': message,\n 'username': user.get_full_name()\n }\n await self.create_chat_message(user, message)\n await self.channel_layer.group_send(\n self.conversation_name,\n {\n 'type': 'chat_message',\n 'response_data': json.dumps(response_data)\n }\n )", "def broadcast(self,msg, UDP=False):\n if DEBUG: print \"class GlabPythonManager, function: broadcast\"\n if DEBUG and len(msg) < 10000: print \"class GlabPythonManager, function: broadcast\"\n \n if UDP: \n self.multicast.protocol.send(msg)\n return\n \n for key, connection in self.connection_manager.default_websocket_connections.iteritems():\n try:\n pass\n self.connection_manager.send(msg,connection)\n except AttributeError:\n if DEBUG: print \"Error: Failed to send broadcast\"\n pass \n \n '''\n for key, peer_server in self.connection_manager.peer_servers.iteritems():\n if not peer_server.ip == '10.1.1.112':\n continue\n try:\n self.connection_manager.send(msg,peer_server)\n except AttributeError:\n if DEBUG: print \"Error: Failed to send broadcast\"\n pass\n '''\n \n \n for key, connection in self.listener.openConnections.iteritems():\n continue\n try:\n if DEBUG: print \"broadcasting to the protocol:\", connection.ConnectionUID\n connection.transport.write(msg)\n except AttributeError:\n if DEBUG: print \"Error: Failed to send broadcast\"\n pass\n \n \n #for client in self.wsfactory.openConnections.keys():\n #self.wsfactory.openConnections[client].sendMessage(messagestring)", "async def broadcast(self, msg):\n if not self._session:\n await self._create_session()\n \n if isinstance(msg, str):\n msg = Message(msg)\n assert isinstance(msg, Message)\n msg.set_recipient(-1)\n msg.set_sender(self._network._robot.id)\n await self._session.put(self._network.SERVER_ADDR + '/api/send', json=msg.to_dict())\n return msg", "def new_msg(cls, sender_id, recipient_id, text):\n\n sender = User.find(id=sender_id)\n sender_sid = cls.get_user_sid(sender.id)\n\n if is_group(recipient_id):\n\n recipient_group = 
Group.find(id=recipient_id)\n\n if not recipient_group:\n raise Exception('recipient was not found')\n if not recipient_group.has_user(sender):\n raise Exception('user is not a member of this group')\n\n cls._broadcast_group(sender, sender_sid,\n recipient_group, text)\n\n elif is_user(recipient_id):\n\n recipient = User.find(id=recipient_id)\n if not sender.is_friends(recipient):\n raise Exception('user is not friends with recipient')\n\n if recipient.blocked(sender):\n raise Exception('recipient has blocked you')\n\n if not recipient:\n raise Exception('recipient was not found')\n\n cls._broadcast_user(sender, sender_sid, recipient,\n text)\n\n else:\n\n raise Exception('bad recipient id')", "def flash_broadcast(self,params):\n text = params['text']\n if self.participant:\n self.service.sendParticipants(self.name,'msg',{\"text\":text,\"sender\":self.name})\n else:\n self.notLoggedIn()", "def handle_groupchat_subject(self, msg):\n self.xmpp.event('groupchat_subject', msg)", "async def broadcast(self):\n with await self.redis as connection:\n await connection.execute_pubsub(\"subscribe\", self.channel)\n try:\n while True:\n room = await self.channel.get(encoding=\"utf-8\")\n await self.ws.send(message)\n except websockets.ConnectionClosed as e:\n print(f\"<ChatManager:broadcast>[error] {e}\")\n await self.connection_closed()", "def _broadcast_user(cls, sender, sender_sid, recipient, text, chat_id=None):\n # todo make this method async\n recipient_sid = cls.get_user_sid(recipient.id)\n if not recipient_sid:\n cls._cache_msg(sender.id, recipient.id, text, chat_id)\n return\n data = {'sender_id': sender.id, 'recipient_id': recipient.id,\n 'text': text, 'chat_id': chat_id or 'private', 'time': time()}\n app.socketio.emit('message', data, room=recipient_sid)", "def message(self, msg):\n if msg['type'] in ('chat', 'normal'):\n msg.reply(\"Thanks for sending\\n%(body)s\" % msg).send()", "def broadcast(msg):\n\n for sock in clients:\n sock.send(bytes(msg, \"utf-8\"))", "async def sendmessage(bot: fido, channel: str, sender: str, args: List[str]):\n if len(args) == 0:\n return \"Usage: \" + IRC.commandPrefix + \"groupadd <groupname> <nickname> <phonenumber>\"\n if not get_group(args[0]):\n await bot.message(channel, f\"Group not found: {args[0]}\")\n return\n send_to_group(args[0], ' '.join(args[1:]))\n await bot.message(channel, \"Message sent.\")", "async def chat_message(self, event):\n if self.user and not self.user.is_authenticated:\n return\n\n user_id = event['user_id']\n message = event['message']\n created_at = event['created_at']\n publisher_full_name = event['publisher_full_name']\n\n await self.send(text_data=json.dumps({\n 'user_id': user_id,\n 'created_at': created_at,\n 'message': \"{}\".format(message),\n 'publisher_full_name': publisher_full_name,\n }))", "def broadcast_message(msg: str):\r\n\tfor ip in _clients.keys():\r\n\t\tsend_message(ip, msg)", "def broadcast(self, msg, mtype = 'message', back = True):\n for p in DixitConnection.participants:\n if back or (DixitConnection.participants[p] != self):\n DixitConnection.participants[p].emit(mtype, msg)", "def broadcast(bot, event, *args):\n if args:\n subcmd = args[0]\n parameters = args[1:]\n if subcmd == \"info\":\n \"\"\"display broadcast data such as message and target rooms\"\"\"\n conv_info = [\"<b>{}</b> ... 
{}\".format(get_conv_name(_), _.id_) for _ in _internal[\"broadcast\"][\"conversations\"]]\n if not _internal[\"broadcast\"][\"message\"]:\n bot.send_message_parsed(event.conv, _(\"broadcast: no message set\"))\n return\n if not conv_info:\n bot.send_message_parsed(event.conv, _(\"broadcast: no conversations available\"))\n return\n bot.send_message_parsed(event.conv, _(\n \"<b>message:</b><br />\"\n \"{}<br />\"\n \"<b>to:</b><br />\"\n \"{}\".format(_internal[\"broadcast\"][\"message\"],\n \"<br />\".join(conv_info))))\n elif subcmd == \"message\":\n \"\"\"set broadcast message\"\"\"\n message = ' '.join(parameters)\n if message:\n if message.lower().strip().startswith(tuple([_.lower() for _ in bot._handlers.bot_command])):\n bot.send_message_parsed(event.conv, _(\"broadcast: message not allowed\"))\n return\n _internal[\"broadcast\"][\"message\"] = message\n else:\n bot.send_message_parsed(event.conv, _(\"broadcast: message must be supplied after subcommand\"))\n elif subcmd == \"add\":\n \"\"\"add conversations to a broadcast\"\"\"\n if parameters[0] == \"groups\":\n \"\"\"add all groups (chats with users > 2)\"\"\"\n for conv in bot.list_conversations():\n if len(conv.users) > 2:\n _internal[\"broadcast\"][\"conversations\"].append(conv)\n elif parameters[0] == \"ALL\":\n \"\"\"add EVERYTHING - try not to use this, will message 1-to-1s as well\"\"\"\n for conv in bot.list_conversations():\n _internal[\"broadcast\"][\"conversations\"].append(conv)\n else:\n \"\"\"add by wild card search of title or id\"\"\"\n search = \" \".join(parameters)\n for conv in bot.list_conversations():\n if search.lower() in get_conv_name(conv).lower() or search in conv.id_:\n _internal[\"broadcast\"][\"conversations\"].append(conv)\n _internal[\"broadcast\"][\"conversations\"] = list(set(_internal[\"broadcast\"][\"conversations\"]))\n bot.send_message_parsed(event.conv, _(\"broadcast: {} conversation(s)\".format(len(_internal[\"broadcast\"][\"conversations\"]))))\n elif subcmd == \"remove\":\n if parameters[0].lower() == \"all\":\n \"\"\"remove all conversations from broadcast\"\"\"\n _internal[\"broadcast\"][\"conversations\"] = []\n else:\n \"\"\"remove by wild card search of title or id\"\"\"\n search = \" \".join(parameters)\n removed = []\n for conv in _internal[\"broadcast\"][\"conversations\"]:\n if search.lower() in get_conv_name(conv).lower() or search in conv.id_:\n _internal[\"broadcast\"][\"conversations\"].remove(conv)\n removed.append(\"<b>{}</b> ({})\".format(get_conv_name(conv), conv.id_))\n if removed:\n bot.send_message_parsed(event.conv, _(\"broadcast: removed {}\".format(\", \".join(removed))))\n elif subcmd == \"NOW\":\n \"\"\"send the broadcast - no turning back!\"\"\"\n context = { \"explicit_relay\": True } # prevent echos across syncrooms\n for conv in _internal[\"broadcast\"][\"conversations\"]:\n bot.send_message_parsed(conv, _internal[\"broadcast\"][\"message\"], context=context)\n bot.send_message_parsed(event.conv, _(\"broadcast: message sent to {} chats\".format(len(_internal[\"broadcast\"][\"conversations\"]))))\n else:\n bot.send_message_parsed(event.conv, _(\"broadcast: /bot broadcast [info|message|add|remove|NOW] ...\"))\n else:\n bot.send_message_parsed(event.conv, _(\"broadcast: /bot broadcast [info|message|add|remove|NOW]\"))", "def message(message):\n\troom = session.get('room')\n\tprint('%s : message : %s' % (session, message['message']))\n\temit('_message', {'user_name': session.get('name'), 'message' : message['message']}, room=room, include_self=False)", "def 
send_message(self, message, user, msg_type=MSG_TYPE_MESSAGE):\n final_msg = {'room': str(self.id), 'message': message, 'username': user.username, 'msg_type': msg_type}\n\n # Send out the message to everyone in the room\n self.websocket_group.send(\n {\"text\": json.dumps(final_msg)}\n )", "def broadcast():\n # global receiving_message\n # if not receiving_message:\n router.broadcast(clients.copy(), json.dumps(current_state))", "async def chat_message(self, event):\n message = event['message']\n await self.send_json({\n 'message': message\n })", "def add_chat_message(self, message):\n try:\n data = message.to_json()\n key = ENVIRONMENT['REDIS_PREFIX'] + \"chat_messages:%s\" % self.channel_id\n \n logging.info(data)\n \n self.redis_server.rpush(key, data)\n self.redis_server.publish(ENVIRONMENT['REDIS_PREFIX'] + 'chat_messages', data)\n except Exception, e:\n logging.info(\"ERROR adding message %s: %s\" % (message, e))\n raise", "def send_message(self, message, user, msg_type=MSG_TYPE_MESSAGE):\n final_msg = {'room': str(self.id), 'message': message, 'user_id': str(user.id), 'nombre': user.nombre, 'apellidos': user.apellidos, 'msg_type': msg_type}\n mensaje = Mensaje(mensaje=message, emisor=user, room=self)\n mensaje.save()\n self.websocket_group.send({\n 'text': json.dumps({\n 'mensaje': final_msg,\n 'type': 'message',\n 'msg_type': msg_type\n })\n })", "def sendmessage(user,roomid):\n message = request.form['message']\n channel.send_message(user+roomid,message)", "def start(self, event):\n self.send_presence()\n self.get_roster()\n self.send_message(mto=self.recipient, mbody=self.msg, mtype='chat')\n self.disconnect(wait=True)", "def notify(cls, user_id, message):\n # Find the subscription group for user.\n group = None if user_id is None else f\"user_{user_id}\"\n cls.broadcast(group=group, payload=message)", "async def chat_message(self, event):\n await self.send_json(\n return_value(\n ACTION_MESSAGE,\n event['label'],\n event['username'],\n MSG_MESSAGE,\n event['message']\n )\n )", "async def chat_message(self, event):\n\n print(\"PublicChatConsumer\", \"chat_message from user\", event[\"user_id\"])\n await self.send_json({\n \"msg_type\": MSG_TYPE_MESSAGE,\n \"profile_image\": event[\"profile_image\"],\n \"username\": event[\"username\"],\n \"user_id\": event[\"user_id\"],\n \"message\": event[\"message\"],\n \"natural_timestamp\": humanize_or_normal(timezone.now())\n })", "def __sendMessage(self):\n # TODO: Switch to this when implemented\n \n msg = self.ui.inputWidget.toPlainText()\n self.ui.inputWidget.clear()\n strv = StringView()\n strv.appendText(unicode(msg))\n self._amsn_conversation.sendMessage(strv)\n self.ui.textEdit.append(\"<b>/me says:</b><br>\"+unicode(msg)+\"\")", "def broadcast(self, message):\n for s in self.connections:\n s.send(bytes(message, encoding='utf-8'))", "def broadcast(bot, event, *args):\n if args:\n subcmd = args[0]\n parameters = args[1:]\n if subcmd == \"info\":\n \"\"\"display broadcast data such as message and target rooms\"\"\"\n\n conv_info = [ \"<b><pre>{}</pre></b> ... 
<pre>{}</pre>\".format(bot.conversations.get_name(convid), convid) \n for convid in _internal[\"broadcast\"][\"conversations\"] ]\n\n if not _internal[\"broadcast\"][\"message\"]:\n yield from bot.coro_send_message(event.conv, _(\"broadcast: no message set\"))\n return\n\n if not conv_info:\n yield from bot.coro_send_message(event.conv, _(\"broadcast: no conversations available\"))\n return\n\n yield from bot.coro_send_message(event.conv, _(\n \"<b>message:</b><br />\"\n \"{}<br />\"\n \"<b>to:</b><br />\"\n \"{}\".format(_internal[\"broadcast\"][\"message\"],\n \"<br />\".join(conv_info))))\n\n elif subcmd == \"message\":\n \"\"\"set broadcast message\"\"\"\n message = ' '.join(parameters)\n if message:\n if message.lower().strip().startswith(tuple([_.lower() for _ in bot._handlers.bot_command])):\n yield from bot.coro_send_message(event.conv, _(\"broadcast: message not allowed\"))\n return\n _internal[\"broadcast\"][\"message\"] = message\n\n else:\n yield from bot.coro_send_message(event.conv, _(\"broadcast: message must be supplied after subcommand\"))\n\n elif subcmd == \"add\":\n \"\"\"add conversations to a broadcast\"\"\"\n if parameters[0] == \"groups\":\n \"\"\"add all groups (chats with users > 1, bot not counted)\"\"\"\n for convid, convdata in bot.conversations.get().items():\n if(len(convdata[\"participants\"]) > 1):\n _internal[\"broadcast\"][\"conversations\"].append(convid)\n\n elif parameters[0] == \"ALL\":\n \"\"\"add EVERYTHING - try not to use this, will message 1-to-1s as well\"\"\"\n for convid, convdata in bot.conversations.get().items():\n _internal[\"broadcast\"][\"conversations\"].append(convid)\n\n else:\n \"\"\"add by wild card search of title or id\"\"\"\n search = \" \".join(parameters)\n for convid, convdata in bot.conversations.get().items():\n if search.lower() in convdata[\"title\"].lower() or search in convid:\n _internal[\"broadcast\"][\"conversations\"].append(convid)\n\n _internal[\"broadcast\"][\"conversations\"] = list(set(_internal[\"broadcast\"][\"conversations\"]))\n yield from bot.coro_send_message(event.conv, _(\"broadcast: {} conversation(s)\".format(len(_internal[\"broadcast\"][\"conversations\"]))))\n\n elif subcmd == \"remove\":\n if parameters[0].lower() == \"all\":\n \"\"\"remove all conversations from broadcast\"\"\"\n _internal[\"broadcast\"][\"conversations\"] = []\n\n else:\n \"\"\"remove by wild card search of title or id\"\"\"\n search = \" \".join(parameters)\n removed = []\n for convid in _internal[\"broadcast\"][\"conversations\"]:\n if search.lower() in bot.conversations.get_name(convid).lower() or search in convid:\n _internal[\"broadcast\"][\"conversations\"].remove(convid)\n removed.append(\"<b><pre>{}</pre></b> (<pre>{}</pre>)\".format(bot.conversations.get_name(convid), convid))\n\n if removed:\n yield from bot.coro_send_message(event.conv, _(\"broadcast: removed {}\".format(\", \".join(removed))))\n\n elif subcmd == \"NOW\":\n \"\"\"send the broadcast - no turning back!\"\"\"\n context = { \"explicit_relay\": True } # prevent echos across syncrooms\n for convid in _internal[\"broadcast\"][\"conversations\"]:\n yield from bot.coro_send_message(convid, _internal[\"broadcast\"][\"message\"], context=context)\n yield from bot.coro_send_message(event.conv, _(\"broadcast: message sent to {} chats\".format(len(_internal[\"broadcast\"][\"conversations\"]))))\n\n else:\n yield from bot.coro_send_message(event.conv, _(\"broadcast: /devilbot broadcast [info|message|add|remove|NOW] ...\"))\n\n else:\n yield from 
bot.coro_send_message(event.conv, _(\"broadcast: /devilbot broadcast [info|message|add|remove|NOW]\"))", "async def twitch_group(message: discord.Message, _: utils.placeholder):\n pass", "def user_joined_group(cls, group, user):\n text = \"{} joined the group chat\".format(user.username)\n cls._broadcast_group(group, None, group, text)", "async def receive(self, text_data):\n if self.user and not self.user.is_authenticated:\n return\n\n text_data_json = json.loads(text_data)\n message = text_data_json['message']\n\n full_name = \"{} {}\".format(self.user.first_name, self.user.last_name)\n if full_name == \" \":\n full_name = \"--\"\n\n try:\n room = Rooms.objects.get(name=self.room_name)\n except Rooms.DoesNotExist:\n return\n\n chat_object = Chat.objects.create(user_id=self.user.id, message=message, room=room)\n\n created_at = chat_object.created_at.strftime('%H:%M:%S %Y/%m/%d')\n\n await self.channel_layer.group_send(\n self.room_group_name,\n {\n 'type': \"chat_message\",\n 'message': message,\n 'user_id': self.user.id,\n 'publisher_full_name': full_name,\n 'created_at': created_at,\n }\n )", "def broadcast(self, msg):\n for client in self.clients.values():\n send_data(client.socket, msg)", "def broadcast(self,message_type,message):\n for socket in self.connections:\n if socket != self.server_socket:\n self.sendToSocket(socket,message_type,message)", "def send_to_all(self, msg: str) -> None:\n print(f'{self._name} sends a public message: {msg}')\n self._group.publish_message(sender=self, msg=msg)", "def chat(self, p: str):\n message = p[1:]\n if message.strip() != '':\n message: str = f\"{self.username} says: {message[:80]}\"\n self.broadcast(packet.construct_log_packet(message), include_self=True)\n self.logger.log(message)", "async def chat_message(self, event):\n await self.send(\n {'type': \"websocket.send\",\n 'text': event['response_data']}\n )", "async def message(self, message):\n\t\tif not self.group:\n\t\t\traise exceptions.ClientError('NO_GROUP')\n\n\t\tsanitized_message = utilities.sanitize_string(message)\n\n\t\tif not (0 < len(sanitized_message) < 100):\n\t\t\traise exceptions.ClientError('INVALID_MESSAGE')\n\n\t\tif self.group.game.in_progress and len(self.group.game.rounds) > 0:\n\t\t\tcurrent_round = self.group.game.rounds[-1]\n\t\t\tif current_round.answerer == self and not current_round.finished:\n\t\t\t\tfor index, data in enumerate(current_round.words):\n\t\t\t\t\tword = data['word']\n\t\t\t\t\tif word.lower().strip() == sanitized_message.lower().strip():\n\t\t\t\t\t\tawait current_round.answer(word)\n\t\t\telif current_round.questioner == self:\n\t\t\t\traise exceptions.ClientError('CANT_MESSAGE')\n\n\t\tawait self.group.send(1, 'CHAT_MESSAGE', {\n\t\t\t'user': self.as_safe_dict(),\n\t\t\t'message': sanitized_message\n\t\t})", "def send_message(self, message):\n msg_bytes = (\n f'{self.username}{self.delimiter}{message}'\n ).encode('utf-8')\n self.socket.writeDatagram(\n qtc.QByteArray(msg_bytes),\n qtn.QHostAddress.Broadcast,\n self.port\n )", "def send_message(self, message):\n \n msgPacket = serverbound.play.ChatPacket()\n msgPacket.message = message\n self.connection.write_packet(msgPacket)", "def send(self, msg):\n self.message('Me', msg)", "def broadcast(msg, prefix=\"\"): # prefix is for name identification.\r\n for sock in clients:\r\n sock.send(bytes(prefix, \"utf8\")+msg)", "def send_chat_message(self, channel, message):\r\n self._send(\"PRIVMSG #{0} :{1}\".format(channel, message))", "async def on_message(self, msg: Message):\n try:\n cmsg = await 
WechatyMessage(msg)\n except NotImplementedError as e:\n logger.debug(\"[WX] {}\".format(e))\n return\n except Exception as e:\n logger.exception(\"[WX] {}\".format(e))\n return\n logger.debug(\"[WX] message:{}\".format(cmsg))\n room = msg.room() # 获取消息来自的群聊. 如果消息不是来自群聊, 则返回None\n isgroup = room is not None\n ctype = cmsg.ctype\n context = self._compose_context(ctype, cmsg.content, isgroup=isgroup, msg=cmsg)\n if context:\n logger.info(\"[WX] receiveMsg={}, context={}\".format(cmsg, context))\n self.produce(context)", "def handle_groupchat_invite(self, inv):\n logging.debug(\"MUC invite to %s from %s: %s\", inv['to'], inv[\"from\"], inv)\n if inv['from'] not in self.rooms.keys():\n self.xmpp.event(\"groupchat_invite\", inv)", "def chat(sock, msg):\r\n message = \"PRIVMSG {} :{}\\r\\n\".format(cfg.CHAN, msg)\r\n #print(\"Sending: \"+message)\r\n sock.send(message.encode(\"utf-8\"))", "def publish_message(self, sender, msg: str) -> None:\n for member in self._all_members.values():\n if member is not sender: # The sender himself should not receive the message.\n member.receive_message(sender_name=sender.name, msg=msg)", "def send_group_message(self, recipient_group_id: str, text: str, block: bool = False) -> None:\n payload = {\n \"type\": \"send\",\n \"username\": self.username,\n \"recipientGroupId\": recipient_group_id,\n \"messageBody\": text,\n }\n self._send_command(payload, block)", "def broadcast(self, clients, msg):\n self.server.broadcast(clients, msg)", "def broadcast(msg, prefix=\"\"): # prefix is for name identification.\n\tfor sock in clients:\n\t\tsock.send(bytes(prefix, \"utf8\")+msg)", "def send(self, message, sender):\n chatclient.receive_chat_message(message, sender)\n return {}", "def broadcast(msg, prefix=\"\"): # prefix is for name identification.\n\n for sock in clients:\n sock.send(bytes(prefix, \"utf8\")+msg)", "def broadcast(self, msg):\n asyncio.run_coroutine_threadsafe(self.coro.broadcast(msg), self._robot._event_loop)", "async def send_room_message(self, room_id, message):\n print(\"PblicChatConsumer\", \"send_room_message\")\n user = self.scope[\"user\"]\n\n if self.room_id is not None:\n if str(room_id) != str(self.room_id):\n raise ClientError(\"ROOM_ACCESS_DENIED\", \"Room access denied\")\n elif not user.is_authenticated:\n raise ClientError(\"AUTH_ERRO\", \"Not authenticated to join\")\n else:\n raise ClientError(\"ROOM_ACCESS_DENIED\", \"Room access denied\")\n\n room: PublicChatRoom = await get_room_or_error(room_id)\n await create_new_public_room_chat(room, user, message)\n await self.channel_layer.group_send(\n room.group_name,\n {\n \"type\": \"chat.message\", # chat_message\n \"profile_image\": (user.profile_image.url\n if user.profile_image else None),\n \"username\": user.username,\n \"user_id\": user.id,\n \"message\": message,\n }\n )", "def broadcast_to_users(self, text: str, sending_group):\n if sending_group == \"global\":\n for user in self.__users.values():\n user.send_message(f\"broadcast from the server: {text}\")\n print(\"in broadcast to users global\")\n elif sending_group.isdigit():\n sending_group = int(sending_group)\n for user in self.__users.values():\n for station in user.stations:\n if station.line_number == sending_group:\n user.send_message(f\"broadcast from the server: {text}\")\n print(f\"in broadcast to users line{sending_group}\")", "def notify_channel(message):\n slack_client.api_call(\n \"chat.postMessage\",\n channel=\"#andela_socials\",\n text=message,\n as_user=True,\n reply_broadcast=True,\n )", "def new_message(self, 
room, mess):\n pass", "def broadcast(self, msg_type, msg, t=5):\n return None", "def sendMessage(sock, message):\n messageTemp = \"PRIVMSG \" + channel +\" :\" +message\n sock.send((messageTemp+ \"\\n\").encode())", "def joined(message):\n room = session.get('room')\n id = message[\"id\"]\n date = message[\"date\"]\n mid = session.get('mid')\n join_room(room)\n\n\n \"\"\"Load chat history and broadcast to the user that just joined\"\"\"\n #load history\n #save to mongo\n client = MongoClient(\"mongodb+srv://save_info:[email protected]/chat?retryWrites=true&w=majority\")\n\n #sort by descending order of creation\n\n #get chatroom info\n\n db = client.get_database(\"chat_rooms\")\n chat = db[\"chat_rooms\"]\n query = {\"roomId\": room, \"mid\" : {\"$ne\": int(mid)}}\n chat_rooms = db[\"chat_rooms\"]\n chat_room = list(chat_rooms.find(query, {'_id': False}).limit(1))[0]\n partner_name = chat_room[\"name\"]\n partner_mid = chat_room[\"mid\"]\n\n query = {\"roomId\": room, \"mid\" : int(mid)}\n chat_room = list(chat_rooms.find(query, {'_id': False}).limit(1))[0]\n chat_link = chat_room[\"chat_link\"]\n\n emit('details', {'roomId': room, \"name\": session.get(\"name\"), \"mid\": int(mid), \"chat_link\":chat_link, \"partner_name\": partner_name, \"partner_mid\": partner_mid}, room=id)\n\n db = client.get_database(\"chat\")\n chat = db[\"chat\"]\n query = {\"roomId\": room}\n\n chatHistory = list(chat.find(query, {'_id': False}).limit(50))\n if len(chatHistory) > 0:\n #load chat history\n\n emit('chatHistory', {'chatHistory': chatHistory}, room=id)\n\n\n #delete all scheduled notifications to this user in this chatroom\n scheduler = db[\"scheduler\"]\n query = {\"receiver\": int(mid)}\n scheduler.delete_many(query)", "def test_broadcast_message(self):\n\n typhoonae.websocket.broadcast_message('My broadcast message.')", "def sendmessage(user,gameid):\n message = request.form['message']\n channel.send_message(user+gameid,message)", "def notification_broadcast(actor, key, **kwargs):\n channel_layer = get_channel_layer()\n id_value = kwargs.pop(\"id_value\", None)\n recipient = kwargs.pop(\"recipient\", None)\n payload = {\n \"type\": \"receive\",\n \"key\": key,\n \"actor_name\": actor.username,\n \"id_value\": id_value,\n \"recipient\": recipient,\n }\n async_to_sync(channel_layer.group_send)(\"notifications\", payload)", "def player_team_broadcast(self, player_ip, *args):\r\n\t\ttry:\r\n\t\t\tmessage = args\r\n\t\t\tteam_type = self._teammates[player_ip] # KeyError\r\n\t\texcept KeyError:\t# Invaild player team\r\n\t\t\tself._comm_server.send_message(player_ip, \"send-team fail\")\r\n\t\telse:\r\n\t\t\tfrom_ID = self._teams[team_type].get_player_info_by_IP(player_ip).ID\r\n\r\n\t\t\tmsg_str = \"\"\r\n\t\t\tfor msg_block in message:\r\n\t\t\t\tmsg_str += \" \" + msg_block\r\n\r\n\t\t\tfor to_ip, team in self._teammates.items():\r\n\t\t\t\tif team is team_type and to_ip != player_ip:\r\n\t\t\t\t\tself._comm_server.send_message(to_ip, \"send-from {0}{1}\" \\\r\n\t\t\t\t\t\t.format(from_ID, msg_str))\r\n\r\n\t\t\tself._comm_server.send_message(player_ip, \"send-team ok\")", "def broadcast(msg, prefix=\"\"): # prefix is for name identification.\r\n\r\n for sock in clients:\r\n sock.send(bytes(prefix, \"utf8\") + msg)", "def broadcast(self, writer, message):\r\n for user in self.connection_pool:\r\n if user != writer:\r\n # We don't need to also broadcast to the user sending the message\r\n user.write(f\"{message}\\n\".encode())", "def 
send_message(userid):\n\tsc.api_call(\n\t\t\"chat.postMessage\",\n\t\tchannel=userid,\n\t\ttext=\"Hey there, just wanted to remind you to join <#CQCKS8UN6|secret-snowflake-fa19> by Wednesday night, if you want to participate in Secret Santa this year. It will be lots of fun!\",\n\t\tusername=\"Reminder\",\n\t\ticon_emoji=\":santa:\"\n\t)", "def sendchat(self, the_id, msg):\r\n the_id = Client.toroomid(the_id)\r\n self.tx_cmd(FCTYPE.CMESG, the_id, 0, 0, msg)\r\n #@TODO - Emote encoding\r", "def broadcast(message):\n for client in CLIENTS:\n client.send(message)", "def send_to(self, target, msg):\n\t\tif self.cid is None:\n\t\t\traise UsageError(\"Not in a group!\")\n\t\tidb, payload = msg[0], msg[1:]\n\t\tself.sendMessage(idb + chr(target) + payload, True)", "def new_message_from_conn(self, friend, msg):\n print(\"new_msg signal activated with friend\",friend,\"and msg\",msg)\n\n if not self.stack.get_child_by_name(friend):\n new_chat_window = chat_layout.ChatLayout(orientation=Gtk.Orientation.VERTICAL,friend=friend)\n new_chat_window.show_all()\n self.stack.add_titled(new_chat_window, friend, friend)\n\n child = self.move_to_child(friend)\n child.append_friend_text(msg)", "def async_pubnub_message(\n self, device_id: str, date_time: datetime, message: dict[str, Any]\n ) -> None:\n device = self.get_device_detail(device_id)\n activities = activities_from_pubnub_message(device, date_time, message)\n activity_stream = self.activity_stream\n assert activity_stream is not None\n if activities:\n activity_stream.async_process_newer_device_activities(activities)\n self.async_signal_device_id_update(device.device_id)\n activity_stream.async_schedule_house_id_refresh(device.house_id)", "def send_message(self, recipient: str, msg: str) -> None:\n print(f'{self._name} sends a message to {recipient}: {msg}')\n self._group.private_message(sender=self, recipient_name=recipient, msg=msg)", "def joined(message):\n #room = session.get('room')\n room='abc'\n join_room(room)\n #emit('status', {'msg': session.get('name') + ' has entered the room.' + message['msg']}, room=room)\n emit('status', {'msg': 'Yao has entered the room.'}, room=room)\n #emit('status', {'msg': 'Yao has entered the room.'}, room='room1')", "async def chat_message(self, event):\n\t\t# Send a message down to the client\n\t\tprint(\"DocumentChatConsumer: chat_message from user #\" + str(event))\n\t\ttimestamp = calculate_timestamp(timezone.now())\n\t\tawait self.send_json(\n\t\t\t{\n\t\t\t\t\"msg_type\": MSG_TYPE_MESSAGE,\n\t\t\t\t\"annotationId\": event['annotationId'],\n\t\t\t\t\"username\": event[\"username\"],\n\t\t\t\t\"user_id\": event[\"user_id\"],\n\t\t\t\t\"xfdfString\": event[\"message\"],\n\t\t\t\t\"natural_timestamp\": timestamp,\n\t\t\t},\n\t\t)", "def joingroup_command(update,context):\n update.message.reply_text('Want to chat with other CTF players or ask questions to admins? 
Use the following channel:\\r\\nhttps://t.me/joinchat/CYsj-xwzlFqIbQPPeo04bw')", "def chat(data):\n username, message_text, room = data['username'], data['message'], data['room']\n message = Message(username, message_text, int(room))\n db.session.add(message)\n db.session.commit()\n emit('response', {'username': username, 'message': {'id': message.id, 'text': message.text}}, room=room)", "async def on_chat_message(self, chat_message):\n pass", "def broadcast(mensagem, prefixo = \"\"):\n for sock in clients:\n sock.send(bytes(prefixo, \"utf8\") + mensagem)", "def send_message(groupID, clientID, clientMessage, timestamp): # Unfinish\n for socket in GROUP_LIST[groupID]:\n message = ''\n try :\n socket.send(message)\n except :\n # broken socket connection may be, chat client pressed ctrl+c for example\n socket.close()\n CONNECTION_LIST.remove(socket)", "def flash_msg(self, params):\n if params.has_key('receiver'): name = params['receiver']\n else: \n if self.participant: \n group = self.service.groupOfParticipant(self.participant)\n if group: \n member_avail = filter(lambda x:x.status == LISTEN and x.name != self.name,group.members)\n if member_avail:\n member = member_avail.pop()\n name = member.name\n else:\n self.notLoggedIn()\n return\n if params.has_key('text'): text = params['text']\n else: return\n\n logger.writeLog(\"%s@%s said:'%s'\" % (self.name,self.transport.hostname,text))\n \n if self.participant:\n msgMethod = self.participant.directMessage\n try:\n self.service.sendParticipants(self.name,\"botmsg\",{\"text\":text,\"sender\":self.name})\n msgMethod(name,text)\n except:\n self.receiveDirectCommand(\"msg\",{\"sender\":\"MsgServ\",\"text\":\"cant send text, probably there is no user to listen\"})\n else:\n self.notLoggedIn()", "def on_chat_message(self, message):\n if message['target'] == '':\n self.service.chat_all(message['text'], self.name)\n else:\n targets = list(filter(lambda p: p.name == message['target'], self.service.protocols))\n print(targets)\n if len(targets) == 1:\n target = targets[0]\n target.send_chat(message['text'], self.name, target.name, whisper=True)\n if self.name != target.name:\n self.send_chat(message['text'], self.name, target.name, whisper=True)\n else:\n log.warn(\"Trying to chat player {name}, but this player is not found!\",\n name=message['target'])", "def message_new(\n self,\n event: Dict[str, Any]\n ) -> NoReturn:\n event = event[\"object\"][\"message\"]\n msg = event[\"text\"].lstrip(\"/\")\n peer_id = event[\"peer_id\"]\n from_id = event[\"from_id\"]\n msg_id = event[\"conversation_message_id\"]\n\n if peer_id in self.messages_to_delete:\n peer = CHAT_ID_OFFSET + config.USERBOT_CHATS[peer_id]\n new_messages_to_delete = []\n ids = []\n\n for item in self.messages_to_delete[peer_id]:\n if item['date'] > datetime.now():\n new_messages_to_delete.append(item)\n else:\n ids.append(item['id'])\n\n if new_messages_to_delete:\n self.messages_to_delete[peer_id] = new_messages_to_delete\n else:\n self.messages_to_delete.pop(peer_id)\n\n if ids:\n self.userbot.delete_messages(ids, peer)\n\n user = self.data.get_user(from_id, self) if from_id > 0 else None\n\n messages = self.get_messages(event)\n selected_message = messages[0] if len(messages) == 1 else None\n selected_user = (\n self.data.get_user(selected_message['from_id'], self)\n if selected_message and selected_message['from_id'] > 0 else None)\n\n try:\n self.commands.process(\n msg, peer_id, from_id, messages, msg_id,\n user, selected_user)\n except Exception as e:\n print(e)", "def broadcast(msg, 
prefix=\"\"): # prefix is for name identification.\n\n for sock in clients:\n sock.send(bytes(prefix) + msg)", "def __send_broadcast_to_users(self, sending_group=\"global\"):\n\n if sending_group == \"global\":\n data = self.__global_broadcast_entry.get()\n self.__global_broadcast_entry.delete(0, 'end')\n print(f\"broad casting data: {data}\")\n self.__telegram_controller.broadcast_to_users(data, sending_group = \"global\")\n\n elif sending_group == \"line\":\n line = self.__line_number_broadcast_entry.get()\n if len(line) >0 and line.isnumeric():\n data = self.__line_text_broadcast_entry.get()\n self.__line_text_broadcast_entry.delete(0, 'end')\n self.__line_number_broadcast_entry.delete(0, 'end')\n self.__telegram_controller.broadcast_to_users(data, sending_group=line)\n else:\n print(f\"line number must be a number, {line}\")\n else:\n print(f\"{sending_group} is an invalid sending group\")", "def _send(self, receiver_group, receiver_id, typ, msg):\n self._out.append((receiver_group, receiver_id, (typ, msg)))", "def post(self, request):\n # get message from request\n message = request.POST.get('message')\n # create message\n ChatMessage.objects.create(\n message=message, _from=request.user, _to=User.objects.get(pk=request.POST.get('user_id')))\n # return response\n return HttpResponse('success')", "def broadcast_data (sock, message):\n #Do not send the message to master socket and the client who has send us the message\n for socket in CONNECTION_LIST:\n if socket != server_socket and socket != sock :\n try :\n socket.send(message)\n except :\n # broken socket connection may be, chat client pressed ctrl+c for example\n socket.close()\n CONNECTION_LIST.remove(socket)", "def broadcast(self, txt):\n for chan in self.state['joinedchannels']:\n self.say(chan, txt)", "def test_group_notification_called(self):\n sender = self.create_user()\n thread = self.create_thread(sender=sender)\n newmessage = mommy.make(Message, thread=thread, sender=sender)\n send_message(newmessage.pk)\n self.groupnotify_mock.assert_called_with(newmessage.pk)", "def handle_groupchat_error_message(self, msg):\n self.xmpp.event('groupchat_message_error', msg)\n self.xmpp.event(\"muc::%s::message_error\" % msg['from'].bare, msg)" ]
[ "0.74668014", "0.73062366", "0.70124036", "0.6886197", "0.67108417", "0.6665333", "0.66605836", "0.6642074", "0.66353", "0.66307837", "0.66065943", "0.6557014", "0.65497166", "0.64740735", "0.6465451", "0.6459765", "0.64553064", "0.64509314", "0.6406889", "0.63932914", "0.6390402", "0.6384195", "0.63646096", "0.63460255", "0.634117", "0.6328908", "0.6323584", "0.6277235", "0.62738746", "0.62718785", "0.6261451", "0.62563527", "0.6245401", "0.6234614", "0.6225083", "0.61888766", "0.6174518", "0.6171737", "0.61661845", "0.6160117", "0.61572236", "0.61558676", "0.61476505", "0.6146857", "0.610579", "0.610562", "0.6103549", "0.6090029", "0.6084005", "0.6083736", "0.6077754", "0.60693496", "0.60672265", "0.60535336", "0.6045618", "0.6045269", "0.603967", "0.6036718", "0.6029038", "0.60248536", "0.60235775", "0.6019383", "0.6018881", "0.60127234", "0.60114825", "0.5984735", "0.59717274", "0.5962528", "0.5949214", "0.592464", "0.5920948", "0.5920538", "0.5918141", "0.5907913", "0.59001714", "0.5895096", "0.58845514", "0.588096", "0.5879432", "0.58687794", "0.5858407", "0.58541286", "0.5853179", "0.58516014", "0.58448464", "0.58413005", "0.584064", "0.5837042", "0.58363736", "0.5828829", "0.58091515", "0.5803904", "0.5799466", "0.57981277", "0.5785073", "0.5774495", "0.5771454", "0.57645434", "0.5747466", "0.5743142" ]
0.67548275
4
broadcast a new message to a user
def _broadcast_user(cls, sender, sender_sid, recipient, text, chat_id=None): # todo make this method async recipient_sid = cls.get_user_sid(recipient.id) if not recipient_sid: cls._cache_msg(sender.id, recipient.id, text, chat_id) return data = {'sender_id': sender.id, 'recipient_id': recipient.id, 'text': text, 'chat_id': chat_id or 'private', 'time': time()} app.socketio.emit('message', data, room=recipient_sid)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def notify(cls, user_id, message):\n # Find the subscription group for user.\n group = None if user_id is None else f\"user_{user_id}\"\n cls.broadcast(group=group, payload=message)", "def broadcast(msg):\r\n for user in clients:\r\n msg_client(msg, user)", "def broadcast(self, message):\n self._send('broadcast', message)", "def flash_broadcast(self,params):\n text = params['text']\n if self.participant:\n self.service.sendParticipants(self.name,'msg',{\"text\":text,\"sender\":self.name})\n else:\n self.notLoggedIn()", "def message(cls, user, message, context):\r\n q.enqueue(new_message_worker, args=(user, message, context), result_ttl=0)\r\n pass", "def message(message):\n\troom = session.get('room')\n\tprint('%s : message : %s' % (session, message['message']))\n\temit('_message', {'user_name': session.get('name'), 'message' : message['message']}, room=room, include_self=False)", "def broadcast(self, writer, message):\r\n for user in self.connection_pool:\r\n if user != writer:\r\n # We don't need to also broadcast to the user sending the message\r\n user.write(f\"{message}\\n\".encode())", "async def chat_message(self, event):\n if self.user and not self.user.is_authenticated:\n return\n\n user_id = event['user_id']\n message = event['message']\n created_at = event['created_at']\n publisher_full_name = event['publisher_full_name']\n\n await self.send(text_data=json.dumps({\n 'user_id': user_id,\n 'created_at': created_at,\n 'message': \"{}\".format(message),\n 'publisher_full_name': publisher_full_name,\n }))", "def chat(request):\n message = '{}: {}'.format(request.form['user'], request.form['message'])\n if message:\n ChatNamespace.broadcast('message', message)\n return Response()", "def send_message(userid):\n\tsc.api_call(\n\t\t\"chat.postMessage\",\n\t\tchannel=userid,\n\t\ttext=\"Hey there, just wanted to remind you to join <#CQCKS8UN6|secret-snowflake-fa19> by Wednesday night, if you want to participate in Secret Santa this year. 
It will be lots of fun!\",\n\t\tusername=\"Reminder\",\n\t\ticon_emoji=\":santa:\"\n\t)", "def _broadcast_message_to_users(self, message):\n self.logger.info(f\"Broadcasting message `{message}`\")\n for id, name in self.users.items():\n time.sleep(.1) # Telegram servers does not let you send more than 30 messages per second\n try:\n self.updater.bot.sendMessage(int(id), message)\n\n except BaseException as e:\n traceback.print_exc()\n self.logger.info(f'Failed to broadcast message to {name} due to {e}')", "async def new_message(self, message):\n user = self.scope['user']\n response_data = {\n 'message': message,\n 'username': user.get_full_name()\n }\n await self.create_chat_message(user, message)\n await self.channel_layer.group_send(\n self.conversation_name,\n {\n 'type': 'chat_message',\n 'response_data': json.dumps(response_data)\n }\n )", "def publish(self, info, user_id):\n del info\n event = {\"user_id\": user_id.value, \"payload\": self}\n\n return OnChatMessageSent(event=event)", "async def pm(self, ctx, user_id: int, *, message: str):\n user = discord.utils.get(ctx.bot.get_all_members(), id=user_id)\n e = discord.Embed(colour=discord.Colour.red(), description=message)\n\n if ctx.bot.user.avatar_url:\n e.set_author(\n name=f\"Message from {ctx.author} | {ctx.author.id}\",\n icon_url=ctx.bot.user.avatar_url,\n )\n else:\n e.set_author(name=f\"Message from {ctx.author} | {ctx.author.id}\")\n\n try:\n await user.send(embed=e)\n except discord.HTTPException:\n await ctx.send(\"Sorry, I couldn't deliver your message to {}\".format(user))\n else:\n await ctx.send(\"Message delivered to {}\".format(user))", "def send_message(self, message, user, msg_type=MSG_TYPE_MESSAGE):\n final_msg = {'room': str(self.id), 'message': message, 'username': user.username, 'msg_type': msg_type}\n\n # Send out the message to everyone in the room\n self.websocket_group.send(\n {\"text\": json.dumps(final_msg)}\n )", "def receiveMessage(self, user, message):\n pass", "def broadcast():\n # global receiving_message\n # if not receiving_message:\n router.broadcast(clients.copy(), json.dumps(current_state))", "def sendmessage(user,roomid):\n message = request.form['message']\n channel.send_message(user+roomid,message)", "async def chat_message(self, event):\n\n print(\"PublicChatConsumer\", \"chat_message from user\", event[\"user_id\"])\n await self.send_json({\n \"msg_type\": MSG_TYPE_MESSAGE,\n \"profile_image\": event[\"profile_image\"],\n \"username\": event[\"username\"],\n \"user_id\": event[\"user_id\"],\n \"message\": event[\"message\"],\n \"natural_timestamp\": humanize_or_normal(timezone.now())\n })", "def send_message(self, message, user, msg_type=MSG_TYPE_MESSAGE):\n final_msg = {'room': str(self.id), 'message': message, 'user_id': str(user.id), 'nombre': user.nombre, 'apellidos': user.apellidos, 'msg_type': msg_type}\n mensaje = Mensaje(mensaje=message, emisor=user, room=self)\n mensaje.save()\n self.websocket_group.send({\n 'text': json.dumps({\n 'mensaje': final_msg,\n 'type': 'message',\n 'msg_type': msg_type\n })\n })", "def broadcast(self, name, msg, color='yellow'):\n name = str(name)\n msg = str(msg)\n self.output.broadcasts.append({\n 'name': name,\n 'msg': msg,\n 'color': str(color),\n 'botname': self._botname,\n 'botowner': self._botowner,\n })\n self._set_lastsaid('[BROADCAST] {0}: {1}'.format(name, msg))", "def broadcast(update: Update, context: CallbackContext) -> None:\n \n if str(update.message.chat_id) == str(ADMIN_CONVERSATION_ID):\n update_string = update.message.text[11:]\n 
logger.info(\"Admin did a broadcast of \" + str(update_string))\n users_list = users_table.all()\n for user in users_list:\n if user['subscribed'] == \"True\":\n try:\n context.bot.send_message(user['user'], parse_mode='HTML', text=update_string)\n logger.info(\"Broadcasted message to user \" + str(user['user']))\n except:\n e = sys.exc_info()[0]\n logger.info(str(e))\n logger.info(\"Got an exception sending message to \" + str(user['user']))", "def websock_message(self, user, client, message):\n service = client.service\n self.websock_handlers[service]['new_message'](user, client, message)\n return", "def new_chat_message(cls, chatroom, text, sender):\n cls.broadcast(\n group=chatroom,\n payload={\"chatroom\": chatroom, \"text\": text, \"sender\": sender},\n )", "async def broadcast(self, msg):\n if not self._session:\n await self._create_session()\n \n if isinstance(msg, str):\n msg = Message(msg)\n assert isinstance(msg, Message)\n msg.set_recipient(-1)\n msg.set_sender(self._network._robot.id)\n await self._session.put(self._network.SERVER_ADDR + '/api/send', json=msg.to_dict())\n return msg", "def sendmessage(user,gameid):\n message = request.form['message']\n channel.send_message(user+gameid,message)", "def test_broadcast_message(self):\n\n typhoonae.websocket.broadcast_message('My broadcast message.')", "def SendMessage(service, user_id, message):\n\n message_resp = (service.users().messages().send(userId=user_id, body=message).execute())\n print(\"Sucessfull!!! \", message_resp)", "def mutate(self, info, message, user_id=None):\n del info\n assert self is None, \"Root `self` expected to be `None`!\"\n\n # Notify subscribers.\n OnChatMessageSent.notify(message=message, user_id=user_id)\n\n return SendChatMessage.Output(message=message, user_id=user_id)", "def onMessage(self):\n \"\"\"\n Validates that the received message is from a student and then broadcasts the message to the rest of the class.\n\n @param self: self is the instance of this object.\n @param message: the message that is received\n @param student: the student that sent the message\n \"\"\"\n pass", "def broadcast(self, msg):\n asyncio.run_coroutine_threadsafe(self.coro.broadcast(msg), self._robot._event_loop)", "def post(self, request):\n # get message from request\n message = request.POST.get('message')\n # create message\n ChatMessage.objects.create(\n message=message, _from=request.user, _to=User.objects.get(pk=request.POST.get('user_id')))\n # return response\n return HttpResponse('success')", "def notify_user(message, slack_id):\n return slack_client.api_call(\n \"chat.postMessage\",\n channel=slack_id,\n text=message,\n as_user=True,\n reply_broadcast=True,\n )", "def broadcast(self, message):\n for s in self.connections:\n s.send(bytes(message, encoding='utf-8'))", "def new_msg(cls, sender_id, recipient_id, text):\n\n sender = User.find(id=sender_id)\n sender_sid = cls.get_user_sid(sender.id)\n\n if is_group(recipient_id):\n\n recipient_group = Group.find(id=recipient_id)\n\n if not recipient_group:\n raise Exception('recipient was not found')\n if not recipient_group.has_user(sender):\n raise Exception('user is not a member of this group')\n\n cls._broadcast_group(sender, sender_sid,\n recipient_group, text)\n\n elif is_user(recipient_id):\n\n recipient = User.find(id=recipient_id)\n if not sender.is_friends(recipient):\n raise Exception('user is not friends with recipient')\n\n if recipient.blocked(sender):\n raise Exception('recipient has blocked you')\n\n if not recipient:\n raise Exception('recipient was 
not found')\n\n cls._broadcast_user(sender, sender_sid, recipient,\n text)\n\n else:\n\n raise Exception('bad recipient id')", "def send_whisper_message(self, channel, user, message):\r\n self._send(\"PRIVMSG #{0} :/w {1} {2}\".format(channel, user, message))", "def whisper(sock, user, msg):\r\n chat(sock, \"/w {} {}\".format(user, msg))", "def broadcast(msg):\n\n for sock in clients:\n sock.send(bytes(msg, \"utf-8\"))", "def send(self, msg):\n self.message('Me', msg)", "def privmsg(self, user, channel, incoming_message):\n sendTo = channel\n incoming_message = incoming_message.lower()\n\n if sendTo:\n response = get_response(incoming_message, user)\n if response:\n self.msg(sendTo, response)\n log.msg(\n \"sent message to {receiver}, triggered by {sender}:\\n\\t{quote}\"\n .format(receiver=sendTo, sender=sendTo, quote=None)\n )", "def broadcast(msg, prefix=\"\",ChatRoom=None): # prefix is for name identification. \n if not ChatRoom == None :\n for sock,name in ChatRooms[ChatRoom]:\n sock.send(bytes(prefix, \"utf8\")+msg)", "async def broadcast(self):\n with await self.redis as connection:\n await connection.execute_pubsub(\"subscribe\", self.channel)\n try:\n while True:\n room = await self.channel.get(encoding=\"utf-8\")\n await self.ws.send(message)\n except websockets.ConnectionClosed as e:\n print(f\"<ChatManager:broadcast>[error] {e}\")\n await self.connection_closed()", "def broadcast(self, clients, msg):\n self.server.broadcast(clients, msg)", "async def message(self, ctx:utils.Context, user:discord.User, *, content:str):\n\n await user.send(content)", "async def chat_message(self, event):\n await self.send_json(\n return_value(\n ACTION_MESSAGE,\n event['label'],\n event['username'],\n MSG_MESSAGE,\n event['message']\n )\n )", "def on_message(self, message):\n #print(f\"This message was sent: {message}\") # Writes to the console window (server side)\n self.write_message(f\"This message was sent: {message}\") # Writes message to sender", "def broadcast_message(user_ids, message, entities=None, sleep_between=0.4, parse_mode=None):\n logger.info(f\"Going to send message: '{message}' to {len(user_ids)} users\")\n\n for user_id in user_ids:\n try:\n send_message(user_id=user_id, text=message, entities=entities, parse_mode=parse_mode)\n logger.info(f\"Broadcast message was sent to {user_id}\")\n except Exception as e:\n logger.error(f\"Failed to send message to {user_id}, reason: {e}\" )\n time.sleep(max(sleep_between, 0.1))\n\n logger.info(\"Broadcast finished!\")", "def send_user_message(self, channel_id, message):\n self.slack_client.api_call('chat.postMessage', as_user='true', channel=channel_id, text=message)", "async def _dm(self, ctx, user: str, *, message: str = None):\n if user is None:\n await ctx.send(\"Provided no user to search for.\")\n return\n else:\n try:\n user = ctx.guild.get_member_named(user)\n if user is None:\n user = ctx.guild.get_member(int(user))\n except Exception as e:\n await ctx.send(f\"Failed to fetch user: {e}\")\n \n if user is None:\n await ctx.send(f\"Failed to find that user: {user}\")\n return\n\n if user.bot is True:\n await ctx.send(\"I cannot send messages to other bots pandejo.\")\n return\n\n if not user.dm_channel:\n await user.create_dm()\n try:\n e = discord.Embed(description=message, color=discord.Colour.blurple())\n e.set_author(name=f\"Message from {ctx.author}!\", icon_url=ctx.author.avatar_url)\n e.set_footer(text=f\"Sent at {arrow.now(tz='US/Eastern').strftime('%X')} EST\", icon_url=ctx.bot.user.avatar_url)\n await user.send(embed=e)\n 
await ctx.send(f\"Sent your message to {user}.\")\n except Exception as e:\n await ctx.send(f\"Failed to send message to {user}. {e}\")", "async def chat_message(self, event):\n message = event['message']\n await self.send_json({\n 'message': message\n })", "async def relay_message(message, current_user=None):\n users = [\n send_message(user, message)\n for user in app['websockets']\n if user is not current_user\n ]\n if not users:\n return\n\n await wait(users)", "def new_message(self, room, mess):\n pass", "def broadcast(self, msg_type, msg, t=5):\n return None", "def notify_user_message(self, user, session_id, message):\n if message is None:\n self.log.warning(\"None message received\")\n raise NotificationError(\"None message received\")\n\n # Message must be in the form \"CHAT|<message>\".\n msg_tokens = message.split('|')\n if len(msg_tokens) != 2 or msg_tokens[0] != \"CHAT\":\n self.log.warning(\"Wrong message received\")\n raise NotificationError(\"Wrong message received\")\n\n # Retrieves the user session info associated with the session_id.\n with self.sessions_lock:\n session_info = self.sessions.get(session_id)\n if session_info is None:\n raise NotificationError((\"Session lost! Please reload the \"\n \"browser page(s).\"))\n\n # Extracts from the info the IP and the user agent, to identify the\n # originator of the # message.\n ipaddress = session_info[\"REMOTE_IP\"]\n user_agent = session_info[\"USER_AGENT\"]\n\n # Sends the message to be pushed to the browsers.\n self.data_adapter.send_message(ipaddress, user_agent, msg_tokens[1])", "def broadcast_message(msg: str):\r\n\tfor ip in _clients.keys():\r\n\t\tsend_message(ip, msg)", "def broadcast(self,message_type,message):\n for socket in self.connections:\n if socket != self.server_socket:\n self.sendToSocket(socket,message_type,message)", "def send_message(self, message):\n msg_bytes = (\n f'{self.username}{self.delimiter}{message}'\n ).encode('utf-8')\n self.socket.writeDatagram(\n qtc.QByteArray(msg_bytes),\n qtn.QHostAddress.Broadcast,\n self.port\n )", "def broadcast(bot, event, *args):\n if args:\n subcmd = args[0]\n parameters = args[1:]\n if subcmd == \"info\":\n \"\"\"display broadcast data such as message and target rooms\"\"\"\n conv_info = [\"<b>{}</b> ... 
{}\".format(get_conv_name(_), _.id_) for _ in _internal[\"broadcast\"][\"conversations\"]]\n if not _internal[\"broadcast\"][\"message\"]:\n bot.send_message_parsed(event.conv, _(\"broadcast: no message set\"))\n return\n if not conv_info:\n bot.send_message_parsed(event.conv, _(\"broadcast: no conversations available\"))\n return\n bot.send_message_parsed(event.conv, _(\n \"<b>message:</b><br />\"\n \"{}<br />\"\n \"<b>to:</b><br />\"\n \"{}\".format(_internal[\"broadcast\"][\"message\"],\n \"<br />\".join(conv_info))))\n elif subcmd == \"message\":\n \"\"\"set broadcast message\"\"\"\n message = ' '.join(parameters)\n if message:\n if message.lower().strip().startswith(tuple([_.lower() for _ in bot._handlers.bot_command])):\n bot.send_message_parsed(event.conv, _(\"broadcast: message not allowed\"))\n return\n _internal[\"broadcast\"][\"message\"] = message\n else:\n bot.send_message_parsed(event.conv, _(\"broadcast: message must be supplied after subcommand\"))\n elif subcmd == \"add\":\n \"\"\"add conversations to a broadcast\"\"\"\n if parameters[0] == \"groups\":\n \"\"\"add all groups (chats with users > 2)\"\"\"\n for conv in bot.list_conversations():\n if len(conv.users) > 2:\n _internal[\"broadcast\"][\"conversations\"].append(conv)\n elif parameters[0] == \"ALL\":\n \"\"\"add EVERYTHING - try not to use this, will message 1-to-1s as well\"\"\"\n for conv in bot.list_conversations():\n _internal[\"broadcast\"][\"conversations\"].append(conv)\n else:\n \"\"\"add by wild card search of title or id\"\"\"\n search = \" \".join(parameters)\n for conv in bot.list_conversations():\n if search.lower() in get_conv_name(conv).lower() or search in conv.id_:\n _internal[\"broadcast\"][\"conversations\"].append(conv)\n _internal[\"broadcast\"][\"conversations\"] = list(set(_internal[\"broadcast\"][\"conversations\"]))\n bot.send_message_parsed(event.conv, _(\"broadcast: {} conversation(s)\".format(len(_internal[\"broadcast\"][\"conversations\"]))))\n elif subcmd == \"remove\":\n if parameters[0].lower() == \"all\":\n \"\"\"remove all conversations from broadcast\"\"\"\n _internal[\"broadcast\"][\"conversations\"] = []\n else:\n \"\"\"remove by wild card search of title or id\"\"\"\n search = \" \".join(parameters)\n removed = []\n for conv in _internal[\"broadcast\"][\"conversations\"]:\n if search.lower() in get_conv_name(conv).lower() or search in conv.id_:\n _internal[\"broadcast\"][\"conversations\"].remove(conv)\n removed.append(\"<b>{}</b> ({})\".format(get_conv_name(conv), conv.id_))\n if removed:\n bot.send_message_parsed(event.conv, _(\"broadcast: removed {}\".format(\", \".join(removed))))\n elif subcmd == \"NOW\":\n \"\"\"send the broadcast - no turning back!\"\"\"\n context = { \"explicit_relay\": True } # prevent echos across syncrooms\n for conv in _internal[\"broadcast\"][\"conversations\"]:\n bot.send_message_parsed(conv, _internal[\"broadcast\"][\"message\"], context=context)\n bot.send_message_parsed(event.conv, _(\"broadcast: message sent to {} chats\".format(len(_internal[\"broadcast\"][\"conversations\"]))))\n else:\n bot.send_message_parsed(event.conv, _(\"broadcast: /bot broadcast [info|message|add|remove|NOW] ...\"))\n else:\n bot.send_message_parsed(event.conv, _(\"broadcast: /bot broadcast [info|message|add|remove|NOW]\"))", "def __sendMessage(self):\n # TODO: Switch to this when implemented\n \n msg = self.ui.inputWidget.toPlainText()\n self.ui.inputWidget.clear()\n strv = StringView()\n strv.appendText(unicode(msg))\n self._amsn_conversation.sendMessage(strv)\n 
self.ui.textEdit.append(\"<b>/me says:</b><br>\"+unicode(msg)+\"\")", "def notify(guid, message):", "def on_me_joined(self, raw_msg, **kwargs):", "def broadcast(self, msg, mtype = 'message', back = True):\n for p in DixitConnection.participants:\n if back or (DixitConnection.participants[p] != self):\n DixitConnection.participants[p].emit(mtype, msg)", "def post(self, request):\n # get message from request\n message = request.POST.get('message')\n # create message\n\n ChatMessage.objects.create(\n message=message, _from=request.user, _to=request.user.client.trainer.user)\n # return response\n return HttpResponse('success')", "def broadcast(message):\n for client in CLIENTS:\n client.send(message)", "def broadcast(self, message, *args):\n\t\tmethod = getattr(self, message, None)\n\t\tif method:\n\t\t\tmethod(*args)", "def subscribe( self, mess, args):\n user = self.get_sender_username(mess)\n if user in self.users:\n return 'You are already subscribed.'\n else:\n self.users[user] = user\n self.invited.pop(user)\n self.message_queue.append('_%s has joined the channel_' % user)\n self.log.info('%s subscribed to the broadcast.' % user)\n self.save_state()\n return 'You are now subscribed.'", "def notify_channel(message):\n slack_client.api_call(\n \"chat.postMessage\",\n channel=\"#andela_socials\",\n text=message,\n as_user=True,\n reply_broadcast=True,\n )", "def on_message(self, message):\n print \"Client %s received a message : %s\" % (self.id, message)", "def message_routed(self, message):\n \n # Send it through the transport\n self.send_message(message = message)", "def on_message(self, message):\n self.write_message(u\"%s\" % message)", "def broadcast(bot, event, *args):\n if args:\n subcmd = args[0]\n parameters = args[1:]\n if subcmd == \"info\":\n \"\"\"display broadcast data such as message and target rooms\"\"\"\n\n conv_info = [ \"<b><pre>{}</pre></b> ... 
<pre>{}</pre>\".format(bot.conversations.get_name(convid), convid) \n for convid in _internal[\"broadcast\"][\"conversations\"] ]\n\n if not _internal[\"broadcast\"][\"message\"]:\n yield from bot.coro_send_message(event.conv, _(\"broadcast: no message set\"))\n return\n\n if not conv_info:\n yield from bot.coro_send_message(event.conv, _(\"broadcast: no conversations available\"))\n return\n\n yield from bot.coro_send_message(event.conv, _(\n \"<b>message:</b><br />\"\n \"{}<br />\"\n \"<b>to:</b><br />\"\n \"{}\".format(_internal[\"broadcast\"][\"message\"],\n \"<br />\".join(conv_info))))\n\n elif subcmd == \"message\":\n \"\"\"set broadcast message\"\"\"\n message = ' '.join(parameters)\n if message:\n if message.lower().strip().startswith(tuple([_.lower() for _ in bot._handlers.bot_command])):\n yield from bot.coro_send_message(event.conv, _(\"broadcast: message not allowed\"))\n return\n _internal[\"broadcast\"][\"message\"] = message\n\n else:\n yield from bot.coro_send_message(event.conv, _(\"broadcast: message must be supplied after subcommand\"))\n\n elif subcmd == \"add\":\n \"\"\"add conversations to a broadcast\"\"\"\n if parameters[0] == \"groups\":\n \"\"\"add all groups (chats with users > 1, bot not counted)\"\"\"\n for convid, convdata in bot.conversations.get().items():\n if(len(convdata[\"participants\"]) > 1):\n _internal[\"broadcast\"][\"conversations\"].append(convid)\n\n elif parameters[0] == \"ALL\":\n \"\"\"add EVERYTHING - try not to use this, will message 1-to-1s as well\"\"\"\n for convid, convdata in bot.conversations.get().items():\n _internal[\"broadcast\"][\"conversations\"].append(convid)\n\n else:\n \"\"\"add by wild card search of title or id\"\"\"\n search = \" \".join(parameters)\n for convid, convdata in bot.conversations.get().items():\n if search.lower() in convdata[\"title\"].lower() or search in convid:\n _internal[\"broadcast\"][\"conversations\"].append(convid)\n\n _internal[\"broadcast\"][\"conversations\"] = list(set(_internal[\"broadcast\"][\"conversations\"]))\n yield from bot.coro_send_message(event.conv, _(\"broadcast: {} conversation(s)\".format(len(_internal[\"broadcast\"][\"conversations\"]))))\n\n elif subcmd == \"remove\":\n if parameters[0].lower() == \"all\":\n \"\"\"remove all conversations from broadcast\"\"\"\n _internal[\"broadcast\"][\"conversations\"] = []\n\n else:\n \"\"\"remove by wild card search of title or id\"\"\"\n search = \" \".join(parameters)\n removed = []\n for convid in _internal[\"broadcast\"][\"conversations\"]:\n if search.lower() in bot.conversations.get_name(convid).lower() or search in convid:\n _internal[\"broadcast\"][\"conversations\"].remove(convid)\n removed.append(\"<b><pre>{}</pre></b> (<pre>{}</pre>)\".format(bot.conversations.get_name(convid), convid))\n\n if removed:\n yield from bot.coro_send_message(event.conv, _(\"broadcast: removed {}\".format(\", \".join(removed))))\n\n elif subcmd == \"NOW\":\n \"\"\"send the broadcast - no turning back!\"\"\"\n context = { \"explicit_relay\": True } # prevent echos across syncrooms\n for convid in _internal[\"broadcast\"][\"conversations\"]:\n yield from bot.coro_send_message(convid, _internal[\"broadcast\"][\"message\"], context=context)\n yield from bot.coro_send_message(event.conv, _(\"broadcast: message sent to {} chats\".format(len(_internal[\"broadcast\"][\"conversations\"]))))\n\n else:\n yield from bot.coro_send_message(event.conv, _(\"broadcast: /devilbot broadcast [info|message|add|remove|NOW] ...\"))\n\n else:\n yield from 
bot.coro_send_message(event.conv, _(\"broadcast: /devilbot broadcast [info|message|add|remove|NOW]\"))", "def send_message(user_id, message):\r\n try:\r\n service = get_service('token.pickle')\r\n message = (service.users().messages().send(userId=user_id, body=message).execute())\r\n print('Message Id: %s' % message['id'])\r\n return message\r\n\r\n except errors.HttpError as error:\r\n print('An error occurred: %s' % error)", "async def chat_message(self, event):\n await self.send(\n {'type': \"websocket.send\",\n 'text': event['response_data']}\n )", "def sendMessage(self, message):\n\t\tm = domish.Element((None, 'message'))\n\t\tm['from'] = self.jid\n\t\tm['to'] = self.room\n\t\tm['type'] = 'groupchat'\n\t\tm.addElement('body', content = message)\n\t\tself.xmlstream.send(m)", "async def chat_message(self, event):\n\t\t# Send a message down to the client\n\t\tprint(\"DocumentChatConsumer: chat_message from user #\" + str(event))\n\t\ttimestamp = calculate_timestamp(timezone.now())\n\t\tawait self.send_json(\n\t\t\t{\n\t\t\t\t\"msg_type\": MSG_TYPE_MESSAGE,\n\t\t\t\t\"annotationId\": event['annotationId'],\n\t\t\t\t\"username\": event[\"username\"],\n\t\t\t\t\"user_id\": event[\"user_id\"],\n\t\t\t\t\"xfdfString\": event[\"message\"],\n\t\t\t\t\"natural_timestamp\": timestamp,\n\t\t\t},\n\t\t)", "def flash_msg(self, params):\n if params.has_key('receiver'): name = params['receiver']\n else: \n if self.participant: \n group = self.service.groupOfParticipant(self.participant)\n if group: \n member_avail = filter(lambda x:x.status == LISTEN and x.name != self.name,group.members)\n if member_avail:\n member = member_avail.pop()\n name = member.name\n else:\n self.notLoggedIn()\n return\n if params.has_key('text'): text = params['text']\n else: return\n\n logger.writeLog(\"%s@%s said:'%s'\" % (self.name,self.transport.hostname,text))\n \n if self.participant:\n msgMethod = self.participant.directMessage\n try:\n self.service.sendParticipants(self.name,\"botmsg\",{\"text\":text,\"sender\":self.name})\n msgMethod(name,text)\n except:\n self.receiveDirectCommand(\"msg\",{\"sender\":\"MsgServ\",\"text\":\"cant send text, probably there is no user to listen\"})\n else:\n self.notLoggedIn()", "def message(self, msg):\n if msg['type'] in ('chat', 'normal'):\n msg.reply(\"Thanks for sending\\n%(body)s\" % msg).send()", "def broadcast(msg, prefix=\"\"): # prefix is for name identification.\r\n for sock in clients:\r\n sock.send(bytes(prefix, \"utf8\")+msg)", "def myself(self, mess, args):\n user = self.get_sender_username(mess)\n if user in self.users:\n self.message_queue.append('_%s %s_' % (self.users[user], args))\n self.log.info( '%s says %s in third person.' 
% (user, args))", "def recieved_message(json, methods=['GET', 'POST']):\n json['username'] = session['username']\n socketio.emit('server message', json)\n message = Message(\n user_id = session['user_id'],\n room_id = json[\"room_id\"],\n sendTime = datetime.now(),\n content = json[\"content\"]\n )\n db.session.add(message)\n db.session.commit()", "def joined(message):\n\tglobal GLOBAL_NUM_USERS\n\tGLOBAL_NUM_USERS = GLOBAL_NUM_USERS + 1\n\tprint(message)\n\tsession['name'] = message['name']\n\tsession['room'] = message['room']\n\troom = session.get('room')\n\tjoin_room(room)\n\tprint('%s : joined' % session)\n\temit('_joined', {'user_name': session.get('name'), 'num_users' : GLOBAL_NUM_USERS}, room=room)", "def broadcast(msg, prefix=\"\"): # prefix is for name identification.\n\tfor sock in clients:\n\t\tsock.send(bytes(prefix, \"utf8\")+msg)", "def consume_user_message(self, message):\n pass", "def send_message(self, text):\n self.redis.publish('message_to_user', json.dumps((self.operator_token, text)))", "def start_message(self, update, context):\n\n user = self.User(update)\n output = \"Greetings, we're happy that you decided to join and use the Bus4U service!\\n\" \\\n \"in order to see all the possible commands you can type /help\\n\" \\\n \"Also we want you to know that every command that you type and the server response will\" \\\n \"be logged and you can access your history with /history.\\n\\n\" \\\n \"we hope you'll enjoy the product and wish you the best.\\n Never Miss a Bus.\"\n user.send_message(output)\n self.data_base.log(user, \"*Showed Greeting Message*\")", "def message(cls, user, message, context):\n q.enqueue(foo, args=(user, message, context), result_ttl=0)\n pass", "def on_message(self, mid, author_id, author_name, message, metadata):\n message_type = \"user\"\n thread_id = metadata[\"delta\"][\"messageMetadata\"][\"threadKey\"]\n # print(\"%s said (in %s): %s\" % (author_id, thread_id, message))\n if \"threadFbId\" in thread_id:\n thread_id = thread_id[\"threadFbId\"]\n message_type = \"group\"\n else:\n thread_id = thread_id[\"otherUserFbId\"]\n\n print(message)\n sys.stdout.flush()\n if message.lower() == \"help\":\n response = [self.__doc__] + [mod.__doc__ for mod in self.modules]\n self.send(thread_id, \"\\n\".join(response), message_type=message_type)\n else:\n for mod in self.modules:\n response = mod.parse_message(message, author_id, self.uid,\n thread_id, metadata)\n if response is not None:\n self.send(thread_id, response, message_type=message_type)\n break", "def send_message(self, message):\n source_guid = str(uuid.uuid1())\n date = time.strftime(\"%H:%M:%S\")\n self.api.send_message(\n self.conversation_type,\n self.cid,\n source_guid,\n message[:1000]\n )\n if self.api.send_message(self.conversation_type, self.cid, source_guid, message):\n self.append_message(source_guid, 'me', date, message[:1000])\n if len(message) > 1000:\n self.send_message(message[1000:])", "def send_message(service, user_id, message):\r\n try:\r\n message = (service.users().messages().send(userId=user_id, body=message)\r\n .execute())\r\n print('Message Id: %s' % message['id'])\r\n return message\r\n except:\r\n print('An error occurred')", "def client(self,message):\n self.message = message\n self.run()", "def send_message(username, message):\n add_messages(username, message)\n return redirect(username)", "def broadcast(message):\n waiting = []\n try:\n while True:\n waiting.append(BROADCAST_QUEUE.get(block=False))\n except Empty:\n pass\n print('Broadcasting {} 
messages'.format(len(waiting)))\n for item in waiting:\n item.set(message)", "def broadcast(msg, prefix=\"\"): # prefix is for name identification.\n\n for sock in clients:\n sock.send(bytes(prefix, \"utf8\")+msg)", "def send_message(service, user_id, message):\n try:\n message = (service.users().messages().send(userId=user_id, body=message).execute())\n print(\"Message Id: %s\" % message['id'])\n return message\n except:\n print(\"An error occurred.\")", "def broadcast(self, msg):\n for client in self.clients.values():\n send_data(client.socket, msg)", "def on_message(data):\n pass", "def listen_channel_moderator_add(self, broadcaster_user_id: str, callback: CALLBACK_TYPE) -> str:\n return self._subscribe('channel.moderator.add',\n '1',\n {'broadcaster_user_id': broadcaster_user_id},\n callback)", "def key( self, mess, args):\n user = mess.getFrom()\n if user in self.users:\n return 'You are already subscribed.'\n else:\n self.users[user] = args\n self.log( '%s subscribed to the broadcast.' % user)\n return 'You are now subscribed.'", "def chatReceiveMessage(self, chat, user, message):\n self.on_message(user, message, False, False, chat)", "def message_new(\n self,\n event: Dict[str, Any]\n ) -> NoReturn:\n event = event[\"object\"][\"message\"]\n msg = event[\"text\"].lstrip(\"/\")\n peer_id = event[\"peer_id\"]\n from_id = event[\"from_id\"]\n msg_id = event[\"conversation_message_id\"]\n\n if peer_id in self.messages_to_delete:\n peer = CHAT_ID_OFFSET + config.USERBOT_CHATS[peer_id]\n new_messages_to_delete = []\n ids = []\n\n for item in self.messages_to_delete[peer_id]:\n if item['date'] > datetime.now():\n new_messages_to_delete.append(item)\n else:\n ids.append(item['id'])\n\n if new_messages_to_delete:\n self.messages_to_delete[peer_id] = new_messages_to_delete\n else:\n self.messages_to_delete.pop(peer_id)\n\n if ids:\n self.userbot.delete_messages(ids, peer)\n\n user = self.data.get_user(from_id, self) if from_id > 0 else None\n\n messages = self.get_messages(event)\n selected_message = messages[0] if len(messages) == 1 else None\n selected_user = (\n self.data.get_user(selected_message['from_id'], self)\n if selected_message and selected_message['from_id'] > 0 else None)\n\n try:\n self.commands.process(\n msg, peer_id, from_id, messages, msg_id,\n user, selected_user)\n except Exception as e:\n print(e)" ]
[ "0.7328094", "0.72830236", "0.71688133", "0.7001179", "0.6999174", "0.69816566", "0.6812357", "0.6784205", "0.6777518", "0.6767178", "0.6728205", "0.6724078", "0.6669847", "0.6652716", "0.6629219", "0.6618152", "0.6594717", "0.6588109", "0.6582829", "0.65520984", "0.6547883", "0.6518425", "0.64669234", "0.6435723", "0.6418103", "0.63906485", "0.63905007", "0.6355097", "0.6334074", "0.6329063", "0.6328876", "0.6328411", "0.6312109", "0.63119555", "0.630575", "0.62994087", "0.6288603", "0.6287981", "0.6285852", "0.62838376", "0.6282567", "0.62427574", "0.62364215", "0.6230955", "0.6223655", "0.622274", "0.6213873", "0.6211348", "0.620611", "0.6190643", "0.61760134", "0.6173314", "0.61715704", "0.6169332", "0.6163397", "0.6154062", "0.61532754", "0.61517537", "0.6148036", "0.61425954", "0.61310023", "0.6127888", "0.61227685", "0.61069643", "0.6101172", "0.60989493", "0.6098373", "0.6094402", "0.6087082", "0.6085835", "0.6082751", "0.6070633", "0.6069621", "0.6066216", "0.6063278", "0.6061094", "0.605547", "0.6045732", "0.60395414", "0.6038761", "0.6021561", "0.6016036", "0.60143447", "0.601326", "0.60129285", "0.60125875", "0.6010304", "0.59886765", "0.598833", "0.59780496", "0.59733105", "0.5973183", "0.59668314", "0.59589136", "0.5958123", "0.5954479", "0.59529227", "0.5952909", "0.5950319", "0.59492" ]
0.71615946
3
cache a message that failed to be delivered
def _cache_msg(cls, sender_id, recipient_id, text, chat_id=None): # todo make this method async message = Message.new(sender_id, recipient_id, text, chat_id) return message
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cache_message(self, comm_id, msg):\n if comm_id not in self._cached_messages:\n self._cached_messages[comm_id] = []\n self._cached_messages[comm_id].append(msg)", "def _mark_discarted_messages():\n\n max_retry_value = getattr(settings, \"DJMAIL_MAX_RETRY_NUMBER\", 3)\n queryset = models.Message.objects.filter(status=models.STATUS_FAILED,\n retry_count__gt=max_retry_value)\n return queryset.update(status=models.STATUS_DISCARTED)", "def _cache_response(self, packet):\n self.operator.update_message(packet.message_id, packet.from_node, packet.ret_parameters)", "def test_message_expiry(self):\n channel_layer.send(\"me_test\", {\"value\": \"blue\"})\n time.sleep(expiry_delay)\n channel, message = channel_layer.receive_many([\"me_test\"])\n self.assertIs(channel, None)\n self.assertIs(message, None)", "async def test_cache_miss_sent_mod_alert_text(self):\n self.cog.previous_overwrites.get.return_value = None\n await self.cog._unsilence(self.text_channel)\n self.cog._mod_alerts_channel.send.assert_awaited_once()", "def _log_cache_miss(self, job: Job, task_hash: str, args_hash: str) -> None:\n task = self.task_registry.get(hash=task_hash)\n reason = self.backend.explain_cache_miss(task, args_hash)\n\n if not reason:\n # No reason could be determined.\n self.log(\n \"{action} Job {job}: Cannot determine reason for cache miss of \"\n \"'{task_name}()' (task_hash={task_hash}, args_hash={args_hash}).\".format(\n action=\"Miss\".ljust(JOB_ACTION_WIDTH),\n job=job.id[:8],\n task_name=task.fullname,\n task_hash=task_hash[:8],\n args_hash=args_hash[:8],\n )\n )\n\n elif reason[\"reason\"] == \"new_task\":\n self.log(\n \"{action} Job {job}: New task '{task_name}()' with previous arguments \"\n \"(task_hash={old_hash} --> {new_hash}, args_hash={args_hash}).\".format(\n action=\"Miss\".ljust(JOB_ACTION_WIDTH),\n job=job.id[:8],\n task_name=task.fullname,\n old_hash=reason[\"call_task_hash\"][:8],\n new_hash=task_hash[:8],\n args_hash=args_hash[:8],\n )\n )\n\n elif reason[\"reason\"] == \"new_args\":\n self.log(\n \"{action} Job {job}: Existing task '{task_name}()' is called with new arguments \"\n \"(task_hash={task_hash}, args_hash={old_hash} --> {new_hash}).\".format(\n action=\"Miss\".ljust(JOB_ACTION_WIDTH),\n job=job.id[:8],\n task_name=task.fullname,\n task_hash=task_hash[:8],\n old_hash=reason[\"call_args_hash\"][:8],\n new_hash=args_hash[:8],\n )\n )\n\n elif reason[\"reason\"] == \"new_call\":\n self.log(\n \"{action} Job {job}: New task '{task_name}()' is called with new arguments \"\n \"(task_hash={task_hash}, args_hash={args_hash}).\".format(\n action=\"Miss\".ljust(JOB_ACTION_WIDTH),\n job=job.id[:8],\n task_name=task.fullname,\n task_hash=task_hash[:8],\n args_hash=args_hash[:8],\n )\n )\n\n else:\n raise NotImplementedError(reason)", "def _do_force_cache_miss(self):\n for note in self.inspire_record.get(\"_private_notes\", []):\n if note.get(\"value\") == \"orcid-push-force-cache-miss\":\n LOGGER.debug(\n \"OrcidPusher force cache miss\", recid=self.recid, orcid=self.orcid\n )\n return True\n return False", "def error(self, tag, message, undeliverable=False):\n dst = self.abspath('undeliverable' if undeliverable else 'rejected',\n '%s.tmp' % message.id)\n with open(dst, 'wb') as f:\n f.write((0).to_bytes(8, 'big'))\n f.write(message.encode())\n f.flush()\n os.fsync(f.fileno())\n\n os.rename(dst, dst.replace('.tmp','.amqp'))", "async def test_cache_miss_sent_mod_alert_voice(self):\n self.cog.previous_overwrites.get.return_value = None\n await self.cog._unsilence(MockVoiceChannel())\n 
self.cog._mod_alerts_channel.send.assert_awaited_once()", "def pop():\n task = connection.zrange(QUEUE_KEY, 0, 0)\n if not task:\n return False, 'No emails now!'\n msg_id = task[0]\n timestamp = connection.zscore(QUEUE_KEY, msg_id)\n now = datetime.datetime.now().timestamp()\n if timestamp < now or abs(timestamp - now) <= 1e-6:\n message = connection.get(msg_id)\n pipeline = connection.pipeline()\n pipeline.zrem(QUEUE_KEY, msg_id)\n pipeline.delete(msg_id)\n pipeline.execute()\n return True, message\n return False, \"It's too early now!\"", "def ungraded_response(self, queue_msg, queuekey):\r\n # check the queuekey against the saved queuekey\r\n if('queuestate' in self.input_state and self.input_state['queuestate'] == 'queued'\r\n and self.input_state['queuekey'] == queuekey):\r\n msg = self._parse_data(queue_msg)\r\n # save the queue message so that it can be rendered later\r\n self.input_state['queue_msg'] = msg\r\n self.input_state['queuestate'] = None\r\n self.input_state['queuekey'] = None", "def _get_cached_response(self, message_id, from_node):\n return self.operator.get_message_item(message_id, from_node)", "def dead_lettering_on_message_expiration(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"dead_lettering_on_message_expiration\")", "def tryToSend( self, message ):\n if self.free:\n self.free = False\n yield self.writeToSerial( message )\n elif len( self.queue ) > MAX_QUEUE_SIZE:\n raise Exception( 'Queue size exceeded')\n else: self.queue.append( message )", "async def poll_message(self):\n message_cache = self.message_cache\n if (message_cache is not None) and message_cache:\n return message_cache.pop()\n \n if not self.message_request_more:\n return\n \n message_cache = await self.client.message_get_chunk(self.source_channel, after = self.last_message_id)\n self.message_cache = message_cache\n \n if len(message_cache) < 100:\n self.message_request_more = False\n \n if message_cache:\n return message_cache.pop()", "def failed(self, message, reason=None):\n failed_mail.send(\n sender=self.__class__,\n message=message,\n reason=reason\n )", "def cache_issue(self, msg, args):\n self._asset_bind(msg)\n yield (\"Processing....\")\n trans = self._translation_util(msg)\n query = \"repo:{} is:open type:issue\".format(\n task_repository_name()\n )\n res = trans.cache_issues(query, OPEN_CACHE, MAX_RESULT)\n yield \"{} records had been cached\".format(res)", "def _on_invalid_call(self, msg):\r\n # Workaround: Maybe a bug in their server software,\r\n # I don't know what's missing. Its all poorly documented :-(\r\n # Sometimes some API calls fail the first time for no reason,\r\n # if this happens just send them again. 
This happens only\r\n # somtimes (10%) and sending them again will eventually succeed.\r\n\r\n if msg[\"id\"] == \"idkey\":\r\n self.debug(\"### resending private/idkey\")\r\n self.client.send_signed_call(\r\n \"private/idkey\", {}, \"idkey\")\r\n\r\n elif msg[\"id\"] == \"info\":\r\n self.debug(\"### resending private/info\")\r\n self.client.send_signed_call(\r\n \"private/info\", {}, \"info\")\r\n\r\n elif msg[\"id\"] == \"orders\":\r\n self.debug(\"### resending private/orders\")\r\n self.client.send_signed_call(\r\n \"private/orders\", {}, \"orders\")\r\n\r\n elif \"order_add:\" in msg[\"id\"]:\r\n parts = msg[\"id\"].split(\":\")\r\n typ = parts[1]\r\n price = int(parts[2])\r\n volume = int(parts[3])\r\n self.debug(\"### resending failed\", msg[\"id\"])\r\n self.client.send_order_add(typ, price, volume)\r\n\r\n elif \"order_cancel:\" in msg[\"id\"]:\r\n parts = msg[\"id\"].split(\":\")\r\n oid = parts[1]\r\n self.debug(\"### resending failed\", msg[\"id\"])\r\n self.client.send_order_cancel(oid)\r\n\r\n else:\r\n self.debug(\"### _on_invalid_call() ignoring:\", msg)", "def test_previously_sent_message_not_sent_twice(self):\n thread = self.create_thread()\n message = thread.first_message\n message.sent = True\n message.save()\n\n send_message(message.pk)\n\n self.assertFalse(self.groupnotify_mock.called)", "def __handleCacheMiss(self, tr):\n logger.debug(\"WECache miss: %s\", printTR(tr))\n # determine next batch of keys to read\n toRead = self.keys()\n toRead = toRead[toRead.index(tr):]\n toRead = sorted(set(toRead) - self._populated, key=lambda t: t[0])\n toRead = toRead[:self._batchSize]\n\n # if not room to read another batch\n if len(self._populated) + self._batchSize > self._maxCacheSize:\n toFlush = []\n # first flush dirty grids\n toFlush += self._populated & self._dirty\n\n # then flush clean grids if necessary\n toFlush += self._populated - self._dirty\n\n # flush only enough to read a batch\n toFlush = sorted(toFlush, key=lambda t: t[0])\n toFlush = toFlush[:self._maxCacheSize - self._batchSize]\n self.__flushGrids(toFlush)\n\n self.__loadGrids(toRead)", "def _timeout(self):\n if self._store_timeout > 0 and (not self._messages.empty()):\n \n # Update Timestamp\n timestamp = 0\n t = datetime.datetime.today()\n timestamp = t.microsecond/1000 + t.second*1000 + \\\n t.minute*60*1000 + t.hour*60*60*1000 + t.day*24*60*60*1000\n while timestamp > 4294967295: timestamp -= 4294967295\n \n # Remove Timeout Messages\n while (not self._messages.empty()):\n msg_time = self._messages.queue[0][0]\n if (timestamp - msg_time >= self._store_timeout) or\\\n (timestamp < msg_time and 4294967295 - \\\n msg_time + timestamp >= self._store_timeout):\n logger.warning(\"%s: message store timeout occurred.\" %\\\n (self.__class__.__name__))\n self._messages.get()\n else:\n break", "def lastMessageReceived():", "def _cacheAnnouncement():\n confs = Conference.query(ndb.AND(\n Conference.seatsAvailable <= 5,\n Conference.seatsAvailable > 0)\n ).fetch(projection=[Conference.name])\n if confs:\n # If there are conferences close to being sold out,\n # format announcement and set it in memcache\n announcement = '%s %s' % (\n 'Last chance to attend! 
The following conferences '\n 'are nearly sold out:',\n ', '.join(conf.name for conf in confs))\n memcache.set(MEMCACHE_ANNOUNCEMENTS_KEY, announcement)\n else:\n # If there are no sold out conferences,\n # delete the memcache announcements entry\n announcement = \"\"\n memcache.delete(MEMCACHE_ANNOUNCEMENTS_KEY)\n return announcement", "def handle_notfound(self, message):\n cmd = self._popMatchingCmd(message)\n if cmd is not None:\n cmd.success(None)", "def ReinjectMessage(thread_number=0, item={}, metadata={}):\n locator = item['locator']\n message_number = item['message_number']\n\n message_count = metadata['message_count']\n xoauth_string = metadata['xoauth_string']\n imap_debug = metadata['imap_debug']\n restrict_domains = metadata['restrict_domains']\n remove_from_subject = metadata['remove_from_subject']\n label = metadata['label']\n query = metadata['query']\n completed_label = metadata['completed_label']\n\n try:\n imap_connection = IMAPConnection(xoauth_string=xoauth_string,\n imap_debug=imap_debug)\n except:\n return False\n\n try:\n imap_connection.GetMessageLocatorsInLabel(label, query)\n except:\n return False\n\n logging.info('Thread #%s - Fetching message #%s of %s (%s).',\n thread_number, message_number, message_count, locator)\n try:\n message = imap_connection.GetMessage(locator)\n except:\n return False\n\n headers = Parser().parsestr(message)\n\n if headers['From']:\n sender = re.findall('[a-z0-9\\.\\-\\+\\']*@[a-z0-9\\.]*',\n headers['From'].lower())\n elif headers['Sender']:\n sender = re.findall('[a-z0-9\\.\\-\\+\\']*@[a-z0-9\\.]*',\n headers['Sender'].lower())\n \n addresses = []\n if headers['To']:\n addresses.extend(re.findall('[a-z0-9\\.\\-\\+\\']*@[a-z0-9\\.]*',\n headers['To'].lower()))\n if headers['Cc']:\n addresses.extend(re.findall('[a-z0-9\\.\\-\\+\\']*@[a-z0-9\\.]*',\n headers['Cc'].lower()))\n if headers['X-Forwarded-To']:\n addresses.extend(re.findall('[a-z0-9\\.\\-\\+\\']*@[a-z0-9\\.]*',\n headers['X-Forwarded-To'].lower()))\n\n groups = re.findall('received: by .* '\n 'for <[a-z0-9\\.\\-\\+\\']*@[a-z0-9\\.]*>;',\n message.lower())\n for group in groups:\n addresses.extend(re.findall('[a-z0-9\\.\\-\\+\\']*@[a-z0-9\\.]*', group))\n\n if restrict_domains:\n new_addresses = []\n for address in addresses:\n for domain in restrict_domains:\n if address[0 - len(domain):] == domain:\n new_addresses.append(address)\n\n addresses = new_addresses\n\n if addresses:\n if remove_from_subject:\n if headers['Subject'].find(remove_from_subject) == 0:\n message = re.sub('Subject: %s' % remove_from_subject, 'Subject: ',\n message)\n new_headers = Parser().parsestr(message)\n\n logging.info(' Thread #%s - Reinjecting message #%s.' % (thread_number,\n message_number))\n smtp_connection = smtplib.SMTP('aspmx.l.google.com')\n smtp_connection.sendmail(sender, addresses, message)\n smtp_connection.quit()\n\n if completed_label:\n imap_connection.AddLabel(locator, completed_label)\n else:\n logging.info(' No acceptable addresses to reinject to. 
Skipping.')\n\n imap_connection.Close()\n\n return True", "def _store_response_for_duplicates(self, message):\n\n key = (message.remote, message.mid)\n if key in self._recent_messages:\n self._recent_messages[key] = message", "def _failed(self, msg):\n self.log(msg)\n self.result.passed = False\n self.result.add_error(msg)\n self.log(u\"Failed\")", "def send_failed(self, message, exc=None):\n with self.app.peers_lock:\n self.declare_no_connection(self.app.peers[message.to])\n return None", "def _retry_occurred(self):", "def restartFailed(self):\n # failed should not be in cache anymore, so working on db is sufficient\n self.db.restartFailed()", "def _on_too_many_orders(self, msg):\r\n self.debug(\"### Server said: '%s\" % msg[\"message\"])\r\n self.count_submitted -= 1\r\n self.signal_order_too_fast(self, msg)", "def put_message(cls, message):\n rp = cls.get()\n rp.queue_receive.put(message)", "def message_already_processed(msg):\n\n is_already_member = redis.sismember(redis_sqs_message_set, msg.message_id)\n if not is_already_member:\n redis.sadd(redis_sqs_message_set, msg.message_id)\n\n return is_already_member", "def test_group_message_eviction(self):\n # Add things to a group and send a message that should expire\n self.channel_layer.group_add(\"tgme_group\", \"tgme_test\")\n self.channel_layer.send_group(\"tgme_group\", {\"value\": \"blue\"})\n # Wait message expiry plus a tiny bit (must sum to less than group expiry)\n time.sleep(1.2)\n # Send new message to group, ensure message never arrives\n self.channel_layer.send_group(\"tgme_group\", {\"value\": \"blue\"})\n channel, message = self.receive([\"tgme_test\"])\n self.assertIs(channel, None)\n self.assertIs(message, None)", "def fail(message):\n utils.logit(\"critical\", \"Failed dict.get routine. 
Error --> {}\".\n format(message), 0)", "def tc_fail(self, msg):\n self.recover()\n tc_fail(msg)", "def ecute(self):\n msg = self.up_queue_recv_socket.recv()\n result, e = self.up_queue.get()\n if e is not None:\n raise e\n return result", "def _mark_cache_invalid(self):\n self.cache_valid = False\n self.pca_results = None", "def check(self, message: Message):\n raise NoDeliveryCheck # pragma: no cover", "def check_and_send_message_to_queue(queue_url, str_message):\n msg_str, msg_sent_timestamp, receipt_handle = lib.get_from_sqs_queue(queue_url, 20, 5)\n\n if not msg_str:\n logger.warning('Unable to retrieve message during this cycle.')\n return \n msg_data = json.loads(msg_str)\n \n msg_ts = float(msg_sent_timestamp) * 0.001\n logger.info('Message from queue: {}'.format(msg_data))\n current_time = time.time()\n\n logger.info('msg ts: {} current ts: {}'.format(msg_ts, current_time))\n\n if (current_time - msg_ts) > 259200:\n logger.info('Message in queue needs to be updated')\n lib.send_message_to_queue(queue_url, str_message)\n lib.delete_message_from_queue(queue_url, receipt_handle) \n else:\n logger.info('Message in queue is still current.')", "def process_non_adherent_response(self, sender, message, response):\n\t\tnow = datetime.datetime.now()\n\t\tmessage.datetime_responded = now\n\t\tmessage.save()\n\t\traise Exception(\"Not yet implemented\")", "def messageReceived(self, source, message):\n if not self.myKey:\n self.myKey = open('keyfil').read().strip('\\n').strip()\n\n key = self.myKey \n rmesg = self.decodeMessage(key, message)\n\n if not rmesg:\n print \"CRC error - bailing out\"\n return\n \n messageId, message = self.getID(rmesg)\n #print \"THIVE IN\", messageId, message, self.messageDeferreds\n\n if messageId in self.messageDeferreds:\n self.messageDeferreds[messageId].callback(message)\n reactor.callLater(10, self.cleanDeferred, messageId)\n else:\n # Defer this action\n reactor.callLater(0.05, self.messageHandler, source, message, messageId)", "def test_post_process_forwarder_bad_message(kafka_message_payload):\n forwarder = PostProcessForwarderWorker(concurrency=1)\n\n # Use a version which does not exist to create a bad message\n kafka_message_payload[0] = 100\n mock_message = Mock()\n mock_message.value = MagicMock(return_value=json.dumps(kafka_message_payload))\n mock_message.partition = MagicMock(\"1\")\n\n future = forwarder.process_message(mock_message)\n\n with pytest.raises(InvalidVersion):\n forwarder.flush_batch([future])\n\n forwarder.shutdown()", "def handle_response(self, response):\n with self.lock:\n req_id, status, message = response\n if req_id in self.pending_requests: # request may have timed out\n self.pending_requests[req_id].set((status, message))", "def opt_out(msg_hash):\r\n email, added = Email.handler.opt_out(msg_hash)\r\n if email and added:\r\n Email.handler.add_to_queue(None, None, [email], \"reddit.com\",\r\n datetime.datetime.now(g.tz),\r\n '127.0.0.1', Email.Kind.OPTOUT)\r\n return email, added", "def check_delivered_messages(results):\n assert results[\"metrics\"][\"Delivered messages\"] == 20", "def recover(self):\n if self._message_storage:\n for neighbor in self.neighbors:\n self.channel.queue_declare(queue=str(self.id) + str(neighbor))\n for message in self._message_storage:\n self.channel.basic_publish(\n exchange=\"\",\n routing_key=str(self.id) + str(neighbor),\n body=message,\n )\n\n for neighbor in self.neighbors:\n for _, _, body in self.channel.consume(\n queue=str(neighbor) + str(self.id), auto_ack=True, inactivity_timeout=5\n 
):\n if body is not None:\n message = body.decode(\"utf-8\")\n if message != \"marker\":\n self.states.append(message)\n else:\n self.channel.cancel()", "def _chk_empty(self, queue, receiver):\n try:\n msg = receiver.fetch(timeout=0)\n self.assert_(False, \"Queue \\\"%s\\\" not empty: found message: %s\" % (queue, msg))\n except Empty:\n pass", "def receive_message(self, message):\r\n return", "def _deduplicate_message(self, message):\n\n key = (message.remote, message.mid)\n if key in self._recent_messages:\n if message.mtype is CON:\n if self._recent_messages[key] is not None:\n self.log.info('Duplicate CON received, sending old response again')\n # not going via send_message because that would strip the\n # mid and might do all other sorts of checks\n self._send_initially(self._recent_messages[key])\n else:\n self.log.info('Duplicate CON received, no response to send yet')\n else:\n self.log.info('Duplicate NON, ACK or RST received')\n return True\n else:\n self.log.debug('New unique message received')\n self.loop.call_later(message.transport_tuning.EXCHANGE_LIFETIME, functools.partial(self._recent_messages.pop, key))\n self._recent_messages[key] = None\n return False", "def test_cache_get_non_existent_item(self):\n self.assertEqual(self.cache.get('ghost'), None)\n self.assertEqual(self.cache.get('ghost', 'never exists'), 'never exists')", "def testUnsuccessfulIncrement(self):\n\n cache = self.stub._cache\n self.stub._cache = {}\n\n memcache.incr('somekey')\n\n self.stub._cache = cache", "def tryToSend( self, channel, value ):\n if self.free:\n self.free = False\n self.writeToSerial( channel, value )\n elif len( self.queue ) > MAX_QUEUE_SIZE:\n raise DCBoxError( 2 )\n else: self.queue.append( ( channel, value ) )", "def test_redelivery_of_rejected_messages(self):\n yield self.connect('127.0.0.1', self.pbPort)\n\n localConfig = copy.copy(self.defaultConfig)\n localConfig.id = '#67-%s' % randint(10, 99)\n localConfig.submit_sm_throughput = 3\n yield self.add(localConfig)\n yield self.start(localConfig.id)\n\n # Send 60 messages to the queue\n startAt = datetime.now()\n submitCounter = 0\n submit_sm_pdu = copy.copy(self.SubmitSmPDU)\n while submitCounter < 60:\n submit_sm_pdu.params['short_message'] = '%s' % submitCounter\n yield self.submit_sm(localConfig.id, submit_sm_pdu, self.SubmitSmBill.user.uid)\n submitCounter += 1\n\n receivedSubmits = self.SMSCPort.factory.lastClient.submitRecords\n counter = 0\n _receivedSubmitsCount = 0\n # Wait for 40 seconds before checking if all submits were delivered\n # It will check for throughput in each iteration\n while counter < 30:\n receivedSubmits = self.SMSCPort.factory.lastClient.submitRecords\n\n _receivedSubmitsCount = len(receivedSubmits)\n\n # Wait some time\n yield waitFor(1)\n\n counter += 1\n endAt = datetime.now()\n\n yield self.stop(localConfig.id)\n\n # Wait for unbound state\n yield waitFor(2)\n\n # Assertions\n # Take the lastClient (and unique one) and assert received message\n self.assertEqual(len(self.SMSCPort.factory.lastClient.submitRecords), 60)", "def _process_unknown_message(self, msg: Message) -> NoReturn:\n dev = self.reddit.redditor('barrycarey')\n try:\n dev.message(f'FWD: {msg}', f'From {msg.author.name}\\n\\n{msg.body}')\n msg.reply(\n 'Thank you for your message. This inbox is not monitored. 
I have forwarded your message to the developer')\n except Exception as e:\n log.exception('Failed to send message to dev', exc_info=True)", "def _cache_get(self, metric_name):\n pass", "def send_mail_when_failed(self, body):\r\n pass", "def test_failed_deliveries_logging(self):\n sms = SMS.objects.create(to='+6280000000000', status=STATUS.queued,\n backend_alias='error')\n call_command('send_queued_sms', log_level=0)\n self.assertEqual(sms.logs.count(), 0)\n\n sms = SMS.objects.create(to='+6280000000000', status=STATUS.queued,\n backend_alias='error')\n call_command('send_queued_sms', log_level=1)\n self.assertEqual(sms.logs.count(), 1)\n\n sms = SMS.objects.create(to='+6280000000000', status=STATUS.queued,\n backend_alias='error')\n call_command('send_queued_sms', log_level=2)\n self.assertEqual(sms.logs.count(), 1)", "def test_handle_response_value_message_wrong_key(self):\n lookup = Lookup(FindValue, self.target, self.node, self.event_loop)\n uuids = [uuid for uuid in lookup.pending_requests.keys()]\n uuid = uuids[0]\n contact = lookup.shortlist[0]\n msg = Value(uuid, self.node.network_id, self.node.network_id,\n self.reply_port, self.version, self.seal, 'f00baa',\n 'value', time.time(), time.time() + 99999, self.version,\n PUBLIC_KEY, 'name', 'signature')\n response = asyncio.Future()\n response.set_result(msg)\n lookup._blacklist = mock.MagicMock()\n lookup._handle_error = mock.MagicMock()\n lookup._handle_response(uuid, contact, response)\n lookup._blacklist.assert_called_once_with(contact)\n self.assertEqual(lookup._handle_error.call_count, 1)\n args = lookup._handle_error.call_args[0]\n self.assertEqual(args[0], uuid)\n self.assertEqual(args[1], contact)\n self.assertIsInstance(args[2], ValueError)\n self.assertEqual(args[2].args[0],\n \"Value with wrong key returned by {}\"\n .format(contact))", "async def test_handle_backmsg_handles_exceptions(self):\n session = _create_test_session(asyncio.get_running_loop())\n with patch.object(\n session, \"handle_backmsg_exception\"\n ) as handle_backmsg_exception, patch.object(\n session, \"_handle_clear_cache_request\"\n ) as handle_clear_cache_request:\n error = Exception(\"explode!\")\n handle_clear_cache_request.side_effect = error\n\n msg = BackMsg()\n msg.clear_cache = True\n session.handle_backmsg(msg)\n\n handle_clear_cache_request.assert_called_once()\n handle_backmsg_exception.assert_called_once_with(error)", "def on_message(message, data):\n if message[\"type\"] != \"error\":\n self.q.put(message[\"payload\"])", "def tick_bad_request_counter(self, request):\r\n self.cache_incr(self.get_cache_key(request))", "def _cached_event_message(event: Event) -> str:\n return message_to_json({\"id\": IDEN_TEMPLATE, \"type\": \"event\", \"event\": event})", "def test_message_missing_body(self):\n receipt_handle = 'blah'\n msg = [{\"ReceiptHandle\": receipt_handle}]\n with patch.object(self.dead_letter, 'remove_message_from_queue') as dequeue_fake:\n self.dead_letter.handle_messages(msg)\n\n # Ensure message dequeued.\n dequeue_fake.assert_called_with(receipt_handle)", "def put(self, msg):\n self.received.put(msg)", "def get(self, msg):\n\n # print(\"get\")\n self.q.put(msg)", "def opt_out(self, msg_hash):\r\n email = self.get_recipient(msg_hash)\r\n if email:\r\n o = self.opt_table\r\n try:\r\n o.insert().execute({o.c.email: email, o.c.msg_hash: msg_hash})\r\n clear_memo('r2.models.mail_queue.has_opted_out', \r\n email)\r\n clear_memo('r2.models.mail_queue.opt_count')\r\n return (email, True)\r\n except sa.exceptions.SQLError:\r\n return 
(email, False)\r\n return (None, False)", "def error_pipe(self, mail):\n\t\tif self.caching:\n\t\t\tself.failed_mails.append(mail)\n\t\telif self.failure_path:\n\t\t\tself.output_mail(open(os.path.normpath(self.failure_path), \"a\"), mail)", "def failed(self, message=None):\n doc = {self.STATE: self.STATE_FAILED}\n\n if message:\n doc.update({self.ERROR_MESSAGE: message})\n\n self.update(doc)", "def process_message(self, message):\n self.post_to_redis(message)\n return", "def _reconsile_hits(self, broker, ftd_msgs, release_hits):\n for msg in ftd_msgs:\n found = False\n for hit in release_hits:\n if str(msg.id) in hit:\n release_hits.remove(hit)\n #print \"Found %s in %s\" % (msg.id, broker.log)\n found = True\n break\n if not found:\n self.assert_(False, \"Unable to locate released message %s in log %s\" % (msg.id, broker.log))\n if len(release_hits) > 0:\n err = \"Messages were unexpectedly released in log %s:\\n\" % broker.log\n for hit in release_hits:\n err += \" %s\\n\" % hit\n self.assert_(False, err)", "def recv(self):\n\t\tmsg = self.pb.recv()\n\n\t\tif msg.get(0) == \"timeout\":\n\t\t\tprint \"You failed to find Toby before the time ran out!\"\n\t\t\tself.cleanup()\n\t\telif msg.get(0) == \"toby\":\n\t\t\tprint \"You found Toby. Good job!\"\n\t\t\tself.cleanup()\n\t\telif msg.get(0) == \"dead\":\n\t\t\tprint \"You died!\"\n\t\t\tself.cleanup()\n\n\t\treturn msg", "def getAnnouncement(self, request):\n announcement = memcache.get(MEMCACHE_ANNOUNCEMENTS_KEY) or \"\"\n return StringMessage(data=announcement)", "def handle_failure(self, instance, sender=None, reason=None):\n send_email.delay(\n to_email=sender,\n context={\"reason\": reason},\n subject=\"Kunne ikke sende ut begrenset epost\",\n plain_template=\"restricted/email/process_failure.txt\",\n html_template=\"restricted/email/process_failure.html\",\n )", "def process_message(msg):\n\n if message_already_processed(msg):\n print(\"%s is already processed\" % msg.message_id)\n else:\n _tally_message(msg)\n msg.delete()", "def process_message(self, msg, src):", "def func_denied(self, message):\n log_th.log_info('{} - {} wrong entry : \"{}\"'.format(self.message_id, self.client_ip, message))\n self.func_sender(message)", "def feed_update_failure(message_data, exception_data):\n feed_id = message_data['args'][0]\n feed = Feed.objects.get(pk=feed_id)\n\n # mark feed as failed to update and stop updateing it automatically\n feed.flagged = True\n feed.save()\n\n notification = Notification(feed=feed, owner=feed.owner, title=exception_data['type'], message=exception_data['message']+f'[Feed: {feed.id}, {feed.link}]', is_error=True)\n notification.save()\n print(\"dramatiq callback: feed update error\")", "def process_refill_questionnaire_response(self, sender, message, response):\n\t\tnow = datetime.datetime.now()\n\t\tmessage.datetime_responded = now\n\t\tmessage.save()\n\n\t\tdef process_response(return_message_type):\n\t\t\tfor feedback in message.feedbacks.all():\n\t\t\t\tfeedback.note = Message.REFILL_QUESTIONNAIRE_RESPONSE_DICTIONARY[response.upper()]\n\t\t\t\tfeedback.save()\n\t\t\ttemplate = 'messages/refill_questionnaire_responses/' + \\\n\t\t\t Message.REFILL_QUESTIONNAIRE_RESPONSE_DICTIONARY[response.upper()] + \\\n\t\t\t '.txt'\n\t\t\tcontent = render_to_string(template)\n\t\t\tnew_m = Message.objects.create(to=sender, _type=return_message_type, content=content, previous_message=message)\n\t\t\treturn HttpResponse(content=content, content_type='text/plain')\n\n\n\t\t# Switch on type of response\n\t\t# a - Haven't gotten 
the chance\n\t\tif response.lower() == 'a':\n\t\t\t# Schedule a medication reminder for later\n\t\t\tone_hour = datetime.datetime.now() + datetime.timedelta(hours=1)\n\n\t\t\t# Send response\n\t\t\treturn process_response(Message.STATIC_ONE_OFF)\n\n\t\t# b - Too expensive\n\t\telif response.lower() == 'b':\n\t\t\t#TODO(mgaba): Figure out what else should happen if someone needs to refill\n\t\t\t# Send response\n\t\t\treturn process_response(Message.STATIC_ONE_OFF)\n\n\t\t# c - Concerned about side effects\n\t\telif response.lower() == 'c':\n\t\t\t#TODO(mgaba): Figure out what else should happen if someone has side effects\n\t\t\t#TODO(mgaba): Add doctors name to personalize messages\n\t\t\t# Send response\n\t\t\treturn process_response(Message.STATIC_ONE_OFF)\n\n\t\t# d - Other\n\t\telif response.lower() == 'd':\n\t\t\t#TODO(mgaba): Add doctors name to personalize messages\n\t\t\treturn process_response(Message.OPEN_ENDED_QUESTION)\n\n\t\t# Unknown response\n\t\telse:\n\t\t\tmessage.datetime_responded = None\n\t\t\tmessage.save()\n\t\t\ttemplate = 'messages/unknown_response.txt'\n\t\t\tcontent = render_to_string(template)\n\t\t\tnew_m = Message.objects.create(to=sender, _type=Message.STATIC_ONE_OFF, content=content)\n\t\t\treturn HttpResponse(content=content, content_type='text/plain')", "def test_redelivery_of_rejected_messages_after_restart(self):\n yield self.connect('127.0.0.1', self.pbPort)\n\n localConfig = copy.copy(self.defaultConfig)\n localConfig.id = '#67-%s' % randint(10, 9999)\n localConfig.requeue_delay = 1\n localConfig.submit_sm_throughput = 1\n yield self.add(localConfig)\n yield self.start(localConfig.id)\n\n # Send 4 messages to the queue\n submitCounter = 0\n submit_sm_pdu = copy.copy(self.SubmitSmPDU)\n while submitCounter < 4:\n submit_sm_pdu.params['short_message'] = '%s' % submitCounter\n msgid = yield self.submit_sm(localConfig.id, submit_sm_pdu, self.SubmitSmBill.user.uid)\n submitCounter += 1\n\n # Wait for 5 seconds before stopping\n yield waitFor(5)\n\n yield self.stop(localConfig.id)\n\n # Wait for unbound state\n yield waitFor(5)\n\n # Save the count before starting the connector\n _submitRecordsCount = len(self.SMSCPort.factory.lastClient.submitRecords)\n\n # Wait for 5 seconds before starting again\n yield waitFor(5)\n\n # Start the connector again\n yield self.start(localConfig.id)\n\n # Wait for 10 seconds before stopping , all the rest of the queue must be sent\n yield waitFor(10)\n\n yield self.stop(localConfig.id)\n\n # Wait for unbound state\n yield waitFor(10)\n\n # Update the counter\n _submitRecordsCount += len(self.SMSCPort.factory.lastClient.submitRecords)\n\n # Assertions\n self.assertEqual(_submitRecordsCount, 4)", "def getOldGlobalMsgs(self): # if you dont delete global cache locally before it - index will be 1 \n msgsType = 'old' \n return self.getGlobalMsgs(msgsType)", "def _add_message(self, message):\r\n self.result = self.result + message", "def _on_batch_cache_timeout(self, meta, timestamp, batch):\n assert isinstance(meta, Message)\n assert isinstance(timestamp, float)\n assert isinstance(batch, list)\n assert len(batch) > 0\n if __debug__:\n dprint(\"processing \", len(batch), \"x \", meta.name, \" batched messages\")\n\n if meta in self._batch_cache and id(self._batch_cache[meta][2]) == id(batch):\n if __debug__: dprint(\"pop batch cache for \", len(batch), \"x \", meta.name)\n self._batch_cache.pop(meta)\n\n if not self._communities.get(meta.community.cid, None) == meta.community:\n if __debug__: \n dprint(\"dropped \", len(batch), 
\"x \", meta.name, \" packets (community no longer loaded)\", level=\"warning\")\n self._statistics.dict_inc(self._statistics.drop, \"on_batch_cache_timeout: community no longer loaded\", len(batch))\n self._statistics.drop_count += len(batch)\n return 0\n\n if meta.batch.enabled and timestamp > 0.0 and meta.batch.max_age + timestamp <= time():\n if __debug__:\n dprint(\"dropped \", len(batch), \"x \", meta.name, \" packets (can not process these messages on time)\", level=\"warning\")\n self._statistics.dict_inc(self._statistics.drop, \"on_batch_cache_timeout: can not process these messages on time\", len(batch))\n self._statistics.drop_count += len(batch)\n return 0\n\n return self._on_batch_cache(meta, batch)", "def cache_announcement():\n confs = Conference.query(ndb.AND(\n Conference.seatsAvailable <= 5, Conference.seatsAvailable > 0\n )).fetch(projection=[Conference.name])\n\n if confs:\n # If there are almost sold out conferences,\n # format announcement and set it in memcache\n announcement = ANNOUNCEMENT_TPL % (\n ', '.join(conf.name for conf in confs))\n memcache.set(MEMCACHE_ANNOUNCEMENTS_KEY, announcement)\n else:\n # If there are no sold out conferences,\n # delete the memcache announcements entry\n announcement = \"\"\n memcache.delete(MEMCACHE_ANNOUNCEMENTS_KEY)\n\n return announcement", "def add_message(self, message):\n message_time = t.process_time()\n self.log_cache.append((message, message_time))\n if len(self.log_cache) % 20 == 0:\n self._commit_log_db()", "def put_response(self, msg: Any) -> None:\n # redis.Connection.__del__ might call self.close at any time, which\n # will set self.responses to None. We assume this will happen\n # atomically, and the code below then protects us against this.\n responses = self.responses\n if responses:\n responses.put(msg)", "def retry(self, envelope):\n # type: (LimitedRetriesPolicy, Envelope) -> None\n death_count = self.get_death_count(envelope)\n if death_count < len(self.retry_delays):\n delay = self.retry_delays[death_count]\n retry_queue_name = self.declare_retry_queue(delay)\n\n # Save original delivery information\n if envelope.get_header('x-original-delivery-info') is None:\n self.set_original_delivery_info_header(envelope)\n\n self.consumer.channel.basic_publish(\n exchange='',\n routing_key=retry_queue_name,\n properties=envelope.properties,\n body=envelope.payload)\n\n self.consumer.channel.basic_ack(envelope.delivery_tag)\n logger.warning(\n 'Retry handling message [{}] after {}s; death count: {}'.format(\n envelope.message_id, delay, death_count + 1))\n else:\n logger.warning(\n 'Message [{}] exceeded retry limit; death count: {}'.format(\n envelope.message_id, death_count + 1))\n self.consumer.channel.basic_reject(\n envelope.delivery_tag, requeue=False)\n logger.error('Message [{}] is rejected'.format(envelope.message_id))", "def add_message():\n user_ID = str(session['user_id'])\n if 'user_id' not in session:\n abort(401)\n if request.form['text']:\n add_message_query()\n flash('Your message was recorded')\n if redis_obj.get(user_ID):\n redis_obj.delete(user_ID)\n print \"Invalidating cache after adding new message\"\n return redirect(url_for('timeline'))", "def __filter(self, failure, name):\r\n if failure.check(DeadReferenceError, PBConnectionLost):\r\n self.__notify(failure)\r\n else:\r\n print('Received the following error message when calling {0} from '\r\n 'class {1}: {2}'.format(name, self.__class__.__name__,\r\n failure.getErrorMessage()))\r\n\r\n return failure", "def 
test_db_got_error_without_cache(self):\n mock_method_path = ('dbtobindzone.fetcher.host_data_fetcher'\n '.HostDataFetcher.is_fetch_success')\n with mock.patch(mock_method_path) as mock_method:\n mock_method.return_value = False\n self.host_updater.refresh_cache()\n self.assertEqual(self.host_updater.data, [])", "def _message_failed_job(self):\n self.ensure_one()\n return _(\"Something bad happened during the execution of the job. \"\n \"More details in the 'Exception Information' section.\")", "def process_received_message(self, message):\n self.log.debug('Received \"%s\"', message)\n self.receive_queue.put(message)", "def receive(self, message):", "def _process_notice(self, arg, message):\n self.notices.append(message)\n length = len(self.notices)\n if length > 50:\n del self.notices[:length - 50]", "def on_signature_success(self, cache, new_message):\n # push the new message to the other peer\n self._dispersy.endpoint.send(cache.candidates, [new_message.packet])", "def fail(self, message):\n logger.warning(message)\n g.failed = True", "def receive_key(self, key):\n try:\n self.queue.put(key)\n except:\n raise #Just collecting possible exceptions for now", "def handle_unsent_message(cls, ctx, user, target, msg_type, delivered_or_queued, unsent):\n return", "def test_unmodified(self):\n self.channel.lineReceived(\"If-Modified-Since: %s\"\n % http.datetimeToString(100))\n self.channel.lineReceived('')\n result = self.transport.getvalue()\n self.failUnlessEqual(httpCode(result), http.NOT_MODIFIED)\n self.failUnlessEqual(httpBody(result), \"\")", "def whenException(self, channel, call):" ]
[ "0.64296436", "0.6014829", "0.5976981", "0.5920454", "0.57477766", "0.56894994", "0.56732804", "0.558145", "0.5558702", "0.5543183", "0.5528251", "0.5498727", "0.54969853", "0.54897916", "0.54788435", "0.5449968", "0.54112744", "0.5403003", "0.5399943", "0.5390648", "0.5387093", "0.53611773", "0.5352195", "0.5351189", "0.5349199", "0.5337625", "0.5332339", "0.5330961", "0.53294706", "0.530896", "0.53078115", "0.52950156", "0.52354854", "0.52354383", "0.522893", "0.5210394", "0.52066594", "0.5192853", "0.5192125", "0.51818687", "0.51620966", "0.51493883", "0.5145008", "0.5143185", "0.5141341", "0.5138443", "0.513768", "0.5134569", "0.5131677", "0.512278", "0.51087034", "0.5106934", "0.51016515", "0.5080636", "0.5068919", "0.5066763", "0.50616884", "0.5060427", "0.5060364", "0.50567836", "0.5053945", "0.5051482", "0.50423366", "0.5035011", "0.50297105", "0.5020843", "0.5015692", "0.50155705", "0.5014167", "0.50122976", "0.5008422", "0.49963486", "0.49948734", "0.49874088", "0.4982671", "0.49766222", "0.49544263", "0.49519023", "0.4949905", "0.494912", "0.4947722", "0.49475646", "0.49465925", "0.49436292", "0.49436086", "0.4939504", "0.49389994", "0.49324805", "0.49274692", "0.49272498", "0.49257824", "0.49255309", "0.49212232", "0.49209026", "0.49208084", "0.4920469", "0.49198464", "0.49187052", "0.49111968", "0.48968402" ]
0.5689635
5
Start an oef node.
def _start_oef_node(self, network_node):
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def launch_oef():\n script_path = os.path.join(\"scripts\", \"oef\", \"launch.py\")\n configuration_file_path = os.path.join(\"scripts\", \"oef\", \"launch_config.json\")\n print(\"Launching new OEF Node...\")\n subprocess.Popen(\n [\"python3\", script_path, \"-c\", configuration_file_path, \"--background\"],\n stdout=subprocess.PIPE,\n env=os.environ,\n cwd=ROOT_DIR,\n )\n\n # Wait for OEF\n print(\"Waiting for the OEF to be operative...\")\n wait_for_oef = subprocess.Popen(\n [os.path.join(\"sandbox\", \"wait-for-oef.sh\"), \"127.0.0.1\", \"10000\", \":\"],\n env=os.environ,\n cwd=ROOT_DIR,\n )\n\n wait_for_oef.wait(30)", "def ex_start_node(self, node):\n # NOTE: This method is here for backward compatibility reasons after\n # this method was promoted to be part of the standard compute API in\n # Libcloud v2.7.0\n return self.start_node(node=node)", "def start_node(self, **kwargs):\n # project_name, node_name\n\n try:\n if kwargs['project_name'] in self.data:\n project_name = kwargs['project_name']\n project_id = self.data[project_name]['project_id']\n if kwargs['node_name'] in self.data[project_name]['nodes']:\n node_name = kwargs['node_name']\n node_id = self.data[project_name]['nodes'][node_name]['node_id']\n resp = self.post_to_server('projects/{}/nodes/{}/start'.format(project_id, node_id),{})\n print('Node \\'{}\\' started.'.format(node_name))\n self.data[project_name]['nodes'][node_name]['status'] = \"running\"\n except:\n traceback_print_exc()", "def runnode(self, node, pynode=False):\n if pynode:\n process = coreinterface.PythonNode(node, self)\n else:\n process = coreinterface.ExecutableNode(node, self)\n process.spawn()\n self.loadingnodes[node] = process", "def __init__(self):\n self.start = Node('-1')", "def startNode(klass):\n try:\n ws = klass('ws://localhost:8080/ws')\n ws.daemon = False\n ws.connect()\n except:\n ws.close()", "def start_node(self, node, override_cfg_params=None):\n node.account.mkdirs(RedpandaService.DATA_DIR)\n node.account.mkdirs(os.path.dirname(RedpandaService.CONFIG_FILE))\n\n self.write_conf_file(node, override_cfg_params)\n\n if self.coproc_enabled():\n self.start_wasm_engine(node)\n\n cmd = (f\"nohup {self.find_binary('redpanda')}\"\n f\" --redpanda-cfg {RedpandaService.CONFIG_FILE}\"\n f\" --default-log-level {self._log_level}\"\n f\" --logger-log-level=exception=debug:archival=debug \"\n f\" --kernel-page-cache=true \"\n f\" --overprovisioned \"\n f\" --smp {self._num_cores} \"\n f\" --memory 6G \"\n f\" --reserve-memory 0M \"\n f\" >> {RedpandaService.STDOUT_STDERR_CAPTURE} 2>&1 &\")\n\n node.account.ssh(cmd)\n\n wait_until(\n lambda: Admin.ready(node).get(\"status\") == \"ready\",\n timeout_sec=RedpandaService.READY_TIMEOUT_SEC,\n err_msg=f\"Redpanda service {node.account.hostname} failed to start\",\n retry_on_exc=True)", "def run(self):\n self.etcd.start()", "async def start_node(request: web.Request) -> web.Response:\n req_ctx = RequestContext.parse_obj(request)\n path_params = parse_request_path_parameters_as(_NodePathParams, request)\n try:\n\n await projects_api.start_project_node(\n request, req_ctx.user_id, path_params.project_id, path_params.node_id\n )\n\n raise web.HTTPNoContent(content_type=MIMETYPE_APPLICATION_JSON)\n except ProjectStartsTooManyDynamicNodes as exc:\n raise web.HTTPConflict(reason=f\"{exc}\") from exc\n except ProjectNotFoundError as exc:\n raise web.HTTPNotFound(\n reason=f\"Project {path_params.project_id} not found\"\n ) from exc\n except NodeNotFoundError as exc:\n raise web.HTTPNotFound(\n reason=f\"Node 
{path_params.node_id} not found in project\"\n ) from exc", "def __init__(self, start_node):\n self.start_node = start_node", "def on_rcrnode_open_btn_clicked(self):\n # self.rcrnode.init_node()\n self.rcrnode.resume()", "def enter(self):\n log.debug(f\"Entering context creator for PutDoer; node running {self.node.isRunning()}.\")\n if not self.node.isRunning():\n conf = configs.get(self.port)\n self.node = get_node(self.port, **conf)", "def GachaCraftNodeExcelStart(builder):\n return Start(builder)", "def goto_start(self):\n\n self.__do_action(self.motor.moveto_edge(MotorDriver.LEFT))", "def main():\n arg_fmt = argparse.RawDescriptionHelpFormatter\n parser = argparse.ArgumentParser(formatter_class=arg_fmt,\n description=main.__doc__)\n\n parser.add_argument(\n '-s', '--save', metavar='PATH',\n help='save current EE config to given file'\n )\n parser.add_argument(\n '-l', '--load', metavar='PATH',\n help='load config from given file onto EE'\n )\n args = parser.parse_args(rospy.myargv()[1:])\n\n print(\"Initializing node... \")\n rospy.init_node('ee_config_editor', anonymous=True)\n\n ee = intera_interface.get_current_gripper_interface()\n if not ee:\n rospy.logerr(\"Could not detect an attached EndEffector!\")\n return\n\n if args.save:\n rospy.loginfo(\"Saving EE config to {}\".format(args.save))\n save_config(ee, args.save)\n\n if args.load:\n rospy.loginfo(\"Loading config and writing config to ClickSmart from {}\".format(args.load))\n load_config(ee, args.load)\n\n def clean_shutdown():\n print(\"\\nExiting example...\")\n\n rospy.on_shutdown(clean_shutdown)", "def enter(self):\n log.debug(\"Entering context creator for GetDoer\")\n if not self.node.isRunning():\n conf = configs.get(self.port)\n self.node = get_node(self.port, **conf)", "def start(name, call=None):\n datacenter_id = get_datacenter_id()\n conn = get_conn()\n node = get_node(conn, name)\n\n conn.start_server(datacenter_id=datacenter_id, server_id=node[\"id\"])\n\n return True", "def launch(self, node):\n if not self.started:\n raise RLException(\"please start ROSLaunch first\")\n elif not isinstance(node, Node):\n raise ValueError(\"arg must be of type Node\")\n\n proc, success = self.parent.runner.launch_node(node)\n if not success:\n raise RLException(\"failed to launch %s/%s\"%(node.package, node.type))\n return proc", "def start_kernel(self, **kw):", "def start(self):\n #url = \"http://xapi.openstreetmap.org\" \\\n #url = \"http://osm.bearstech.com\" \\\n url = \"http://osmxapi.hypercube.telascience.org\" \\\n \"/api/0.6/node[amenity=%s][bbox=%s]\" % \\\n (self._amenity, self._location.getBox())\n\n self._has_list = False\n self._places = None\n self._osm_hand.clear_places()\n \n try:\n self._net_if.download(url)\n except Exception as inst:\n self.send_error(inst)", "def start(cobj):\n pass", "def start_kernel(self, kernel_name=None, **kwargs):", "def start(self, **kwargs):\n return self.client.api.start(self.id, **kwargs)", "def start():", "def start():", "def start():", "def start():", "def start(self, **kwargs):\n pass", "def start(self, **kwargs):\n pass", "def start(self):\n gevent.spawn(self.run)", "def start() -> None:\n # Authenticate\n ee.Authenticate()\n\n # Initialize the library\n ee.Initialize()", "def start( *args, **kwargs ):", "def start():\n trio.run(_main)", "def cmd_NODE(self, line):\r\n config = NodeOptions(self.terminal)\r\n\r\n try:\r\n config.parseOptions(line)\r\n cmd = config.subCommand\r\n opts = config.subOptions if hasattr(config, 'subOptions') else {}\r\n except usage.UsageError as 
errortext:\r\n self.terminal.write(\"BUG in usage: {0}\".format(errortext))\r\n else:\r\n if cmd == 'start':\r\n if (opts['args'] and opts['ctag'] and opts['ntag']\r\n and opts['pkg'] and opts['exe']):\r\n self.callToUser('addNode', 'robot', opts['ctag'],\r\n opts['ntag'], opts['pkg'], opts['exe'],\r\n opts['args'])\r\n elif (opts['ctag'] and opts['ntag'] and opts['pkg']\r\n and opts['exe']):\r\n self.callToUser('addNode', 'robot', opts['ctag'],\r\n opts['ntag'], opts['pkg'], opts['exe'])\r\n elif cmd == 'stop':\r\n if opts['ctag'] and opts['ntag']:\r\n self.callToUser('removeNode', 'robot', opts['ctag'],\r\n opts['ntag'])", "def InvocationStart(builder):\n return Start(builder)", "def start(self):\n if self._start_event is None:\n _call_spawn_callbacks(self)\n hub = get_my_hub(self) # pylint:disable=undefined-variable\n self._start_event = hub.loop.run_callback(self.switch)", "def start(self) -> None:\n ...", "def start(self) -> None:\n ...", "def start(context):\n context.run(\"python hellotensorflow/hello.py\")", "def start(self, device, *args, **kwargs):\n raise NotImplementedError", "def start(self):\n ...", "def do_start(self, arg):\n args = arg.split(\" \")\n self.model.initialise(args[0])\n self.model.run()", "def run(self):\n self.node_id = CONFIG.node_id\n self.running = Event()\n if not CONFIG.master_pub or not CONFIG.master_repl:\n print colors.yellow(\"Master IP:port is not set in config file (%s)\"\n % CONFIG._fn)\n master_pub = raw_input(\"Enter Master PUB uri (IP or IP:port):\")\n if \":\" in master_pub:\n ip, _, port = master_pub.rpartition(\":\")\n else:\n ip = master_pub\n port = 5551\n CONFIG.update(\"General\", \"master_pub\", \"%s:%s\" % (ip,\n port))\n master_repl = raw_input(\"Enter Master REPLY uri (IP or IP:port), \"\n \"hit ENTER for default(%s:5552):\" % ip)\n if not master_repl:\n port = 5552\n elif \":\" in master_repl:\n ip, _, port = master_repl.rpartition(\":\")\n else:\n ip = master_repl\n port = 5552\n CONFIG.update(\"General\", \"master_repl\", \"%s:%s\" % (ip,\n port))\n CONFIG.reload()\n\n if not validate_address(CONFIG.master_pub) or \\\n not validate_address(CONFIG.master_repl):\n LOG.error('Server IP not present in config or is not valid.\\n'\n 'Check your config')\n exit(1)\n\n if not self.node_id:\n LOG.error(\"The node id not set in config. 
\"\n \"Run program with config option first\")\n exit(1)\n\n self.backend = self.transport_class.from_config(\n CONFIG, **vars(self.args))\n load_plugins(CONFIG)\n self.sessions = {}\n self.matcher = Matcher(self.node_id, self.backend.meta())\n\n LOG.info(\"Starting node\")\n self.details()\n self._sig_int = signal.getsignal(signal.SIGINT)\n self._sig_term = signal.getsignal(signal.SIGTERM)\n\n if os.name == 'nt':\n # Use Ctrl+C to invoke clean on Windows\n import win32api\n win32api.SetConsoleCtrlHandler(self.clean, True)\n else:\n signal.signal(signal.SIGINT, self._handle_terminate)\n signal.signal(signal.SIGTERM, self._handle_terminate)\n\n # Invoke clean for sessions\n signal.signal(signal.SIGHUP, self.clean)\n\n if not self.backend.prepare():\n LOG.info(\"Cannot start transport backend\")\n self._handle_terminate()\n exit(1)\n\n def request_processor():\n req_queue = self.backend.consume_queue('requests',\n ident=\"DISPATCHER\")\n poller = self.backend.create_poller(req_queue)\n while not self.running.is_set():\n try:\n ready = poller.poll(200)\n if not ready:\n continue\n if req_queue in ready:\n message = req_queue.recv()[0]\n if not message:\n continue\n job = JobTarget.build(message)\n if job:\n self.target_match(job)\n except ConnectionError:\n break\n except Exception:\n continue\n req_queue.close()\n\n Thread(target=request_processor).start()\n\n self.backend.loop()\n\n LOG.info(\"Node exited\")", "def start (self):\n pass", "def start (self):\n pass", "def __init__(self, owner, pkg, exe, args, name, namespace):\r\n ArgumentMixin.__init__(self, owner.loader)\r\n\r\n owner.registerNode(self)\r\n self._owner = owner\r\n\r\n self._reactor = owner.reactor\r\n self._call = None\r\n self._protocol = None\r\n\r\n # Find and validate executable\r\n cmd = [self._loader.findNode(pkg, exe)] # raises ResourceNotFound\r\n\r\n # Add arguments\r\n args = self.processArgument(args)\r\n\r\n # TODO: Is it necessary to limit the possible characters here?\r\n# for char in '$;':\r\n# if char in args:\r\n# raise ValueError('Argument can not contain special '\r\n# \"character '{0}'.\".format(char))\r\n\r\n cmd += shlex.split(args)\r\n\r\n # Process name and namespace argument\r\n if name:\r\n cmd.append('__name:={0}'.format(name))\r\n\r\n if namespace:\r\n cmd.append('__ns:={0}'.format(namespace))\r\n\r\n # Create protocol instance\r\n uid = uuid4().hex\r\n out = os.path.join(self._LOG_DIR,\r\n '{0}-{1}-out.log'.format(uid, name or exe))\r\n err = os.path.join(self._LOG_DIR,\r\n '{0}-{1}-err.log'.format(uid, name or exe))\r\n\r\n self._protocol = NodeProtocol(self, out, err)\r\n\r\n # Start node\r\n log.msg('Start Node {0}/{1} [pkg: {2}, exe: '\r\n '{3}].'.format(namespace, name or exe, pkg, exe))\r\n self._reactor.spawnProcess(self._protocol, cmd[0], cmd, env=os.environ)\r\n\r\n self._name = '{0}/{1}'.format(pkg, exe)", "def start(self):\n self.p.start()", "def start(args, config):\n print('Starts an HPC fleet: \"{}\"'.format(args))", "def start():\r\n # Define a Coordinate Tuple\r\n Coord = collections.namedtuple(\"Coord\", [\"x\", \"y\"])\r\n\r\n # Initialize an elliptic curve by Secp256k1 Parameters.\r\n # Secp256k1 is the elliptic curve used in Bitcoin's public-key cryptography,\r\n # and is defined in Standards for Efficient Cryptography (SEC)\r\n # (Certicom Research, http://www.secg.org/sec2-v2.pdf).\r\n # Currently Bitcoin uses secp256k1 with the ECDSA algorithm\r\n\r\n big_prime = 0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f\r\n order = 
0xfffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141\r\n Gx = 0x79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798\r\n Gy = 0x483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8\r\n ec = elliptic.EC(0, 7, big_prime, Coord(Gx, Gy), order)\r\n\r\n # Initialize a server\r\n sv = server.Server(ec)\r\n\r\n # Initialize a client\r\n cl = client.Client(sv, ec)", "def open(self, amount):\n try:\n open_masternode = loop.run_until_complete(\n self.service.node_management.open_master_node(amount=amount),\n )\n\n except Exception as error:\n return None, str(error)\n\n return {\n 'batch_id': open_masternode.batch_id,\n }, None", "def main_endpoint(request, node_id):\n status = node.infotable['status']\n request.setResponseCode(status)\n\n latency = node.infotable['latency']\n if latency > 0:\n time.sleep(latency)\n\n node.make_requests()\n\n return node.node_id", "def run(name, create_nodes):\n\n print('== {} =='.format(name))\n create_nodes()\n emit_and_print_errors(lkt_file='foo.lkt')\n print('')", "def run(self):\n self.ae.start()", "def start_nodes(self, poll_wait_time=5):\n _url = f\"{self.connector.base_url}/projects/{self.project_id}/nodes/start\"\n\n self.connector.http_call(\"post\", _url)\n\n # Update object\n time.sleep(poll_wait_time)\n self.get_nodes()", "def start(params) -> None:\n check_root()\n start_microservice(params)\n load_kernel_module(params)\n start_streamer(params)", "def start_by_node_name(node: str,\n ssh_config_file: str = DEFAULT_CHAOS_SSH_CONFIG_FILE) -> bool:\n logger.debug(\"start node: %s\", node)\n executor = FabricExecutor(ssh_config_file=expanduser(ssh_config_file))\n\n # Start the node by alias name\n result = executor.execute(node, \"systemctl start indy-node\", as_sudo=True)\n if result.return_code != 0:\n logger.error(\"Failed to start %s\", node)\n return False\n\n return True", "def start(self):\n raise NotImplementedError", "def start(self):\n raise NotImplementedError", "def start(self):\n raise NotImplementedError", "def start(self):\n\n rospy.loginfo(self.name + \": Node started\")\n rospy.set_param(\"path_logger_active\", False)\n\n rospy.sleep(1)\n\n self.read_waypoints_pickle()\n rospy.loginfo(self.name + \": Global waypoints read from file\")\n\n while True:\n if self.uav_pose is None:\n rospy.loginfo(self.name + \": Waiting for UAV Pose\")\n self._rate_reached_waypoint.sleep()\n else:\n uav_pose_start = copy.copy(self.uav_pose) # copy is needed here, because uav_pose is mutable!\n rospy.loginfo(self.name + \": UAV Pose received\")\n break\n\n # Set mode to Offboard, Arm the UAV and takeoff to set altitude\n self._takeoff_procedure(uav_pose_start)\n rospy.sleep(1) # To prevent that takeoff goes directly into path following\n rospy.loginfo(self.name + ': Takeoff procedure finished')\n\n # Start publishing global waypoints\n uav_pose_after_takeoff = copy.copy(self.uav_pose)\n wp_global_previous_temp = Waypoint()\n wp_global_previous_temp.x_lat = uav_pose_after_takeoff.pose.position.x\n wp_global_previous_temp.y_long = uav_pose_after_takeoff.pose.position.y\n wp_global_previous_temp.z_alt = uav_pose_after_takeoff.pose.position.z\n wp_global_previous_temp = copy.copy(wp_global_previous_temp)\n self.waypoint_global_next = self.waypoint_global_all.waypoints[0]\n self.waypoint_global_previous = wp_global_previous_temp\n self._thread_waypoint_global.start()\n\n # Activate path logging node. 
Maybe not best coding practice to do this with a parameter and not a publish/\n # subscriber or service but the path logger was only needed to record test results\n rospy.set_param(\"path_logger_active\", True)\n\n # Starts forwarding the setpoints from the local planner\n self._thread_forward_local_setpoints.start()\n\n # Stops sending the takeoff waypoint. Between this and\n # sending the next waypoint from the local planner can be a maximum of .5 seconds, since waypoints have\n # to be published with >2Hz (PX4/MAVROS restriction)\n self._thread_takeoff_setpoint.do_run = False\n\n # Iterates over all global waypoints\n for wp_global_current in self.waypoint_global_all.waypoints:\n self.waypoint_global_next = wp_global_current\n self.waypoint_global_previous = wp_global_previous_temp\n rospy.loginfo(self.name + ': Published new global waypoint')\n\n while not self._is_at_position(self.uav_pose, wp_global_current, atol=self.tol_wp_reached) \\\n and not rospy.is_shutdown():\n self._rate_reached_waypoint.sleep()\n\n rospy.loginfo(self.name + ': Reached previous global waypoint')\n wp_global_previous_temp = copy.copy(wp_global_current)\n\n self.finished = True\n rospy.set_param(\"path_logger_active\", False)\n self._thread_forward_local_setpoints.do_run = False # Stops forwarding the setpoints from the local planner\n rospy.loginfo(self.name + ': Reached final global waypoint')\n rospy.sleep(10)\n return", "def open(self):\n self.device = ConnectHandler(\n device_type='vyos',\n host=self.hostname,\n username=self.username,\n password=self.password,\n timeout=self.timeout,\n port=self.port\n )", "def start(self):\n pass", "def start(self):\n pass", "def start(self):\n pass", "def start(self):\n pass", "def start(self):\n pass", "def start(self):\n pass", "def start(self):\n pass", "def start(self):\n pass", "def start():\n\n start_server()", "def open(self):\n self._command = \"open\"", "def start(self):\n assert(self._cbs is not None)\n self._as.start() # start the server", "def start(self) -> None:", "def start(self) -> None:", "def start(self):\r\n pass", "def __init__(self, target, action, context, **kwargs):\n super(NodeAction, self).__init__(target, action, context, **kwargs)\n\n try:\n self.entity = node_mod.Node.load(self.context, node_id=self.target)\n except Exception:\n self.entity = None", "def start_node(\n *,\n services: list,\n user: str,\n host: str,\n identity_file: str,\n cluster: FlintrockCluster):\n ssh_client = get_ssh_client(\n user=user,\n host=host,\n identity_file=identity_file,\n wait=True)\n\n with ssh_client:\n # TODO: Consider consolidating ephemeral storage code under a dedicated\n # Flintrock service.\n if cluster.storage_dirs.ephemeral:\n ssh_check_output(\n client=ssh_client,\n command=\"\"\"\n sudo chown \"{u}:{u}\" {d}\n \"\"\".format(\n u=user,\n d=' '.join(cluster.storage_dirs.ephemeral)))\n\n for service in services:\n service.configure(\n ssh_client=ssh_client,\n cluster=cluster)", "def _start(self):\n pass", "def open(self):\n device_type = \"mikrotik_routeros\"\n if self.transport == \"telnet\":\n device_type = \"mikrotik_routeros_telnet\"\n self.device = self._netmiko_open(\n device_type, netmiko_optional_args=self._netmiko_optional_args\n )", "def create_node(\n self, x: float, y: float, node_type: NodeType, model: str\n ) -> Optional[Node]:\n node_id = self.next_node_id()\n position = Position(x=x, y=y)\n image = None\n if nutils.has_image(node_type):\n image = \"ubuntu:latest\"\n emane = None\n if node_type == NodeType.EMANE:\n if not 
self.emane_models:\n dialog = EmaneInstallDialog(self.app)\n dialog.show()\n return\n emane = self.emane_models[0]\n name = f\"emane{node_id}\"\n elif node_type == NodeType.WIRELESS_LAN:\n name = f\"wlan{node_id}\"\n elif node_type in [NodeType.RJ45, NodeType.TUNNEL]:\n name = \"unassigned\"\n else:\n name = f\"n{node_id}\"\n node = Node(\n id=node_id,\n type=node_type,\n name=name,\n model=model,\n position=position,\n image=image,\n emane=emane,\n )\n if nutils.is_custom(node):\n services = nutils.get_custom_services(self.app.guiconfig, model)\n node.config_services = set(services)\n # assign default services to CORE node\n else:\n services = self.session.default_services.get(model)\n if services:\n node.config_services = set(services)\n logger.info(\n \"add node(%s) to session(%s), coordinates(%s, %s)\",\n node.name,\n self.session.id,\n x,\n y,\n )\n self.session.nodes[node.id] = node\n return node", "def start(self):\n raise NotImplementedError()", "def start(self):\n raise NotImplementedError()", "def start(self):\n raise NotImplementedError()", "def start(self):\n raise NotImplementedError()", "def Start(self):\n self.Init()\n\n fd = aff4.FACTORY.Open(self.args.vfs_file_urn, mode=\"rw\",\n token=self.token)\n\n # Account for implicit directories.\n if fd.Get(fd.Schema.TYPE) is None:\n fd = fd.Upgrade(\"VFSDirectory\")\n\n self.state.get_file_flow_urn = fd.Update(\n attribute=self.args.attribute,\n priority=rdfvalue.GrrMessage.Priority.HIGH_PRIORITY)", "def main():\n rclpy.init()\n node = rclpy.create_node(\"entity_spawner\")\n\n # -- args\n node.declare_parameter(name=\"robot_name\", value=None)\n node.declare_parameter(name=\"x\", value=None)\n node.declare_parameter(name=\"y\", value=None)\n\n robot_name = node.get_parameter('robot_name').value\n x = node.get_parameter('x').value\n y = node.get_parameter('y').value\n\n # -- call service to spawn robot\n if robot_name is not None:\n\n # -- parse xacro to urdf\n xacro_file = os.path.join(get_package_share_directory('my_ros2_robot_gazebo'), 'urdf', f'{robot_name}', 'main.xacro')\n robot_desc = xacro.process_file(xacro_file)\n robot_desc = robot_desc.toxml()\n\n # -- set data for request\n request = SpawnEntity.Request()\n request.name = f'{robot_name}'\n request.xml = robot_desc\n request.initial_pose.position.x = x\n request.initial_pose.position.y = y\n request.initial_pose.position.z = 0.2\n\n # -- service client\n node.get_logger().info(\"Connecting to `/spawn_entity` service...\")\n client = node.create_client(SpawnEntity, \"/spawn_entity\")\n if not client.service_is_ready():\n client.wait_for_service()\n node.get_logger().info(\"...connected!\")\n\n # -- call service\n node.get_logger().info(\"Sending service request to `/spawn_entity`\")\n future = client.call_async(request)\n rclpy.spin_until_future_complete(node, future)\n if future.result() is not None:\n print('response: %r' % future.result())\n else:\n raise RuntimeError(\n 'exception while calling service: %r' % future.exception())\n else:\n node.get_logger().info(\"robot_name is not defined.\")\n\n node.get_logger().info(\"Done! 
Shutting down node.\")\n node.destroy_node()\n rclpy.shutdown()", "def open(self, mode=\"rb\", encoding=\"ascii\", buffering=1024, size=None,\n block_transfer=False, request_crc_support=True):\n return self.sdo_node.open(self.od.index, self.od.subindex, mode,\n encoding, buffering, size, block_transfer, request_crc_support=request_crc_support)", "def spawn_node(\n islandNamespace=None,\n namespace=None,\n island_id=None,\n robot_id=None,\n model_location=None,\n camera_location=None,\n robot_name=None,\n camera_en=False\n):\n\n # print(model_location)\n\n arg_model = \"-param robot_description -urdf -model {}_{}_{} -x {} -y {} -z {}\".format(\n robot_name,\n island_id,\n robot_id,\n model_location[0],\n model_location[1],\n model_location[2]\n )\n # print(arg_model + \": ARG MODEL\")\n\n arg_camera = \"\"\n if camera_en:\n arg_camera = \"-urdf -param camera -model camera_{}_{} -x {} -y {} -z {} \".format(\n island_id,\n robot_id,\n (camera_location[0]),\n (camera_location[1] - 1),\n (camera_location[2])\n )\n # print(arg_camera + \": ARG CAMERA\")\n\n arg_gazebo = \" -gazebo_namespace {}\".format(islandNamespace) + \"gzserver\"\n # print(arg_gazebo)\n\n # gazebo_env_args = \"$GAZEBO_MASTER_IP:1134{}\".format(4+island_id)\n\n node_model_spawn = roslaunch.core.Node(\n package=\"gazebo_ros\",\n node_type=\"spawn_model\",\n name=\"spawn_urdf\",\n namespace=namespace,\n output=\"screen\",\n args=(arg_model + arg_gazebo)\n )\n\n node_camera_spawn = roslaunch.core.Node(\n package=\"gazebo_ros\",\n node_type=\"spawn_model\",\n name=\"spawn_camera\",\n namespace=namespace,\n output=\"screen\",\n args=(arg_camera + arg_gazebo)\n )\n\n nodes = [node_model_spawn]\n if camera_en:\n nodes.extend(node_camera_spawn)\n return nodes", "def cvmfsStart(reponame = None):\n if reponame == None:\n reponame = _getRepoName()\n\n rc = subprocess.call([\"cvmfs_server\", \"transaction\", reponame])\n if rc != 0:\n raise RuntimeError(\"Could not start CVMFS transaction\")", "def start_run(self, context: RobotRunnerContext) -> None:\n rospy.init_node(\"robot_runner\", disable_signals=True)\n self.ina219_profiler = INA219ProfilerClient()\n self.cpu_mem_profiler = ResourceProfilerClient()", "def start(self, *, user: str, identity_file: str):\n self.load_manifest(user=user, identity_file=identity_file)\n\n partial_func = functools.partial(\n start_node,\n services=self.services,\n user=user,\n identity_file=identity_file,\n cluster=self)\n hosts = [self.master_ip] + self.slave_ips\n\n run_against_hosts(partial_func=partial_func, hosts=hosts)\n\n master_ssh_client = get_ssh_client(\n user=user,\n host=self.master_ip,\n identity_file=identity_file)\n\n with master_ssh_client:\n for service in self.services:\n service.configure_master(\n ssh_client=master_ssh_client,\n cluster=self)\n\n for service in self.services:\n service.health_check(master_host=self.master_ip)", "def start(self):\n self.parent.start(auto_terminate=False)\n self.started = True", "def begin(self):\n self.service = NoseServiceClass()\n\n self.service.init_service(endpoint=self.rp_endpoint,\n project=self.rp_project,\n token=self.rp_uuid,\n ignore_errors=False)\n\n\n # Start launch.\n self.launch = self.service.start_launch(name=self.rp_launch,\n description=self.rp_launch_description,\n mode=self.rp_mode)\n\n self.handler = RPNoseLogHandler(self.filters if self.filters else None)\n self.setupLoghandler()", "def start(self, iface='', network='', bootstrap=[], cb=None, name=None, nodeid=None):\n from urlparse import urlparse\n import socket\n 
_log.info(\"PROXY start\")\n o=urlparse(self.master_uri)\n fqdn = socket.getfqdn(o.hostname)\n self._server_node_name = fqdn.decode('unicode-escape')\n self.node.network.join([self.master_uri],\n callback=CalvinCB(self._start_link_cb, org_cb=cb),\n corresponding_server_node_names=[self._server_node_name])", "def main():\n\n\t# Initialize the node\n\trospy.init_node(\"node_action_server_ros_iot_bridge\")\n\n\t# Create a object for RosIotBridgeActionServer class\n\taction_server = RosIotBridgeActionServer()\n\n\t# Not letting this node die\n\trospy.spin()", "def run(self):\n # Get the UUID so we can heartbeat to Ironic. Raises LookupNodeError\n # if there is an issue (uncaught, restart agent)\n self.started_at = _time()\n\n # Cached hw managers at runtime, not load time. See bug 1490008.\n hardware.load_managers()\n\n if not self.standalone:\n # Inspection should be started before call to lookup, otherwise\n # lookup will fail due to unknown MAC.\n uuid = inspector.inspect()\n\n content = self.api_client.lookup_node(\n hardware_info=hardware.dispatch_to_managers(\n 'list_hardware_info'),\n timeout=self.lookup_timeout,\n starting_interval=self.lookup_interval,\n node_uuid=uuid)\n\n self.node = content['node']\n self.heartbeat_timeout = content['heartbeat_timeout']\n\n wsgi = simple_server.make_server(\n self.listen_address[0],\n self.listen_address[1],\n self.api,\n server_class=simple_server.WSGIServer)\n\n if not self.standalone:\n # Don't start heartbeating until the server is listening\n self.heartbeater.start()\n\n try:\n wsgi.serve_forever()\n except BaseException:\n self.log.exception('shutting down')\n\n if not self.standalone:\n self.heartbeater.stop()", "def start(self):\n thread.start_new_thread(Pyro4.naming.startNSloop, tuple())\n\n self.ns = Pyro4.locateNS()\n if self.ns == None:\n logging.error('Cannot locate Pyro NS.')\n return\n\n daemon = export(self)\n thread.start_new_thread(daemon.requestLoop, tuple())\n thread.start_new_thread(self.healthcheck, tuple())\n logging.info('%s started' % self.name)", "def init(node_tree):\n\n start_pos_x = 0\n start_pos_y = 0\n\n pos_x_shift = 185\n\n # init parent\n DifSpec.init(node_tree, disable_remap_alpha=True)\n\n base_tex_n = node_tree.nodes[DifSpec.BASE_TEX_NODE]\n spec_mult_n = node_tree.nodes[DifSpec.SPEC_MULT_NODE]\n vcol_mult_n = node_tree.nodes[DifSpec.VCOLOR_MULT_NODE]\n\n # move existing\n for node in node_tree.nodes:\n if node.location.x > start_pos_x + pos_x_shift:\n node.location.x += pos_x_shift * 2\n\n # node creation\n sec_geom_n = node_tree.nodes.new(\"ShaderNodeGeometry\")\n sec_geom_n.name = sec_geom_n.label = DifSpecOclu.SEC_GEOM_NODE\n sec_geom_n.location = (start_pos_x - pos_x_shift, start_pos_y + 1200)\n sec_geom_n.uv_layer = _MESH_consts.none_uv\n\n oclu_tex_n = node_tree.nodes.new(\"ShaderNodeTexture\")\n oclu_tex_n.name = oclu_tex_n.label = DifSpecOclu.OCLU_TEX_NODE\n oclu_tex_n.location = (start_pos_x + pos_x_shift, start_pos_y + 1200)\n\n oclu_sep_rgb_n = node_tree.nodes.new(\"ShaderNodeSeparateRGB\")\n oclu_sep_rgb_n.name = oclu_sep_rgb_n.label = DifSpecOclu.OCLU_SEPARATE_RGB_NODE\n oclu_sep_rgb_n.location = (start_pos_x + pos_x_shift * 3, start_pos_y + 1200)\n\n oclu_mix_n = node_tree.nodes.new(\"ShaderNodeMixRGB\")\n oclu_mix_n.name = oclu_mix_n.label = DifSpecOclu.OCLU_MIX_NODE\n oclu_mix_n.location = (start_pos_x + pos_x_shift * 4, start_pos_y + 1400)\n oclu_mix_n.blend_type = \"MULTIPLY\"\n oclu_mix_n.inputs['Fac'].default_value = 1\n\n oclu_a_mix_n = node_tree.nodes.new(\"ShaderNodeMath\")\n 
oclu_a_mix_n.name = oclu_a_mix_n.label = DifSpecOclu.OCLU_A_MIX_NODE\n oclu_a_mix_n.location = (start_pos_x + pos_x_shift * 4, start_pos_y + 1600)\n oclu_a_mix_n.operation = \"MULTIPLY\"\n\n # links creation\n node_tree.links.new(oclu_tex_n.inputs[\"Vector\"], sec_geom_n.outputs[\"UV\"])\n\n # pass 1\n node_tree.links.new(oclu_sep_rgb_n.inputs[\"Image\"], oclu_tex_n.outputs[\"Color\"])\n\n # pass 2\n node_tree.links.new(oclu_a_mix_n.inputs[0], base_tex_n.outputs[\"Value\"])\n node_tree.links.new(oclu_a_mix_n.inputs[1], oclu_sep_rgb_n.outputs[\"R\"])\n\n node_tree.links.new(oclu_mix_n.inputs[\"Color1\"], base_tex_n.outputs[\"Color\"])\n node_tree.links.new(oclu_mix_n.inputs[\"Color2\"], oclu_sep_rgb_n.outputs[\"R\"])\n\n # pass 3\n node_tree.links.new(spec_mult_n.inputs[\"Color2\"], oclu_a_mix_n.outputs[\"Value\"])\n\n # pass 4\n node_tree.links.new(vcol_mult_n.inputs[\"Color2\"], oclu_mix_n.outputs[\"Color\"])", "def start_all_nodes(self):\n for node in self.nodes:\n node.start()", "def main():\n openff = Openff()\n openff.console.welcome()" ]
[ "0.73455715", "0.67564565", "0.6507637", "0.5896183", "0.5798123", "0.56471074", "0.5645573", "0.5598258", "0.5573087", "0.556594", "0.55550206", "0.5545505", "0.5535845", "0.5517319", "0.5511", "0.55073994", "0.5473905", "0.5468678", "0.54667735", "0.54622465", "0.54570323", "0.54533374", "0.5438118", "0.5433023", "0.5433023", "0.5433023", "0.5433023", "0.5412878", "0.5412878", "0.54104924", "0.5397499", "0.5362051", "0.53563035", "0.5353051", "0.5324326", "0.5317994", "0.5303134", "0.5303134", "0.52992666", "0.5279814", "0.5273658", "0.52644193", "0.5247972", "0.524463", "0.524463", "0.5218546", "0.5216329", "0.5206955", "0.5190966", "0.5184808", "0.518279", "0.5168836", "0.5168081", "0.51664066", "0.51662457", "0.5163583", "0.5155098", "0.5155098", "0.5155098", "0.5152277", "0.51501656", "0.5149832", "0.5149832", "0.5149832", "0.5149832", "0.5149832", "0.5149832", "0.5149832", "0.5149832", "0.5146813", "0.51326615", "0.5131187", "0.5126105", "0.5126105", "0.51222026", "0.51153797", "0.511265", "0.51088697", "0.5105425", "0.510349", "0.5103237", "0.5103237", "0.5103237", "0.5103237", "0.51017046", "0.5100062", "0.5098453", "0.5097516", "0.5096323", "0.5093549", "0.5091934", "0.50873846", "0.50662243", "0.5065916", "0.50644654", "0.50640565", "0.5050177", "0.50460684", "0.503347", "0.50149715" ]
0.82847816
0
Set the test up.
def setup_class(cls): cls.runner = CliRunner() cls.agent_name = "agent_1" cls.cwd = os.getcwd() cls.t = tempfile.mkdtemp() os.chdir(cls.t)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setUp(self):\n logging.debug('setting up')", "def setUp(self):\n logging.debug('setting up')", "def setUp(self):\n\n self._set_up()", "def setUp(self):\n MainTests.setUp(self)", "def setUp(self):\n \n pass", "def setUp(self):\n\n # setup init variables\n self.init_vars = {\n 'suppress_logfile': True,\n 'verbosity': 0,\n 'mothur_seed': 54321,\n }\n\n # setup directories for testing\n test_dir = os.path.join(os.getcwd(), 'tests')\n self.test_output_dir = os.path.join(test_dir, 'test_output')\n if not os.path.isdir(self.test_output_dir):\n os.makedirs(self.test_output_dir)\n self.test_input_dir = os.path.join(test_dir, 'test_data')\n\n return", "def setUp(self):\n print(\"New test by Nikolay Melnik\")", "def setUp(self):\r\n print('---------------------------------------------\\n')\r\n print('STARTING TEST...')", "def setUp(self):\n test_env_setup()", "def setUp(self):\n\n pass", "def setUp(self):\n\n pass", "def setUp(self) :\n pass", "def setUp(self):\n self.setup_beets()", "def setUp(self):\n\n return", "def setUp(self) -> None:\n pass", "def setUp(self) -> None:\n pass", "def setUp(self):\n pass #because we dont have anything to setup.", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def _set_up():\n repl._setUp = self.setUp", "def setUp(self):\n setUp()", "def setUp(self):\n print('Calling \\'setUp\\'')", "def setUp(self):\n\n BaseTest.setUp(self)", "def setUp(self):\n self", "def setUp(self):\n self", "def setUp(self):\r\n pass", "def setup(self):\n # Have to wait for a server connection before we\n # can run the test\n self.wait_for_server_connections(10)", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\r\n pass # nothing used by all\r", "def test_setup(self):\n engine = Engine(self.config_file, self.api_token)\n engine.setup()", "def setUp(self):\r\n pass # nothing required by all\r", "def setUp(self):\n # Used to initialize objects that should be re-initialized or\n # re-created for each individual test\n self.t = Task()\n\n self.t.config(\"alias.from\", \"to\")", "def setUp(self):\n print(\"\\nIn setUp()...\")", "def setUp(self):\n\t\tself.testCases = [\n\t\t\t{\n\t\t\t\t'show': \"House\",\n\t\t\t\t'episode': 11,\n\t\t\t\t'season': 3,\n\t\t\t\t'title': \"Words and Deeds\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t'show': \"Lost\",\n\t\t\t\t'episode': 21,\n\t\t\t\t'season': 2,\n\t\t\t\t'title': \"?\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t'show': \"Heroes\",\n\t\t\t\t'episode': 15,\n\t\t\t\t'season': 1,\n\t\t\t\t'title': \"Run!\"\n\t\t\t}\n\t\t]", "def setUp(self):\r\n # nothing to do, all tests use different things\r\n pass", "def setUp(self):\n super(BasicTestCase, 
self).setUp()", "def setUp(self):\n raise NotImplementedError", "def setUp(self):\n self.db_fd, mainPyUnit.app.config['DATABASE'] = tempfile.mkstemp()\n mainPyUnit.app.config['TESTING'] = True\n self.app = mainPyUnit.app.test_client()\n #mainPyUnit.init_db()", "def setUp(self):\n\n # Setup for all test cases.\n controllers = com.discover_controllers_on_network()\n self.controller, _, connected = com.connect_robot_with_ipaddr(controllers, '127.0.0.1')\n if not connected:\n print 'Couldn\\'t connect to controller. Test will not be run.'\n sys.exit()\n is_logged_in, _ = user_auth.logon_robot_controller_default(self.controller)\n if not is_logged_in:\n print 'Couldn\\'t log in. Test will not be run.'\n sys.exit()\n\n # Additional setup for some test cases.\n test_desc = self.shortDescription()\n if test_desc == 'Tests edit_and_write_rapid_data_property with correct input data.':\n is_master, _, self.mastership = user_mastership.get_master_access_to_controller_rapid(self.controller)\n if not is_master:\n print 'Couldn\\'t get mastership. Test will not run.'\n sys.exit()\n elif test_desc == 'Tests edit_and_write_rapid_data with correct input data.':\n is_master, _, self.mastership = user_mastership.get_master_access_to_controller_rapid(self.controller)\n if not is_master:\n print 'Couldn\\'t get mastership. Test will not run.'\n sys.exit()", "def setup( self ):", "def setUp(self) -> None:\n self.engine = EvalHPOA()", "def setup(self) -> None:\n pass", "def setup(self) -> None:\n pass", "def setup(self) -> None:\n pass", "def setUp(self):\n self.example = Example()", "def setUpTestCase(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setUp(self) -> None:\n\n self.checker = CheckerBase()", "def setup(self) -> None:", "def setUp(self):\n self.hass = get_test_home_assistant()", "def setUp(self):\n self.hass = get_test_home_assistant()", "def setUp(self):\n lang = self._sim_lang\n self._simulator = self._find_resource(\n f\"drake/examples/hardware_sim/hardware_sim_{lang}\")\n self._example_scenarios = self._find_resource(\n \"drake/examples/hardware_sim/example_scenarios.yaml\")\n self._test_scenarios = self._find_resource(\n \"drake/examples/hardware_sim/test/test_scenarios.yaml\")\n self._default_extra = {\n # For our smoke test, exit fairly quickly.\n \"simulation_duration\": 0.0625,\n }", "def setUp(self):\n self.t = Task()", "def setUp(self):\n self.t = Task()", "def setUp(self):\n super().setUp()\n self.runner = CliRunner()", "def setUp(self):\r\n super(EETestCase, self).setUp()" ]
[ "0.82482773", "0.82482773", "0.81176686", "0.800283", "0.7907327", "0.78918254", "0.7887326", "0.7848355", "0.7842833", "0.7832785", "0.7832785", "0.781454", "0.78136706", "0.7806924", "0.78026885", "0.78026885", "0.77940094", "0.7776961", "0.7776961", "0.7776961", "0.7776961", "0.7776961", "0.7776961", "0.7776961", "0.7776961", "0.7776961", "0.7776961", "0.7776961", "0.7776961", "0.7776961", "0.7776961", "0.7776961", "0.7776961", "0.7776961", "0.7776961", "0.7776961", "0.7776961", "0.7776961", "0.7776961", "0.7776961", "0.7776961", "0.7776961", "0.7776961", "0.7776961", "0.7766595", "0.77608186", "0.77478987", "0.7743035", "0.76929235", "0.76929235", "0.768341", "0.7623276", "0.7608938", "0.7608938", "0.7608938", "0.7608938", "0.7608938", "0.7608938", "0.7608938", "0.7608938", "0.7608938", "0.75897497", "0.75282216", "0.7513549", "0.7501416", "0.7496145", "0.7493589", "0.7474445", "0.7467448", "0.7464891", "0.7457519", "0.7449974", "0.7449959", "0.74333304", "0.7428299", "0.7428299", "0.7428299", "0.7425823", "0.74212027", "0.74118286", "0.7410674", "0.7410674", "0.7410674", "0.7410674", "0.7410674", "0.7410674", "0.7410674", "0.7410674", "0.7410674", "0.7410674", "0.7410674", "0.7410674", "0.7376384", "0.7364325", "0.7359819", "0.7359819", "0.7359506", "0.73563415", "0.73563415", "0.73493826", "0.73490524" ]
0.0
-1
Test that a generated protocol's serialisation + deserialisation work correctly.
def test_generated_protocol_serialisation(self): # create a message reply_message = {1: "number one", 2: "number two", 7: "number seven"} # message 1 message = TwoPartyNegotiationMessage( message_id=1, dialogue_reference=(str(0), ""), target=0, performative=TwoPartyNegotiationMessage.Performative.INFORM_REPLY, reply_message=reply_message, ) # serialise the message encoded_message_in_bytes = TwoPartyNegotiationSerializer().encode(message) # deserialise the message decoded_message = TwoPartyNegotiationSerializer().decode( encoded_message_in_bytes ) # Compare the original message with the serialised+deserialised message assert decoded_message.message_id == message.message_id assert decoded_message.dialogue_reference == message.dialogue_reference assert decoded_message.dialogue_reference[0] == message.dialogue_reference[0] assert decoded_message.dialogue_reference[1] == message.dialogue_reference[1] assert decoded_message.target == message.target assert decoded_message.performative == message.performative assert decoded_message.reply_message == message.reply_message
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_proto_serialization() -> None:\n\n uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n obj = SpecificLocation(id=uid, name=\"Test\")\n\n blob = SpecificLocation.get_protobuf_schema()(id=sy.serialize(uid), name=\"Test\")\n\n assert sy.serialize(obj, to_proto=True) == blob\n assert sy.serialize(obj, to_proto=True) == blob\n assert sy.serialize(obj, to_proto=True) == blob", "def test_proto_deserialization() -> None:\n\n uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n obj = SpecificLocation(id=uid)\n\n blob = SpecificLocation.get_protobuf_schema()(id=sy.serialize(uid))\n\n obj2 = sy.deserialize(blob=blob, from_proto=True)\n assert obj == obj2", "def test_default_serialization() -> None:\n\n uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n obj = SpecificLocation(id=uid, name=\"Test\")\n\n blob = sy.serialize(obj, to_proto=True)\n\n assert sy.serialize(obj) == blob", "def test_protocols(container, protocol):\n assert isinstance(container, protocol)", "def test_buildProtocol(self):\n f = AvatarFactory('world')\n p = f.buildProtocol(None)\n self.assertEqual(p.factory, f)\n self.assertEqual(p.world, 'world')\n self.assertTrue(isinstance(p, AvatarProtocol))", "def test_default_deserialization() -> None:\n\n uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n obj = SpecificLocation(id=uid, name=\"Test\")\n\n blob = SpecificLocation.get_protobuf_schema()(id=sy.serialize(uid))\n\n obj2 = sy.deserialize(blob=blob)\n assert obj == obj2", "def test_serialize_deserialize1(self):\n for command in Command:\n serialized = command.serialize()\n deserialized = Command.deserialize(serialized)\n self.assertTrue(deserialized is command)", "def test_serialization_no_conformers(self):\n mol = Molecule.from_smiles(\"CCO\")\n\n dict_copy = Molecule.from_dict(mol.to_dict())\n assert mol == dict_copy\n\n # TODO: yaml_copy = Molecule.from_yaml(mol.to_yaml())\n with pytest.raises(NotImplementedError):\n mol.to_toml()\n\n bson_copy = Molecule.from_bson(mol.to_bson())\n assert mol == bson_copy\n\n json_copy = Molecule.from_json(mol.to_json())\n assert mol == json_copy\n\n # TODO: round-trip when from_xml is implemented\n mol_as_xml = mol.to_xml()\n with pytest.raises(NotImplementedError):\n Molecule.from_xml(mol_as_xml)\n\n messagepack_copy = Molecule.from_messagepack(mol.to_messagepack())\n assert mol == messagepack_copy\n\n pickle_copy = pickle.loads(pickle.dumps(mol))\n assert mol == pickle_copy", "def test_serialization_no_conformers(self):\n mol = Molecule.from_smiles(\"CCO\")\n\n dict_copy = Molecule.from_dict(mol.to_dict())\n assert mol == dict_copy\n\n # TODO: yaml_copy = Molecule.from_yaml(mol.to_yaml())\n with pytest.raises(NotImplementedError):\n mol.to_toml()\n\n bson_copy = Molecule.from_bson(mol.to_bson())\n assert mol == bson_copy\n\n json_copy = Molecule.from_json(mol.to_json())\n assert mol == json_copy\n\n # TODO: round-trip when from_xml is implemented\n mol_as_xml = mol.to_xml()\n with pytest.raises(NotImplementedError):\n Molecule.from_xml(mol_as_xml)\n\n messagepack_copy = Molecule.from_messagepack(mol.to_messagepack())\n assert mol == messagepack_copy\n\n pickle_copy = pickle.loads(pickle.dumps(mol))\n assert mol == pickle_copy", "def test_serialize(self):\n self.assert_raises(TypeError, self.instance.serialize, (1,))", "def test_proto_spec(self):\n proto_name = 'org.xlattice.upax'\n node_reg = reg.NodeReg()\n proto_reg = reg.ProtoReg(proto_name, node_reg)\n msg_reg = reg.MsgReg(proto_reg)\n 
proto_spec = M.ProtoSpec(proto_name, proto_reg)\n self.assertEqual(proto_name, proto_spec.name)\n parent = M.ProtoSpec(proto_name, proto_reg)\n\n msg_name = 'logEntry'\n # the enum is not used\n enum = M.EnumSpec.create('Joe', [\n ('oh', 92), ('hello', 47), ('there', 322), ])\n fields = [\n # pylint: disable=no-member\n M.FieldSpec(\n msg_reg,\n 'timestamp',\n FieldTypes.F_UINT32,\n Quants.REQUIRED,\n 0),\n M.FieldSpec(\n msg_reg,\n 'node_id',\n FieldTypes.F_BYTES20,\n Quants.REQUIRED,\n 1),\n M.FieldSpec(\n msg_reg,\n 'key',\n FieldTypes.F_BYTES20,\n Quants.REQUIRED,\n 2),\n M.FieldSpec(\n msg_reg,\n 'length',\n FieldTypes.V_UINT32,\n Quants.REQUIRED,\n 3),\n M.FieldSpec(\n msg_reg,\n 'by_',\n FieldTypes.L_STRING,\n Quants.REQUIRED,\n 4),\n M.FieldSpec(\n msg_reg,\n 'path',\n FieldTypes.L_STRING,\n Quants.REQUIRED,\n 5),\n ]\n msg_spec = M.MsgSpec(msg_name, msg_reg, proto_spec)\n self.assertEqual(msg_name, msg_spec.name)\n for file in fields:\n msg_spec.add_field(file)\n\n # proto_spec.add_msg(msg_spec) # correctly commented out\n self.round_trip_poto_spec_via_string(proto_spec) # GEEP", "def test_messagepack_serialization(self, molecule):\n serialized = molecule.to_messagepack()\n molecule_copy = Molecule.from_messagepack(serialized)\n assert molecule == molecule_copy\n assert molecule_copy.n_conformers == molecule.n_conformers\n assert np.allclose(molecule_copy.conformers[0], molecule.conformers[0])", "def test_encode_decode(self):\n assert self._test == pybinn.loads(pybinn.dumps(self._test))", "def test_buildProtocolStoresFactory(self):\n xs = self.factory.buildProtocol(None)\n self.assertIdentical(self.factory, xs.factory)", "def test_dumps(self):\n schema = self.UnitTestingSchema()\n serializer = JSONSchema()\n self.assertIsInstance(serializer.dumps(schema), str)", "def test_messagepack_serialization(self, molecule):\n serialized = molecule.to_messagepack()\n molecule_copy = Molecule.from_messagepack(serialized)\n assert molecule == molecule_copy", "def test_dumps(self):\n result = self.mapper.dumps(self.serialization)\n self.mapper.to_dict.assert_called_once_with(\"custom\")\n self.serialization.assert_called_once_with(\n self.mapper.to_dict.return_value\n )\n self.assertIs(result, self.serialization.return_value)", "def test_serialize_no_metadata(self):\n pass # pragma: no cover", "def test_decoding_method(self):\n data = service_call.encode_call(\"foo\", [42])\n name, params = service_call.decode_call(data)\n\n self.assertEqual(name, \"foo\")\n self.assertEqual(params, [42])", "def test_decode(self):\n pass # TODO(tlarsen)", "def testDoNotEncodeStrangeObjects(self):\n class BogusObject(object):\n\n def check_initialized(self):\n pass\n\n self.assertRaises(TypeError,\n protojson.encode_message,\n BogusObject())", "def test_serialization():\n bb_1 = t2.TBoundingBox(0.4, 0.3, 0.1, top=None) # type:ignore forcing some None/null values\n bb_2 = t2.TBoundingBox(0.4, 0.3, 0.1, top=0.2) # type: ignore\n p1 = t2.TPoint(x=0.1, y=0.1)\n p2 = t2.TPoint(x=0.3, y=None) # type:ignore\n geo = t2.TGeometry(bounding_box=bb_1, polygon=[p1, p2])\n geo_s = t2.TGeometrySchema()\n s: str = geo_s.dumps(geo)\n assert not \"null\" in s\n geo = t2.TGeometry(bounding_box=bb_2, polygon=[p1, p2])\n s: str = geo_s.dumps(geo)\n assert not \"null\" in s", "def test_convert_proto_plus_to_protobuf_if_protobuf(self):\n protobuf = ProtobufFixture()\n converted = util.convert_proto_plus_to_protobuf(protobuf)\n self.assertEqual(protobuf, converted)", "def test_decode():\n graph = create_object_graph(\"example\", 
testing=True)\n codec = graph.pubsub_message_schema_registry.find(DerivedSchema.MEDIA_TYPE)\n message = dumps({\n \"data\": \"data\",\n \"mediaType\": DerivedSchema.MEDIA_TYPE,\n })\n assert_that(codec.decode(message), is_(equal_to({\n \"data\": \"data\",\n \"media_type\": DerivedSchema.MEDIA_TYPE,\n })))", "def test_validation(protocol_registry):\n\n # pylint: disable=abstract-class-instantiated,function-redefined\n\n class InputsGenerator(protocol_registry, RelaxInputsGenerator):\n\n _calc_types = None\n _relax_types = None\n\n with pytest.raises(TypeError):\n InputsGenerator()\n\n class InputsGenerator(protocol_registry, RelaxInputsGenerator):\n\n _calc_types = {'relax': {}}\n _relax_types = None\n\n def get_builder(self):\n pass\n\n with pytest.raises(RuntimeError):\n InputsGenerator()\n\n class InputsGenerator(protocol_registry, RelaxInputsGenerator):\n\n _calc_types = None\n _relax_types = {RelaxType.ATOMS: 'description'}\n\n def get_builder(self):\n pass\n\n with pytest.raises(RuntimeError):\n InputsGenerator()\n\n class InputsGenerator(protocol_registry, RelaxInputsGenerator):\n\n _calc_types = {'relax': {}}\n _relax_types = {'invalid-type': 'description'}\n\n def get_builder(self):\n pass\n\n with pytest.raises(RuntimeError):\n InputsGenerator()", "def testNotJSON(self):\n self.assertRaises(ValueError,\n protojson.decode_message, MyMessage, '{this is not json}')", "def testBinaryProtocolEof(self):\n self.eofTestHelper(TBinaryProtocol.TBinaryProtocolFactory())\n self.eofTestHelperStress(TBinaryProtocol.TBinaryProtocolFactory())", "def test_large_msg(self):\n self.proto.makeConnection(self.transport)\n outstr = \"test\" * AMP_MAXLEN\n self.proto.data_to_server(MsgServer2Portal, 1, test=outstr)\n\n if pickle.HIGHEST_PROTOCOL == 5:\n # Python 3.8+\n self.transport.write.assert_called_with(\n b\"\\x00\\x04_ask\\x00\\x011\\x00\\x08_command\\x00\\x10MsgServer2Portal\\x00\\x0bpacked_data\"\n b\"\\x00wx\\xda\\xed\\xc6\\xc1\\t\\x80 \\x00@Q#=5Z\\x0b\\xb8\\x80\\x13\\xe85h\\x80\\x8e\\xbam`Dc\\xf4><\\xf8g\"\n b\"\\x1a[\\xf8\\xda\\x97\\xa3_\\xb1\\x95\\xdaz\\xbe\\xe7\\x1a\\xde\\x03\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"\n b\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"\n b\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"\n b\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\xe0\\x1f\\x1eP\\x1d\\x02\\r\\x00\\rpacked_data.2\"\n b\"\\x00Zx\\xda\\xed\\xc3\\x01\\r\\x00\\x00\\x08\\xc0\\xa0\\xb4&\\xf0\\xfdg\\x10a\\xa3\"\n b\"\\xd9RUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUU\\xf5\\xfb\\x03m\\xe0\\x06\"\n b\"\\x1d\\x00\\rpacked_data.3\\x00Zx\\xda\\xed\\xc3\\x01\\r\\x00\\x00\\x08\\xc0\\xa0\\xb4&\\xf0\\xfdg\\x10a\"\n b\"\\xa3fSUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUU\\xf5\\xfb\\x03n\\x1c\"\n b\"\\x06\\x1e\\x00\\rpacked_data.4\\x00Zx\\xda\\xed\\xc3\\x01\\t\\x00\\x00\\x0c\\x03\\xa0\\xb4O\\xb0\\xf5gA\"\n b\"\\xae`\\xda\\x8b\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\"\n b\"\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\"\n b\"\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\"\n b\"\\xaa\\xaa\\xaa\\xdf\\x0fnI\\x06,\\x00\\rpacked_data.5\\x00\\x18x\\xdaK-.)I\\xc5\\x8e\\xa7\\xb22@\\xc0\"\n b\"\\x94\\xe2\\xb6)z\\x00Z\\x1e\\x0e\\xb6\\x00\\x00\"\n )\n elif pickle.HIGHEST_PROTOCOL 
== 4:\n # Python 3.7\n self.transport.write.assert_called_with(\n b\"\\x00\\x04_ask\\x00\\x011\\x00\\x08_command\\x00\\x10MsgServer2Portal\\x00\\x0bpacked_data\"\n b\"\\x00wx\\xda\\xed\\xc6\\xc1\\t\\x80 \\x00@Q#o\\x8e\\xd6\\x02-\\xe0\\x04z\\r\\x1a\\xa0\\xa3m+$\\xd2\"\n b\"\\x18\\xbe\\x0f\\x0f\\xfe\\x1d\\xdf\\x14\\xfe\\x8e\\xedjO\\xac\\xb9\\xd4v\\xf6o\\x0f\\xf3\\x00\\x00\"\n b\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"\n b\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"\n b\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"\n b\"\\x00X\\xc3\\x00P\\x10\\x02\\x0c\\x00\\rpacked_data.2\\x00Zx\\xda\\xed\\xc3\\x01\\r\\x00\\x00\\x08\"\n b\"\\xc0\\xa0\\xb4&\\xf0\\xfdg\\x10a\\xa3\\xd9RUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUU\"\n b\"\\xf5\\xfb\\x03m\\xe0\\x06\\x1d\\x00\\rpacked_data.3\\x00Zx\\xda\\xed\\xc3\\x01\\r\\x00\\x00\\x08\"\n b\"\\xc0\\xa0\\xb4&\\xf0\\xfdg\\x10a\\xa3fSUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUU\"\n b\"\\xf5\\xfb\\x03n\\x1c\\x06\\x1e\\x00\\rpacked_data.4\\x00Zx\\xda\\xed\\xc3\\x01\\t\\x00\\x00\\x0c\"\n b\"\\x03\\xa0\\xb4O\\xb0\\xf5gA\\xae`\\xda\\x8b\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\"\n b\"\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\"\n b\"\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\"\n b\"\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xdf\\x0fnI\\x06,\\x00\\rpacked_data.5\"\n b\"\\x00\\x18x\\xdaK-.)I\\xc5\\x8e\\xa7\\xb22@\\xc0\\x94\\xe2\\xb6)z\\x00Z\\x1e\\x0e\\xb6\\x00\\x00\"\n )", "def test_buildProtocol(self):\n queryData = (\"fromUser\", None, None)\n factory = irc.DccChatFactory(None, queryData)\n protocol = factory.buildProtocol(\"127.0.0.1\")\n self.assertIsInstance(protocol, irc.DccChat)\n self.assertEqual(protocol.factory, factory)", "def test_proto_export_inverse(tmp_path, x, name):\n config = Config()\n typedef, message = x\n with tempfile.NamedTemporaryFile(\n mode=\"r+\", dir=str(tmp_path), suffix=\".proto\", delete=True\n ) as outfile:\n\n typedef_map = {name: typedef}\n\n protofile.export_proto(typedef_map, output_file=outfile)\n outfile.flush()\n\n outfile.seek(0)\n new_typedef_map = protofile.import_proto(config, input_file=outfile)\n\n config.known_types.update(new_typedef_map)\n # validate\n for name, typedef in new_typedef_map.items():\n blackboxprotobuf.validate_typedef(typedef, config=config)\n\n def _check_field_types(typedef1, typedef2):\n for field_num in typedef1.keys():\n # make sure we don't drop keys\n assert field_num in typedef2\n assert typedef1[field_num][\"type\"] == typedef2[field_num][\"type\"]\n if typedef1[field_num][\"type\"] == \"message\":\n message_typedef1 = None\n message_typedef2 = None\n if \"message_typedef\" in typedef1[field_num]:\n message_typedef1 = typedef1[field_num][\"message_typedef\"]\n elif \"message_type_name\" in typedef1[field_num]:\n assert typedef1[field_num][\"message_type_name\"] in typedef_map\n message_typedef1 = typedef_map[\n typedef1[field_num][\"message_type_name\"]\n ]\n if \"message_typedef\" in typedef2[field_num]:\n message_typedef2 = typedef2[field_num][\"message_typedef\"]\n elif \"message_type_name\" in typedef2[field_num]:\n assert (\n typedef2[field_num][\"message_type_name\"] in new_typedef_map\n )\n message_typedef2 = new_typedef_map[\n 
typedef2[field_num][\"message_type_name\"]\n ]\n\n _check_field_types(message_typedef1, message_typedef2)\n\n note(typedef_map)\n note(new_typedef_map)\n for name, typedef in typedef_map.items():\n _check_field_types(typedef, new_typedef_map[name])\n\n note(new_typedef_map[name])\n # try to actually encode a message with the typedef\n encode_forward = length_delim.encode_message(message, config, typedef_map[name])\n\n config.known_types = new_typedef_map\n encode_backward = length_delim.encode_message(\n message, config, new_typedef_map[name]\n )\n\n decode_forward, _, _, _ = length_delim.decode_message(\n encode_forward, config, new_typedef_map[name]\n )\n decode_backward, _, _, _ = length_delim.decode_message(\n encode_backward, config, typedef_map[name]\n )", "def test_serialize_parse(\n tmp_path: Path,\n simple_graph: Graph,\n simple_dataset: Dataset,\n args: Tuple[str, GraphType, DestinationType, Optional[str]],\n) -> None:\n serializer_name, graph_type, destination_type, encoding = args\n format = serializer_dict[serializer_name]\n graph: Union[Graph, Dataset]\n if graph_type == GraphType.QUAD:\n graph = simple_dataset\n elif graph_type == GraphType.TRIPLE:\n graph = simple_graph\n else:\n raise ValueError(f\"graph_type {graph_type!r} is not supported\")\n with destination_type.make_ref(tmp_path) as dest_ref:\n destination = None if dest_ref is None else narrow_dest_param(dest_ref.param)\n serialize_result = graph.serialize(\n destination=destination,\n format=serializer_name,\n encoding=encoding,\n )\n\n logging.debug(\"serialize_result = %r, dest_ref = %s\", serialize_result, dest_ref)\n\n if dest_ref is None:\n if encoding is None:\n assert isinstance(serialize_result, str)\n serialized_data = serialize_result\n else:\n assert isinstance(serialize_result, bytes)\n serialized_data = serialize_result.decode(encoding)\n else:\n assert isinstance(serialize_result, Graph)\n assert dest_ref.path.exists()\n serialized_data = dest_ref.path.read_bytes().decode(\n \"utf-8\" if encoding is None else encoding\n )\n\n logging.debug(\"serialized_data = %s\", serialized_data)\n check_serialized(format, graph, serialized_data)", "def test_dumps(self):\n data = \"something\"\n result = self.mapper.loads(self.deser_fn, data)\n self.mapper.from_dict.assert_called_once_with(\n self.deser_fn.return_value, \"custom\"\n )\n self.deser_fn.assert_called_once_with(data)\n self.assertIs(result, self.mapper.from_dict.return_value)", "def test_json_serialization(self, molecule):\n # TODO: Test round-trip, on mini_drug_bank, when to_json bug is fixed, see #547\n mol = Molecule.from_smiles(\"CCO\")\n molecule_copy = Molecule.from_json(mol.to_json())\n assert molecule_copy == mol\n mol.generate_conformers(n_conformers=1)\n with pytest.raises(TypeError):\n mol.to_json()", "def test_serializer():\n\n my_serializer = Serializer()\n\n assert my_serializer.serialize(\"test\") == \"test\"\n my_serializer.serialize = Mock()\n\n my_serializer.disable()\n assert my_serializer(\"test\") == \"test\"\n my_serializer.serialize.assert_not_called()\n\n my_serializer.enable()\n my_serializer(\"test\")\n my_serializer.serialize.assert_called_once()", "def test_encoding_method_name(self):\n data = service_call.encode_call(\"foo\", [1, 2, 3])\n data = serial_datagram.decode(data)\n\n u = msgpack.Unpacker(encoding='ascii')\n u.feed(data)\n command = next(u)\n\n self.assertEqual(command, ['foo', [1, 2, 3]])", "def test06_serialize(self):\n uri = URIRef('http://ex.org/ldprs')\n g = Graph()\n g.add((uri, RDF.type, 
URIRef('http://ex.org/some_type')))\n g.add((URIRef('http://ex.org/a'), URIRef('http://ex.org/b'), Literal('LITERAL')))\n r = LDPRS(uri=uri, content=g)\n s = r.serialize()\n self.assertIn('@prefix ldp: <http://www.w3.org/ns/ldp#> .', s)\n self.assertIn('ldprs', s) # might prefix or not\n self.assertIn('some_type', s) # might prefix or not\n self.assertIn('ldp:RDFSource', s)\n self.assertIn('ldp:Resource', s)\n self.assertIn('\"LITERAL\"', s)\n #\n s = r.serialize(omits=['content'])\n self.assertIn('ldprs', s) # might prefix or not\n self.assertNotIn('some_type', s) # might prefix or not\n self.assertIn('ldp:RDFSource', s)\n self.assertIn('ldp:Resource', s)\n self.assertNotIn('\"LITERAL\"', s)", "def test_serialize(state):\n assert len(state.players) == 2\n st_data = state.to_data()\n\n assert st_data, \"Expect that we would have some data!\"\n assert len(st_data[\"deck\"]) == 52\n assert len(st_data[\"discarded\"]) == 0\n # Render player subset properly\n assert len(st_data[\"players\"]) == 2\n assert len(st_data[\"players\"][0][\"hand\"]) == 0\n\n new_state = MockState.from_data(st_data)\n assert new_state.__class__ == MockState\n st_data_new = new_state.to_data()\n\n assert st_data == st_data_new", "def test_encode():\n graph = create_object_graph(\"example\", testing=True)\n codec = graph.pubsub_message_schema_registry.find(DerivedSchema.MEDIA_TYPE)\n assert_that(loads(codec.encode(data=\"data\", media_type=DerivedSchema.MEDIA_TYPE)), is_(equal_to({\n \"data\": \"data\",\n \"mediaType\": DerivedSchema.MEDIA_TYPE,\n })))", "def testWrongTypeAssignment(self):\n self.assertRaises(messages.ValidationError,\n protojson.decode_message,\n MyMessage, '{\"a_string\": 10}')", "def testProtocolReturn(self):\n self.assertEqual(\n self.protocol,\n self.mr.protocol\n )\n\n self.mr._protocol = 'burp'\n\n self.assertEqual(\n 'burp',\n self.mr.protocol\n )", "def test_yaml_serialization(self, molecule):\n serialized = molecule.to_yaml()\n molecule_copy = Molecule.from_yaml(serialized)\n assert molecule == molecule_copy\n assert molecule_copy.n_conformers == molecule.n_conformers\n assert np.allclose(molecule_copy.conformers[0], molecule.conformers[0])", "def test_pickle_serialization(self, molecule):\n serialized = pickle.dumps(molecule)\n molecule_copy = pickle.loads(serialized)\n assert molecule == molecule_copy\n assert molecule_copy.n_conformers == molecule.n_conformers\n assert np.allclose(molecule_copy.conformers[0], molecule.conformers[0])", "def test_decode():", "def test_serialization(self, example_pep_cfg_path):\n td = tempfile.mkdtemp()\n fn = os.path.join(td, \"serialized_sample.yaml\")\n p = Project(cfg=example_pep_cfg_path)\n sample = p.samples[0]\n sample.set = set([\"set\"])\n sample.dict = dict({\"dict\": \"dict\"})\n sample.list = list([\"list\"])\n sample.to_yaml(fn)\n with open(fn, \"r\") as f:\n contents = f.read()\n assert \"set\" in contents\n assert \"dict\" in contents\n assert \"list\" in contents", "def test_deserialization(self):\n global TEST_N\n global TEST_E\n global TEST_ID\n with open('tests/simple_private_key.pem', 'rb') as key_file:\n private_key = serialization.load_pem_private_key(\n key_file.read(),\n password=None,\n backend=default_backend()\n )\n TEST_ID = \"stuffblah\"\n\n token = scitokens.SciToken(key=private_key, key_id=TEST_ID)\n token.update_claims({\"test\": \"true\"})\n serialized_token = token.serialize(issuer=\"http://localhost:8080/\")\n\n public_numbers = private_key.public_key().public_numbers()\n TEST_E = public_numbers.e\n TEST_N = 
public_numbers.n\n\n self.assertEqual(len(serialized_token.decode('utf8').split(\".\")), 3)\n\n scitoken = scitokens.SciToken.deserialize(serialized_token, insecure=True)\n\n self.assertIsInstance(scitoken, scitokens.SciToken)\n\n token = scitokens.SciToken(key=private_key, key_id=\"doesnotexist\")\n serialized_token = token.serialize(issuer=\"http://localhost:8080/\")\n with self.assertRaises(scitokens.utils.errors.MissingKeyException):\n scitoken = scitokens.SciToken.deserialize(serialized_token, insecure=True)", "def test_pickle(self):\n import pickle\n\n forcefield_1 = ForceField(xml_simple_ff)\n pickled = pickle.dumps(forcefield_1)\n forcefield_2 = pickle.loads(pickled)\n assert forcefield_1.to_string() == forcefield_2.to_string()", "def test_load_protocol():\n\n # version 0.0.0 files\n for i in [0]:\n yield load_protocol, (path(__file__).parent /\n path('protocols') /\n path('protocol %d v%s' % (i, Version(0,0,0))))\n\n # version 0.1.0 files\n for i in [0]:\n yield load_protocol, (path(__file__).parent /\n path('protocols') /\n path('protocol %d v%s' % (i, Version(0,1,0))))", "def test_encode_data(self):\n if self._cls == 'MetaschemaType':\n for x in self._valid_decoded:\n self.assert_raises(NotImplementedError, self.import_cls.encode_type, x)\n self.assert_raises(NotImplementedError, self.import_cls.encode_data,\n x, self.typedef)\n self.assert_raises(NotImplementedError, self.import_cls.decode_data, None,\n self.typedef)\n else:\n for x in self._valid_decoded:\n y = self.import_cls.encode_type(x, **self._encode_type_kwargs)\n z = self.import_cls.encode_data(x, y, **self._encode_data_kwargs)\n self.import_cls.encode_data_readable(x, None)\n self.import_cls.encode_data_readable(x, y)\n x2 = self.import_cls.decode_data(z, y)\n self.assert_result_equal(x2, x)\n if self._cls not in ['JSONNullMetaschemaType', 'AnyMetaschemaType']:\n self.assert_raises(MetaschemaTypeError,\n self.import_cls.encode_type, None)", "def test_deserialize(self):\r\n\r\n # test that from_json produces no exceptions\r\n self.assertDeserializeEqual('10:20:30', '\"10:20:30\"')", "def parse_round_trip(self):\n parsed = self.test_proto.parse()\n round_trip = avro.protocol.parse(str(parsed))\n self.assertEqual(parsed, round_trip)", "def test_init(self):\n default_encoder_type = type(Encoder())\n\n payload = Payload()\n self.assertIsInstance(payload.encoder, default_encoder_type)\n\n json_encoder = JSONEncoder()\n payload = Payload(encoder=json_encoder)\n self.assertEqual(payload.encoder, json_encoder)", "def test_serialization():\n\n # Class is serializable.\n ray.put(DummyPredictor)\n\n # Instance is not serializable.\n predictor = DummyPredictor()\n with pytest.raises(PredictorNotSerializableException):\n ray.put(predictor)", "def make_serialize_parse_tests() -> Generator[ParameterSet, None, None]:\n xfails: Dict[\n Tuple[str, GraphType, DestinationType, Optional[str]],\n Union[MarkDecorator, Mark],\n ] = {}\n for serializer_name, destination_type in itertools.product(\n serializer_dict.keys(), DESTINATION_TYPES\n ):\n format = serializer_dict[serializer_name]\n encodings: Set[Optional[str]] = {*format.info.encodings, None}\n for encoding, graph_type in itertools.product(\n encodings, format.info.graph_types\n ):\n xfail = xfails.get(\n (serializer_name, graph_type, destination_type, encoding)\n )\n if not xfail:\n if serializer_name in (\"trig\") and graph_type is GraphType.TRIPLE:\n xfail = pytest.mark.xfail(\n raises=AssertionError,\n reason=\"\"\"\n TriG serializes non-context aware stores incorrectly, adding a 
blank\n node graph name which breaks round tripping.\n \"\"\",\n )\n if serializer_name in (\"json-ld\"):\n xfail = pytest.mark.xfail(\n raises=AssertionError,\n reason=\"\"\"\n JSON-LD is dropping datatype:\n - rdflib.term.Literal('XSD string'),\n + rdflib.term.Literal('XSD string', datatype=rdflib.term.URIRef('http://www.w3.org/2001/XMLSchema#string')),\n \"\"\",\n )\n elif serializer_name in (\"hext\") and graph_type is GraphType.QUAD:\n xfail = pytest.mark.xfail(\n raises=AssertionError,\n reason=\"\"\"\n hext is injecting datatype:\n - rdflib.term.Literal('typeless', datatype=rdflib.term.URIRef('http://www.w3.org/2001/XMLSchema#string')),\n + rdflib.term.Literal('typeless'),\n \"\"\",\n )\n marks = (xfail,) if xfail is not None else ()\n yield pytest.param(\n (serializer_name, graph_type, destination_type, encoding),\n id=f\"{serializer_name}-{graph_type.name}-{destination_type.name}-{encoding}\",\n marks=marks,\n )", "def setUp(self):\n super(ProtocolMapperTestBase, self).setUp()\n self.Reinitialize(path_method='my_method',\n content_type='application/x-google-protobuf')\n\n self.request_message = Request1()\n self.request_message.integer_field = 1\n self.request_message.string_field = u'something'\n self.request_message.enum_field = Enum1.VAL1\n\n self.response_message = Response1()\n self.response_message.integer_field = 1\n self.response_message.string_field = u'something'\n self.response_message.enum_field = Enum1.VAL1", "def test_deserialize_nofmt():\n inst = AsciiTableSerialize.AsciiTableSerialize()\n test_msg = backwards.unicode2bytes('lskdbjs;kfbj')\n nt.assert_raises(RuntimeError, inst.deserialize, test_msg)", "def test_bson_serialization(self, molecule):\n serialized = molecule.to_bson()\n molecule_copy = Molecule.from_bson(serialized)\n assert molecule == molecule_copy\n assert molecule_copy.n_conformers == molecule.n_conformers\n assert np.allclose(molecule_copy.conformers[0], molecule.conformers[0])", "def test_dump_single(self):\n result = self.serializer.dump(self.schema_to_serialize)\n self.assertIsInstance(result, dict)", "def test_serialize_echorequest_message():\n expected = open('test_data/echorequest_packet_v3.bin', 'rb').read()\n actual = serialize(EchoRequestMessage(278554190))\n\n assert actual == expected", "def write(self, proto):\n pass", "def test__pickle_unpickle(self):\n pass", "def testReadAndWriteSerializedEventData(self):\n expected_event_data = events.EventData()\n expected_event_data._event_data_stream_identifier = 'event_data_stream.1'\n expected_event_data._ignored = 'Not serialized'\n expected_event_data._parser_chain = 'test_parser'\n expected_event_data.data_type = 'test:event2'\n\n expected_event_data.empty_string = ''\n expected_event_data.zero_integer = 0\n expected_event_data.integer = 34\n expected_event_data.float = -122.082203542683\n expected_event_data.string = 'Normal string'\n expected_event_data.unicode_string = 'And I am a unicorn.'\n expected_event_data.my_list = ['asf', 4234, 2, 54, 'asf']\n expected_event_data.a_tuple = ('some item', [234, 52, 15])\n expected_event_data.null_value = None\n\n json_string = (\n json_serializer.JSONAttributeContainerSerializer.WriteSerialized(\n expected_event_data))\n\n self.assertIsNotNone(json_string)\n\n event_data = (\n json_serializer.JSONAttributeContainerSerializer.ReadSerialized(\n json_string))\n\n self.assertIsNotNone(event_data)\n self.assertIsInstance(event_data, events.EventData)\n\n expected_event_data_dict = {\n '_event_data_stream_identifier': 'event_data_stream.1',\n 
'_parser_chain': 'test_parser',\n 'a_tuple': ('some item', [234, 52, 15]),\n 'data_type': 'test:event2',\n 'empty_string': '',\n 'integer': 34,\n 'float': -122.082203542683,\n 'my_list': ['asf', 4234, 2, 54, 'asf'],\n 'string': 'Normal string',\n 'unicode_string': 'And I am a unicorn.',\n 'zero_integer': 0}\n\n event_data_dict = event_data.CopyToDict()\n self.assertEqual(event_data_dict, expected_event_data_dict)", "def __init__(self, test_proto):\n super().__init__(\"parse_round_trip\")\n self.test_proto = test_proto", "def test_VersionWire():\n # verRelayTxFalse and verRelayTxFalseEncoded is a version message as of\n # BIP0037Version with the transaction relay disabled.\n verRelayTxFalse = baseVersionBIP0037()\n verRelayTxFalse.disableRelayTx = True\n verRelayTxFalseEncoded = baseVersionBIP0037Encoded()\n verRelayTxFalseEncoded[-1] = 0\n\n bv = baseVersionBIP0037()\n tests = [\n (bv, bv, baseVersionBIP0037Encoded()),\n (verRelayTxFalse, verRelayTxFalse, verRelayTxFalseEncoded),\n ]\n\n for msgIn, msgOut, msgEnc in tests:\n # Encode the message to wire format.\n b = msgIn.btcEncode(wire.ProtocolVersion)\n assert b == msgEnc\n\n # Decode the message from wire format.\n msg = msgversion.MsgVersion.btcDecode(msgEnc, wire.ProtocolVersion)\n assert sameMsgVersion(msg, msgOut)", "def test_convert_proto_plus_to_protobuf(self):\n proto_plus = ProtoPlusFixture()\n converted = util.convert_proto_plus_to_protobuf(proto_plus)\n # Assert that the converted proto is an instance of the protobuf\n # protobuf message class.\n self.assertIsInstance(converted, ProtobufMessageType)", "def test_create_from_serialized(self, molecule):\n serialized_molecule = molecule.to_dict()\n molecule_copy = Molecule(serialized_molecule)\n assert molecule == molecule_copy", "def test_binary_serialization() -> None:\n\n uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n obj = SpecificLocation(id=uid, name=\"Test\")\n\n blob = (\n b\"\\n/syft.core.io.location.specific.SpecificLocation\\x12\\x1a\\n\\x12\\n\\x10\"\n + b\"\\xfb\\x1b\\xb0g[\\xb7LI\\xbe\\xce\\xe7\\x00\\xab\\n\\x15\\x14\\x12\\x04Test\"\n )\n\n assert sy.serialize(obj, to_bytes=True) == blob\n assert sy.serialize(obj, to_bytes=True) == blob\n assert sy.serialize(obj, to_bytes=True) == blob", "def test_serialize(value, expected):\n assert json_dumps(value) == expected", "def test_serialization(valid_data):\n project: Project = Project.build(valid_data)\n serialized = project.dump()\n assert serialized == valid_data", "def parse_valid(self):\n try:\n self.test_proto.parse()\n except avro.errors.ProtocolParseException: # pragma: no coverage\n self.fail(f\"Valid protocol failed to parse: {self.test_proto!s}\")", "def test_get_protocol_version(self):\n server, client = loopback()\n client_protocol_version = client.get_protocol_version()\n server_protocol_version = server.get_protocol_version()\n\n assert isinstance(server_protocol_version, int)\n assert isinstance(client_protocol_version, int)\n\n assert server_protocol_version == client_protocol_version", "def test_yaml_serialization(self, molecule):\n serialized = molecule.to_yaml()\n molecule_copy = Molecule.from_yaml(serialized)\n assert molecule == molecule_copy", "def test_serialize_object(self):\n test_obj = self.TestObject(prop1='x', prop2=1234)\n\n with self.assertRaises(TypeError):\n serialize(test_obj)", "def read(cls, proto):\n pass", "def test_with_message():\n schema = data.WorkflowTransitionSchema()\n payload = {'transition': 'foo', 'message': 'A message'}\n deserialized = 
schema.deserialize(payload)\n assert isinstance(deserialized, dict)\n assert 'transition' in deserialized\n assert 'message' in deserialized", "async def test_create_engine_with_protocol(\n decoy: Decoy,\n subject: EngineStore,\n json_protocol_source: ProtocolSource,\n) -> None:\n protocol = ProtocolResource(\n protocol_id=\"my cool protocol\",\n protocol_key=None,\n created_at=datetime(year=2021, month=1, day=1),\n source=json_protocol_source,\n )\n\n result = await subject.create(\n run_id=\"run-id\",\n labware_offsets=[],\n protocol=protocol,\n )\n assert subject.current_run_id == \"run-id\"\n assert isinstance(result, StateSummary)\n assert isinstance(subject.runner, JsonRunner)\n assert isinstance(subject.engine, ProtocolEngine)", "def testPickle(self):\n global MyEnum\n global AnotherMessage\n global MyMessage\n\n class MyEnum(messages.Enum):\n val1 = 1\n val2 = 2\n\n class AnotherMessage(messages.Message):\n string = messages.StringField(1, repeated=True)\n\n class MyMessage(messages.Message):\n field1 = messages.IntegerField(1)\n field2 = messages.EnumField(MyEnum, 2)\n field3 = messages.MessageField(AnotherMessage, 3)\n\n message = MyMessage(field1=1, field2=MyEnum.val2,\n field3=AnotherMessage(string=['a', 'b', 'c']))\n message.set_unrecognized_field(\n 'exists', 'value', messages.Variant.STRING)\n message.set_unrecognized_field('repeated', ['list', 0, ('test',)],\n messages.Variant.STRING)\n unpickled = pickle.loads(pickle.dumps(message))\n self.assertEquals(message, unpickled)\n self.assertTrue(AnotherMessage.string is unpickled.field3.string.field)\n self.assertTrue('exists' in message.all_unrecognized_fields())\n self.assertEquals(('value', messages.Variant.STRING),\n message.get_unrecognized_field_info('exists'))\n self.assertEquals((['list', 0, ('test',)], messages.Variant.STRING),\n message.get_unrecognized_field_info('repeated'))", "def get_proto_serializer():\n def _serialize_proto(proto):\n return proto.SerializeToString()\n return _serialize_proto", "def test_serialization(self):\n serialized = self.Gs.as_dict()\n unserialized = BayesianNetwork.from_dict(serialized)\n\n self.assertDictEqual(serialized, unserialized.as_dict())", "def test_serialize_sinfo(self):\n self.assert_raises(RuntimeError, self.instance.serialize,\n self.testing_options['objects'][0],\n add_serializer_info=True)", "def test_create_from_serialized(self, molecule):\n serialized_molecule = molecule.__getstate__()\n molecule_copy = Molecule(serialized_molecule)\n assert molecule == molecule_copy", "def test_pickle_serialization(self, molecule):\n serialized = pickle.dumps(molecule)\n molecule_copy = pickle.loads(serialized)\n assert molecule == molecule_copy", "def test_can_deserialize_plain_object(self):\n handler = BaseRestHandler(mock.MagicMock(), mock.MagicMock())\n handler._write_buffer = []\n obj = SerializeMe()\n obj.key = \"value\"\n handler.write_object(obj)\n res = json.loads(handler._write_buffer[0])\n self.assertDictEqual(res, {\"key\": \"value\"})", "def test_jsonify_decode(self):\n\n Point = namedtuple('Point', ['x', 'y'], False)\n class Foo(Base):\n _table = ClassReadonlyProperty('foos')\n _primary_key = ClassReadonlyProperty('foo_id')\n\n foo_id = IDField('_id')\n str_field = StringField(default='this is default')\n date_field = DateField()\n int_field = IntField()\n bool_field = BoolField()\n list_field = ListField()\n tuple_field = TupleField(np=Point)\n\n json_str = '''{\n \"__class__\": \"Foo\",\n \"foo_id\": \"1234\",\n \"str_field\": \"anything\",\n \"int_field\": 123,\n 
\"date_field\": \"2014-12-13\",\n \"bool_field\": false,\n \"tuple_field\":{\n \"x\": 1,\n \"y\": 2\n }\n }'''\n foo = Foo.from_jsonify(json.loads(json_str))\n\n self.assertEqual(foo.foo_id, '1234')\n self.assertEqual(foo.int_field, 123)\n self.assertEqual(foo.bool_field, False)\n self.assertEqual(foo.date_field, datetime.date(2014, 12, 13))\n Point = namedtuple('Point', ['x', 'y'], False)\n self.assertEqual(foo.tuple_field, Point(x=1, y=2))", "def test_pickling_error():\n client, server = make_queue_pairs('localhost')\n\n # Attempt to push a non-JSON-able object to the queue\n with pytest.raises(TypeError):\n client.send_inputs(Test())", "def test_to_json(self):\n r = self.SEQ(\"AAGGCC\", name=\"seq1\")\n got = json.loads(r.to_json())\n expect = {\n \"name\": \"seq1\",\n \"seq\": \"AAGGCC\",\n \"moltype\": r.moltype.label,\n \"info\": None,\n \"type\": get_object_provenance(r),\n \"version\": __version__,\n }\n self.assertEqual(got, expect)", "def test_user(self):\n user = User(username=\"test\")\n print user.message\n print user.id\n bs = user.serialize()\n print \"to bytes: \" + bs\n user.deserialize(bs)\n print \"from deserialize: \" + str(user.message)", "def test_buildProtocolFactoryArguments(self):\n xs = self.factory.buildProtocol(None)\n\n self.assertEqual((None,), xs.args)\n self.assertEqual({\"test\": None}, xs.kwargs)", "def test_convert_protobuf_to_proto_plus(self):\n protobuf = ProtobufFixture()\n converted = util.convert_protobuf_to_proto_plus(protobuf)\n # Assert that the converted proto is an instance of the Message\n # wrapper class.\n self.assertIsInstance(converted, proto.Message)", "def testGeneratorType(self):", "def test_dump_case_objects(cli_args_fixture, tmp_json_dumpfile, node_fixture, crawl_strategy_fixture, protocol_fixture):\n # cli_args_fixture fixture is called but not used simply in order to mock it in render_json.dump()\n # arrange\n # - protocol\n proto_ref, proto_name, proto_blocking, proto_is_database = ('foo', 'bar', True, False)\n protocol = replace(protocol_fixture, ref=proto_ref, name=proto_name, blocking=proto_blocking,\n is_database=proto_is_database)\n # - crawl strategy\n cs_description, cs_name, cs_providers, cs_provider_args, cs_child_provider, cs_filter, cs_rewrites = \\\n ('foo', 'bar', ['baz'], {'buzz': 'buzzbuzz'}, {'qux': True}, {'quux': True}, {'quz': True})\n cs = replace(crawl_strategy_fixture, description=cs_description, name=cs_name, protocol=protocol,\n providers=cs_providers, provider_args=cs_provider_args, child_provider=cs_child_provider,\n service_name_filter=cs_filter, service_name_rewrites=cs_rewrites)\n # - node\n node_ref, provider, mux, from_hint, address, service_name, children, warnings, errors = \\\n ('fake_ref', 'provider', 'bar_mux', True, 'baz_add', 'buz_name', {'qux': 'child'},\n {'quux_warn': True}, {'quuz_err': True})\n node_fixture.crawl_strategy = cs\n node_fixture.provider = provider\n node_fixture.protocol = protocol\n node_fixture.protocol_mux = mux\n node_fixture.from_hint = from_hint\n node_fixture.address = address\n node_fixture.service_name = service_name\n node_fixture.children = children\n node_fixture.warnings = warnings\n node_fixture.errors = errors\n tree = {node_ref: node_fixture}\n\n # act\n render_json.dump(tree, tmp_json_dumpfile)\n loaded = json.load(open(tmp_json_dumpfile))\n loaded_tree = loaded.get('tree')\n node_dict = loaded_tree.get(node_ref)\n\n # assert\n # - node\n assert node_dict is not None\n assert provider == node_dict['provider']\n assert mux == node_dict['protocol_mux']\n assert 
from_hint == node_dict['from_hint']\n assert address == node_dict['address']\n assert service_name == node_dict['service_name']\n assert children == node_dict['children']\n assert warnings == node_dict['warnings']\n assert errors == node_dict['errors']\n # - protocol\n assert node_dict.get('protocol')\n assert node_dict['protocol']['ref'] == proto_ref\n assert node_dict['protocol']['name'] == proto_name\n assert node_dict['protocol']['blocking'] == proto_blocking\n assert node_dict['protocol']['is_database'] == proto_is_database\n # - crawl strategy\n assert node_dict.get('crawl_strategy')\n assert node_dict['crawl_strategy']['description'] == cs_description\n assert node_dict['crawl_strategy']['name'] == cs_name\n assert node_dict['crawl_strategy']['protocol']['ref'] == protocol.ref\n assert node_dict['crawl_strategy']['providers'] == cs_providers\n assert node_dict['crawl_strategy']['provider_args'] == cs_provider_args\n assert node_dict['crawl_strategy']['child_provider'] == cs_child_provider\n assert node_dict['crawl_strategy']['service_name_filter'] == cs_filter\n assert node_dict['crawl_strategy']['service_name_rewrites'] == cs_rewrites", "def test_generic_serializer_serialize_pack_success(self):\n # generate a pack\n expansion = get_expansion('dgm')\n pack = expansion.generate_pack()\n\n cards_to_serialize = pack.cards + pack.cards\n serializer = GenericSerializer()\n serialized_cards = serializer.serialize(cards_to_serialize)\n #import ipdb; ipdb.set_trace();\n # go through each card that was supposed to be serialized and\n for card in cards_to_serialize:\n self.assertTrue(card.name in serialized_cards)", "def test_deserialize(self):\n prop = VersionProperty(default=\"1.1.1\")\n self.assertEqual(prop.deserialize(\"1.1.1\"), \"1.1.1\")", "def _post_deserialize (self):\n pass", "def test_oef_serialization_description():\n foo_datamodel = DataModel(\n \"foo\",\n [\n Attribute(\"bar\", int, True, \"A bar attribute.\"),\n Attribute(\"location\", Location, True, \"A location attribute.\"),\n ],\n )\n desc = Description(\n {\"bar\": 1, \"location\": Location(10.0, 10.0)}, data_model=foo_datamodel\n )\n msg = OefSearchMessage(\n performative=OefSearchMessage.Performative.REGISTER_SERVICE,\n dialogue_reference=(str(1), \"\"),\n service_description=desc,\n )\n msg_bytes = OefSearchMessage.serializer.encode(msg)\n assert len(msg_bytes) > 0\n recovered_msg = OefSearchMessage.serializer.decode(msg_bytes)\n assert recovered_msg == msg", "def test_bson_serialization(self, molecule):\n serialized = molecule.to_bson()\n molecule_copy = Molecule.from_bson(serialized)\n assert molecule == molecule_copy", "def test_serialize(self):\n r = self.RNA(\"ugagg\")\n assert dumps(r)", "def test_literal_io_from_package(self):\n cwl = {\n \"cwlVersion\": \"v1.0\",\n \"class\": \"CommandLineTool\",\n \"inputs\": {\n \"url\": {\n \"type\": \"string\"\n }\n },\n \"outputs\": {\n \"values\": {\n \"type\": {\n \"type\": \"array\",\n \"items\": \"float\",\n }\n }\n }\n }\n body = {\n \"processDescription\": {\n \"process\": {\n \"id\": self._testMethodName,\n \"title\": \"some title\",\n \"abstract\": \"this is a test\",\n }\n },\n \"deploymentProfileName\": \"http://www.opengis.net/profiles/eoc/wpsApplication\",\n \"executionUnit\": [{\"unit\": cwl}],\n }\n desc, _ = self.deploy_process(body, describe_schema=\"OLD\")\n proc = desc[\"process\"]\n\n assert proc[\"id\"] == self._testMethodName\n assert proc[\"title\"] == \"some title\"\n assert proc[\"description\"] == \"this is a test\"\n assert 
isinstance(proc[\"inputs\"], list)\n assert len(proc[\"inputs\"]) == 1\n assert proc[\"inputs\"][0][\"id\"] == \"url\"\n assert proc[\"inputs\"][0][\"minOccurs\"] == 1\n assert proc[\"inputs\"][0][\"maxOccurs\"] == 1\n assert \"format\" not in proc[\"inputs\"][0]\n assert isinstance(proc[\"outputs\"], list)\n assert len(proc[\"outputs\"]) == 1\n assert proc[\"outputs\"][0][\"id\"] == \"values\"\n assert \"minOccurs\" not in proc[\"outputs\"][0]\n assert \"maxOccurs\" not in proc[\"outputs\"][0]\n assert \"format\" not in proc[\"outputs\"][0]\n expect = KNOWN_PROCESS_DESCRIPTION_FIELDS\n fields = set(proc.keys()) - expect\n assert len(fields) == 0, \\\n \"Unexpected fields found:\\n Unknown: {}\\n Expected: {}\".format(list(fields), list(expect))\n # make sure that deserialization of literal fields did not produce over-verbose metadata\n for p_input in proc[\"inputs\"]:\n expect = KNOWN_PROCESS_DESCRIPTION_INPUT_DATA_FIELDS\n fields = set(p_input) - expect\n assert len(fields) == 0, \\\n \"Unexpected fields found:\\n Unknown: {}\\n Expected: {}\".format(list(fields), list(expect))\n for p_output in proc[\"outputs\"]:\n expect = KNOWN_PROCESS_DESCRIPTION_OUTPUT_DATA_FIELDS\n fields = set(p_output) - expect\n assert len(fields) == 0, \\\n \"Unexpected fields found:\\n Unknown: {}\\n Expected: {}\".format(list(fields), list(expect))", "def test_deterministic_serialization(self):\n os.mkdir(os.path.join(self.testdir, '1'))\n os.mkdir(os.path.join(self.testdir, '2'))\n # These have to have the same filename (not full path,\n # obviously) since the filename gets encoded in the gzip data.\n ring_fname1 = os.path.join(self.testdir, '1', 'the.ring.gz')\n ring_fname2 = os.path.join(self.testdir, '2', 'the.ring.gz')\n rd = ring.RingData(\n [array.array('H', [0, 1, 0, 1]), array.array('H', [0, 1, 0, 1])],\n [{'id': 0, 'zone': 0}, {'id': 1, 'zone': 1}], 30)\n rd.save(ring_fname1)\n rd.save(ring_fname2)\n with open(ring_fname1, 'rb') as ring1:\n with open(ring_fname2, 'rb') as ring2:\n self.assertEqual(ring1.read(), ring2.read())", "def test_decode_errors(self):\n if self._invalid_encoded:\n self.assert_raises((ValueError, jsonschema.exceptions.ValidationError),\n self.import_cls.decode,\n self._invalid_encoded[0], self.typedef)", "def test_serialization_deserialization(self):\n\n original_time = now()\n serialized_time = DatetimeMapper.forward(original_time)\n assert serialized_time == original_time.strftime(\"%Y-%m-%dT%H:%M:%S.%f\")\n\n deserialized_time = DatetimeMapper.backward(serialized_time)\n assert original_time == deserialized_time\n\n deserialized_time = DatetimeMapper.backward(None)\n assert isinstance(deserialized_time, datetime)" ]
[ "0.6951617", "0.68632156", "0.67302066", "0.661847", "0.6527949", "0.6503405", "0.6458234", "0.63932693", "0.63932693", "0.6388795", "0.63429564", "0.63004637", "0.6290241", "0.62630475", "0.6233855", "0.618288", "0.61746454", "0.6163066", "0.6099586", "0.6083544", "0.60727656", "0.6058211", "0.603197", "0.6010701", "0.5998617", "0.5996729", "0.5993207", "0.5980765", "0.5970034", "0.595375", "0.5950472", "0.59466046", "0.5911473", "0.5892852", "0.5888614", "0.58570665", "0.58551836", "0.5847972", "0.58469534", "0.58411014", "0.5839765", "0.5839388", "0.5833557", "0.5823283", "0.5818899", "0.5816701", "0.5811711", "0.5810895", "0.580189", "0.5795966", "0.57948864", "0.578826", "0.5786269", "0.578022", "0.5764262", "0.57584196", "0.5752192", "0.57511085", "0.5747721", "0.5734894", "0.5734579", "0.5732419", "0.5731039", "0.5727274", "0.5707706", "0.568509", "0.5683923", "0.56815535", "0.5680191", "0.5675689", "0.5670542", "0.56609213", "0.56565005", "0.565639", "0.5641547", "0.56365377", "0.56362957", "0.56341124", "0.56284344", "0.5624529", "0.5616515", "0.5614626", "0.5607402", "0.56056565", "0.56015575", "0.5600139", "0.55936223", "0.5591257", "0.55904895", "0.55896324", "0.5587916", "0.5587163", "0.55757165", "0.5575476", "0.5563523", "0.5561318", "0.55576706", "0.55448556", "0.5544209", "0.5528359" ]
0.7254605
0
Test that a generated protocol could be used in exchanging messages between two agents.
def test_generated_protocol_end_to_end(self):
        # AEA components
        ledger_apis = LedgerApis({}, FETCHAI)

        wallet_1 = Wallet({FETCHAI: FETCHAI_PRIVATE_KEY_FILE})
        wallet_2 = Wallet({FETCHAI: FETCHAI_PRIVATE_KEY_FILE})

        identity_1 = Identity(
            name="my_aea_1",
            address=wallet_1.addresses.get(FETCHAI),
            default_address_key=FETCHAI,
        )
        identity_2 = Identity(
            name="my_aea_2",
            address=wallet_2.addresses.get(FETCHAI),
            default_address_key=FETCHAI,
        )

        oef_connection_1 = OEFConnection(
            address=identity_1.address, oef_addr=HOST, oef_port=PORT
        )
        oef_connection_2 = OEFConnection(
            address=identity_2.address, oef_addr=HOST, oef_port=PORT
        )

        resources_1 = Resources()
        resources_2 = Resources()

        # add generated protocols to resources
        generated_protocol_configuration = ProtocolConfig.from_json(
            yaml.safe_load(
                open(
                    os.path.join(
                        self.cwd,
                        "tests",
                        "data",
                        "generator",
                        "two_party_negotiation",
                        "protocol.yaml",
                    )
                )
            )
        )
        generated_protocol = Protocol(
            TwoPartyNegotiationMessage.protocol_id,
            TwoPartyNegotiationSerializer(),
            generated_protocol_configuration,
        )
        resources_1.protocol_registry.register(
            TwoPartyNegotiationMessage.protocol_id, generated_protocol
        )
        resources_2.protocol_registry.register(
            TwoPartyNegotiationMessage.protocol_id, generated_protocol
        )

        # create AEAs
        aea_1 = AEA(identity_1, [oef_connection_1], wallet_1, ledger_apis, resources_1)
        aea_2 = AEA(identity_2, [oef_connection_2], wallet_2, ledger_apis, resources_2)

        inform_number = tuple((1370, 1991, 1, 4, 17, 6))
        # message 1
        message = TwoPartyNegotiationMessage(
            message_id=1,
            dialogue_reference=(str(0), ""),
            target=0,
            performative=TwoPartyNegotiationMessage.Performative.INFORM,
            inform_number=inform_number,
        )
        encoded_message_in_bytes = TwoPartyNegotiationSerializer().encode(message)
        envelope = Envelope(
            to=identity_2.address,
            sender=identity_1.address,
            protocol_id=TwoPartyNegotiationMessage.protocol_id,
            message=encoded_message_in_bytes,
        )
        # message 2
        reply_message = {1: "number one", 2: "number two", 7: "number seven"}
        message_2 = TwoPartyNegotiationMessage(
            message_id=2,
            dialogue_reference=(str(0), ""),
            target=1,
            performative=TwoPartyNegotiationMessage.Performative.INFORM_REPLY,
            reply_message=reply_message,
        )
        encoded_message_2_in_bytes = TwoPartyNegotiationSerializer().encode(message_2)

        # add handlers to AEA resources
        agent_1_handler = Agent1Handler(
            skill_context=SkillContext(aea_1.context), name="fake_skill"
        )
        resources_1.handler_registry.register(
            (
                PublicId.from_str("fetchai/fake_skill:0.1.0"),
                TwoPartyNegotiationMessage.protocol_id,
            ),
            agent_1_handler,
        )
        agent_2_handler = Agent2Handler(
            encoded_messsage=encoded_message_2_in_bytes,
            skill_context=SkillContext(aea_2.context),
            name="fake_skill",
        )
        resources_2.handler_registry.register(
            (
                PublicId.from_str("fetchai/fake_skill:0.1.0"),
                TwoPartyNegotiationMessage.protocol_id,
            ),
            agent_2_handler,
        )

        # add error skill to AEAs
        error_skill_1 = Skill.from_dir(
            os.path.join(AEA_DIR, "skills", "error"), aea_1.context
        )
        resources_1.add_skill(error_skill_1)
        error_skill_2 = Skill.from_dir(
            os.path.join(AEA_DIR, "skills", "error"), aea_2.context
        )
        resources_2.add_skill(error_skill_2)

        # Start threads
        t_1 = Thread(target=aea_1.start)
        t_2 = Thread(target=aea_2.start)
        try:
            t_1.start()
            t_2.start()
            time.sleep(1.0)
            aea_1.outbox.put(envelope)
            time.sleep(5.0)
            assert (
                agent_2_handler.handled_message.message_id == message.message_id
            ), "Message from Agent 1 to 2: message ids do not match"
            assert (
                agent_2_handler.handled_message.dialogue_reference
                == message.dialogue_reference
            ), "Message from Agent 1 to 2: dialogue references do not match"
            assert (
                agent_2_handler.handled_message.dialogue_reference[0]
                == message.dialogue_reference[0]
            ), "Message from Agent 1 to 2: dialogue reference[0]s do not match"
            assert (
                agent_2_handler.handled_message.dialogue_reference[1]
                == message.dialogue_reference[1]
            ), "Message from Agent 1 to 2: dialogue reference[1]s do not match"
            assert (
                agent_2_handler.handled_message.target == message.target
            ), "Message from Agent 1 to 2: targets do not match"
            assert (
                agent_2_handler.handled_message.performative == message.performative
            ), "Message from Agent 1 to 2: performatives do not match"
            assert (
                agent_2_handler.handled_message.inform_number == message.inform_number
            ), "Message from Agent 1 to 2: inform_numbers do not match"
            assert (
                agent_1_handler.handled_message.message_id == message_2.message_id
            ), "Message from Agent 2 to 1: message ids do not match"
            assert (
                agent_1_handler.handled_message.dialogue_reference
                == message_2.dialogue_reference
            ), "Message from Agent 2 to 1: dialogue references do not match"
            assert (
                agent_1_handler.handled_message.dialogue_reference[0]
                == message_2.dialogue_reference[0]
            ), "Message from Agent 2 to 1: dialogue reference[0]s do not match"
            assert (
                agent_1_handler.handled_message.dialogue_reference[1]
                == message_2.dialogue_reference[1]
            ), "Message from Agent 2 to 1: dialogue reference[1]s do not match"
            assert (
                agent_1_handler.handled_message.target == message_2.target
            ), "Message from Agent 2 to 1: targets do not match"
            assert (
                agent_1_handler.handled_message.performative == message_2.performative
            ), "Message from Agent 2 to 1: performatives do not match"
            assert (
                agent_1_handler.handled_message.reply_message == message_2.reply_message
            ), "Message from Agent 2 to 1: reply_messages do not match"
            time.sleep(2.0)
        finally:
            aea_1.stop()
            aea_2.stop()
            t_1.join()
            t_2.join()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_identify(self):\n\n protocol_a, transport_a, tree_a, _ = self.create_protocol('protocol_a')\n protocol_b, transport_b, tree_b, _ = self.create_protocol('protocol_b')\n\n transport_a.get_extra_info.return_value = ('127.0.0.1', 1000)\n transport_b.get_extra_info.return_value = ('127.0.0.2', 1000)\n\n self.assertTrue(len(protocol_a.messages) == 0)\n\n protocol_a.identify()\n\n # Check that a message has been sent.\n self.assertTrue(transport_a.write.called)\n self.assertTrue(len(protocol_a.messages) == 1)\n\n # Get the message and check for the key.\n output = transport_a.write.call_args[0][0]\n self.assertTrue(protocol_a.self_key in output.decode())\n\n # Feed the message to the other protocol.\n protocol_b.data_received(output)\n\n # Check that the routing tree has been called to add a Node with the right key.\n self.assertTrue(tree_b.add_node.called)\n self.assertTrue(tree_b.add_node.call_args[0][0].key == 'protocol_a')\n\n # Check that the response on the identify is written to the transport.\n self.assertTrue(transport_b.write.called)\n\n # Get the response, check the key.\n output = transport_b.write.call_args[0][0]\n self.assertTrue(protocol_b.self_key in output.decode())\n\n # Feed the response to the original protocol.\n protocol_a.data_received(output)\n\n # The routing tree should've been called to add the Node with the right key.\n self.assertTrue(tree_a.add_node.called)\n self.assertTrue(tree_a.add_node.call_args[0][0].key == 'protocol_b')\n\n # The messages dict should now be empty again.\n self.assertTrue(len(protocol_a.messages) == 0)", "def test_protocols(container, protocol):\n assert isinstance(container, protocol)", "def test_invalid_same_peer_id2(self):\n # Disable idle timeout before creating any new peer because self.create_peer(...)\n # runs the main loop.\n self.conn.disable_idle_timeout()\n # Create new peer and disable idle timeout.\n manager3 = self.create_peer(self.network, peer_id=self.peer_id2)\n conn = FakeConnection(manager3, self.manager1)\n # Disable idle timeout.\n conn.disable_idle_timeout()\n # HELLO\n self.assertEqual(self.conn.peek_tr1_value().split()[0], b'HELLO')\n self.assertEqual(self.conn.peek_tr2_value().split()[0], b'HELLO')\n self.assertEqual(conn.peek_tr1_value().split()[0], b'HELLO')\n self.assertEqual(conn.peek_tr2_value().split()[0], b'HELLO')\n self.conn.run_one_step()\n conn.run_one_step()\n # PEER-ID\n self.assertEqual(self.conn.peek_tr1_value().split()[0], b'PEER-ID')\n self.assertEqual(self.conn.peek_tr2_value().split()[0], b'PEER-ID')\n self.assertEqual(conn.peek_tr1_value().split()[0], b'PEER-ID')\n self.assertEqual(conn.peek_tr2_value().split()[0], b'PEER-ID')\n self.conn.run_one_step()\n conn.run_one_step()\n # READY\n self.assertEqual(self.conn.peek_tr1_value().split()[0], b'READY')\n self.assertEqual(self.conn.peek_tr2_value().split()[0], b'READY')\n self.assertEqual(conn.peek_tr1_value().split()[0], b'READY')\n self.assertEqual(conn.peek_tr2_value().split()[0], b'READY')\n self.conn.run_one_step()\n conn.run_one_step()\n # continue until messages stop\n self.conn.run_until_empty()\n conn.run_until_empty()\n self.run_to_completion()\n # one of the peers will close the connection. 
We don't know which one, as it depends\n # on the peer ids\n\n if self.conn.tr1.disconnecting or self.conn.tr2.disconnecting:\n conn_dead = self.conn\n conn_alive = conn\n elif conn.tr1.disconnecting or conn.tr2.disconnecting:\n conn_dead = conn\n conn_alive = self.conn\n else:\n raise Exception('It should never happen.')\n self._check_result_only_cmd(conn_dead.peek_tr1_value() + conn_dead.peek_tr2_value(), b'ERROR')\n # at this point, the connection must be closing as the error was detected on READY state\n self.assertIn(True, [conn_dead.tr1.disconnecting, conn_dead.tr2.disconnecting])\n # check connected_peers\n connected_peers = list(self.manager1.connections.connected_peers.values())\n self.assertEquals(1, len(connected_peers))\n self.assertIn(connected_peers[0], [conn_alive.proto1, conn_alive.proto2])\n # connection is still up\n self.assertIsConnected(conn_alive)", "def testProtocolReturn(self):\n self.assertEqual(\n self.protocol,\n self.mr.protocol\n )\n\n self.mr._protocol = 'burp'\n\n self.assertEqual(\n 'burp',\n self.mr.protocol\n )", "def test_generated_protocol_serialisation(self):\n # create a message\n reply_message = {1: \"number one\", 2: \"number two\", 7: \"number seven\"}\n # message 1\n message = TwoPartyNegotiationMessage(\n message_id=1,\n dialogue_reference=(str(0), \"\"),\n target=0,\n performative=TwoPartyNegotiationMessage.Performative.INFORM_REPLY,\n reply_message=reply_message,\n )\n\n # serialise the message\n encoded_message_in_bytes = TwoPartyNegotiationSerializer().encode(message)\n\n # deserialise the message\n decoded_message = TwoPartyNegotiationSerializer().decode(\n encoded_message_in_bytes\n )\n\n # Compare the original message with the serialised+deserialised message\n assert decoded_message.message_id == message.message_id\n assert decoded_message.dialogue_reference == message.dialogue_reference\n assert decoded_message.dialogue_reference[0] == message.dialogue_reference[0]\n assert decoded_message.dialogue_reference[1] == message.dialogue_reference[1]\n assert decoded_message.target == message.target\n assert decoded_message.performative == message.performative\n assert decoded_message.reply_message == message.reply_message", "def test_protocols_updated(self):\n assert self.agent_config.protocols == {self.new_protocol_id}", "def test_multi_line():\n\n for protocol in LEGACY_PROTOCOLS:\n p = protocol([])\n\n test_case = [\n \"48 6B 10 49 02 01 00 01 02 03 FF\",\n \"48 6B 10 49 02 02 04 05 06 07 FF\",\n \"48 6B 10 49 02 03 08 09 0A 0B FF\",\n ]\n\n correct_data = [0x49, 0x02] + list(range(12))\n\n # in-order\n r = p(test_case)\n assert len(r) == 1\n check_message(r[0], len(test_case), 0x10, correct_data)\n\n # test a few out-of-order cases\n for n in range(4):\n random.shuffle(test_case) # mix up the frame strings\n r = p(test_case)\n assert len(r) == 1\n check_message(r[0], len(test_case), 0x10, correct_data)", "def test_send_network(self) :\n symbol = 'A' \n oProtocol = Protocol(symbol,mode=\"client\",debug=self.debug)\n command = \"N200\"\n message = oProtocol.send(command)\n #if message['status'] is False :\n #print(\"\\n*** ERROR : test_send_network : {}\".format(message['notify']))\n\n #Pour enregistrer les traces d'appels de fonctions dans le fichier log/client_calltrack_sorted.txt\n client_tracker_print()\n self.assertTrue( (message['status'] is not True) )", "def test_buildProtocol(self):\n queryData = (\"fromUser\", None, None)\n factory = irc.DccChatFactory(None, queryData)\n protocol = factory.buildProtocol(\"127.0.0.1\")\n 
self.assertIsInstance(protocol, irc.DccChat)\n self.assertEqual(protocol.factory, factory)", "def test_verify_connection_to_a_device():", "def test_websocket_mechanics():\n transport = StringTransportWithDisconnection()\n service = hey_joe.WebSocketService(\"127.0.0.1\", 9000)\n protocol = service.buildProtocol(service._hey_joe_addr)\n protocol.transport = transport\n transport.protocol = protocol\n protocol.connectionMade()\n data_to_send = b'GET / HTTP/1.1\\r\\nHost: somewhere_in_the_world:9000\\r\\nConnection: keep-alive, Upgrade\\r\\nUpgrade: websocket\\r\\nSec-WebSocket-Version: 13\\r\\nSec-WebSocket-Key: F76ObkF/aCKX8WkmAgx2OQ==\\r\\n\\r\\n'\n protocol.dataReceived(data_to_send)\n assert transport.value().startswith(b'HTTP/1.1 101 Switching Protocols\\r\\nServer: hendrix')", "def testBinaryProtocolEof(self):\n self.eofTestHelper(TBinaryProtocol.TBinaryProtocolFactory())\n self.eofTestHelperStress(TBinaryProtocol.TBinaryProtocolFactory())", "def test_buildProtocol(self):\n f = AvatarFactory('world')\n p = f.buildProtocol(None)\n self.assertEqual(p.factory, f)\n self.assertEqual(p.world, 'world')\n self.assertTrue(isinstance(p, AvatarProtocol))", "async def test_websocket_communicator():\n communicator = WebsocketCommunicator(SimpleWebsocketApp(), \"/testws/\")\n # Test connection\n connected, subprotocol = await communicator.connect()\n assert connected\n assert subprotocol is None\n # Test sending text\n await communicator.send_to(text_data=\"hello\")\n response = await communicator.receive_from()\n assert response == \"hello\"\n # Test sending bytes\n await communicator.send_to(bytes_data=b\"w\\0\\0\\0\")\n response = await communicator.receive_from()\n assert response == b\"w\\0\\0\\0\"\n # Test sending JSON\n await communicator.send_json_to({\"hello\": \"world\"})\n response = await communicator.receive_json_from()\n assert response == {\"hello\": \"world\"}\n # Close out\n await communicator.disconnect()", "def test_differentProtocol(self):\n resolver = client.Resolver(servers=[('example.com', 53)])\n protocols = []\n\n class FakeProtocol(object):\n def __init__(self):\n self.transport = StubPort()\n\n def query(self, address, query, timeout=10, id=None):\n protocols.append(self)\n return defer.succeed(dns.Message())\n\n resolver._connectedProtocol = FakeProtocol\n resolver.query(dns.Query('foo.example.com'))\n resolver.query(dns.Query('bar.example.com'))\n self.assertEqual(len(set(protocols)), 2)", "def gotProtocol(self,p): \n p.send_hello()", "def test_connectedProtocol(self):\n resolver = client.Resolver(servers=[('example.com', 53)])\n firstProto = resolver._connectedProtocol()\n secondProto = resolver._connectedProtocol()\n\n self.assertNotIdentical(firstProto.transport, None)\n self.assertNotIdentical(secondProto.transport, None)\n self.assertNotEqual(\n firstProto.transport.getHost().port,\n secondProto.transport.getHost().port)\n\n return defer.gatherResults([\n defer.maybeDeferred(firstProto.transport.stopListening),\n defer.maybeDeferred(secondProto.transport.stopListening)])", "def notest_send_recv_network(self) :\n\n # Demarrage du serveur\n symbol = 'S' \n oProtocol_server = Protocol(symbol,mode=\"server\",debug=self.debug)\n # tcpHandlerMethods est definie dans le module test.util.test_util\n tcpHandlerMethods[\"process\"] = test_Protocol_process\n oProtocol_server.handlerRegister(tcpHandlerMethods)\n oProtocol_server.start()\n \n # Attente de l'etat actif du serveur.\n while oProtocol_server.isActivated is not True :\n time.sleep(1)\n\n # Toutes les commandes du 
protocole sont testees\n symbol = 'X'\n oProtocol_client = Protocol(symbol,mode=\"client\", debug=self.debug)\n \n status = True\n # Les commandes entrees par le joueur sont simulees \n for index, command in enumerate(self.commandList) :\n command = self.commandList[index]\n message = oProtocol_client.send(command)\n # print(\"\\n*** Received message= {}\".format(message))\n status = status and message['status']\n if message['status'] is False :\n print(\"\\n*** test_send_recv_network() : {}\\n\".format(message['notify']))\n\n # Le serveur est arrete\n oProtocol_server.shutdown()\n\n # Attend la terminaison des threads\n oProtocol_server.join()\n \n self.assertTrue( status )", "async def dsmr_connection_send_validate_fixture(hass):\n\n transport = MagicMock(spec=asyncio.Transport)\n protocol = MagicMock(spec=DSMRProtocol)\n\n protocol.telegram = {\n EQUIPMENT_IDENTIFIER: CosemObject([{\"value\": \"12345678\", \"unit\": \"\"}]),\n EQUIPMENT_IDENTIFIER_GAS: CosemObject([{\"value\": \"123456789\", \"unit\": \"\"}]),\n P1_MESSAGE_TIMESTAMP: CosemObject([{\"value\": \"12345678\", \"unit\": \"\"}]),\n }\n\n async def connection_factory(*args, **kwargs):\n \"\"\"Return mocked out Asyncio classes.\"\"\"\n if args[1] == \"5L\":\n protocol.telegram = {\n LUXEMBOURG_EQUIPMENT_IDENTIFIER: CosemObject(\n [{\"value\": \"12345678\", \"unit\": \"\"}]\n ),\n EQUIPMENT_IDENTIFIER_GAS: CosemObject(\n [{\"value\": \"123456789\", \"unit\": \"\"}]\n ),\n }\n if args[1] == \"5S\":\n protocol.telegram = {\n P1_MESSAGE_TIMESTAMP: CosemObject([{\"value\": \"12345678\", \"unit\": \"\"}]),\n }\n if args[1] == \"Q3D\":\n protocol.telegram = {\n Q3D_EQUIPMENT_IDENTIFIER: CosemObject(\n [{\"value\": \"12345678\", \"unit\": \"\"}]\n ),\n }\n\n return (transport, protocol)\n\n connection_factory = MagicMock(wraps=connection_factory)\n\n async def wait_closed():\n if isinstance(connection_factory.call_args_list[0][0][2], str):\n # TCP\n telegram_callback = connection_factory.call_args_list[0][0][3]\n else:\n # Serial\n telegram_callback = connection_factory.call_args_list[0][0][2]\n\n telegram_callback(protocol.telegram)\n\n protocol.wait_closed = wait_closed\n\n with patch(\n \"homeassistant.components.dsmr.config_flow.create_dsmr_reader\",\n connection_factory,\n ), patch(\n \"homeassistant.components.dsmr.config_flow.create_tcp_dsmr_reader\",\n connection_factory,\n ):\n yield (connection_factory, transport, protocol)", "def test_large_msg(self):\n self.proto.makeConnection(self.transport)\n outstr = \"test\" * AMP_MAXLEN\n self.proto.data_to_server(MsgServer2Portal, 1, test=outstr)\n\n if pickle.HIGHEST_PROTOCOL == 5:\n # Python 3.8+\n self.transport.write.assert_called_with(\n b\"\\x00\\x04_ask\\x00\\x011\\x00\\x08_command\\x00\\x10MsgServer2Portal\\x00\\x0bpacked_data\"\n b\"\\x00wx\\xda\\xed\\xc6\\xc1\\t\\x80 \\x00@Q#=5Z\\x0b\\xb8\\x80\\x13\\xe85h\\x80\\x8e\\xbam`Dc\\xf4><\\xf8g\"\n b\"\\x1a[\\xf8\\xda\\x97\\xa3_\\xb1\\x95\\xdaz\\xbe\\xe7\\x1a\\xde\\x03\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"\n b\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"\n b\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"\n b\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\xe0\\x1f\\x1eP\\x1d\\x02\\r\\x00\\rpacked_data.2\"\n b\"\\x00Zx\\xda\\xed\\xc3\\x01\\r\\x00\\x00\\x08\\xc0\\xa0\\xb4&\\xf0\\xfdg\\x10a\\xa3\"\n 
b\"\\xd9RUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUU\\xf5\\xfb\\x03m\\xe0\\x06\"\n b\"\\x1d\\x00\\rpacked_data.3\\x00Zx\\xda\\xed\\xc3\\x01\\r\\x00\\x00\\x08\\xc0\\xa0\\xb4&\\xf0\\xfdg\\x10a\"\n b\"\\xa3fSUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUU\\xf5\\xfb\\x03n\\x1c\"\n b\"\\x06\\x1e\\x00\\rpacked_data.4\\x00Zx\\xda\\xed\\xc3\\x01\\t\\x00\\x00\\x0c\\x03\\xa0\\xb4O\\xb0\\xf5gA\"\n b\"\\xae`\\xda\\x8b\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\"\n b\"\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\"\n b\"\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\"\n b\"\\xaa\\xaa\\xaa\\xdf\\x0fnI\\x06,\\x00\\rpacked_data.5\\x00\\x18x\\xdaK-.)I\\xc5\\x8e\\xa7\\xb22@\\xc0\"\n b\"\\x94\\xe2\\xb6)z\\x00Z\\x1e\\x0e\\xb6\\x00\\x00\"\n )\n elif pickle.HIGHEST_PROTOCOL == 4:\n # Python 3.7\n self.transport.write.assert_called_with(\n b\"\\x00\\x04_ask\\x00\\x011\\x00\\x08_command\\x00\\x10MsgServer2Portal\\x00\\x0bpacked_data\"\n b\"\\x00wx\\xda\\xed\\xc6\\xc1\\t\\x80 \\x00@Q#o\\x8e\\xd6\\x02-\\xe0\\x04z\\r\\x1a\\xa0\\xa3m+$\\xd2\"\n b\"\\x18\\xbe\\x0f\\x0f\\xfe\\x1d\\xdf\\x14\\xfe\\x8e\\xedjO\\xac\\xb9\\xd4v\\xf6o\\x0f\\xf3\\x00\\x00\"\n b\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"\n b\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"\n b\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"\n b\"\\x00X\\xc3\\x00P\\x10\\x02\\x0c\\x00\\rpacked_data.2\\x00Zx\\xda\\xed\\xc3\\x01\\r\\x00\\x00\\x08\"\n b\"\\xc0\\xa0\\xb4&\\xf0\\xfdg\\x10a\\xa3\\xd9RUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUU\"\n b\"\\xf5\\xfb\\x03m\\xe0\\x06\\x1d\\x00\\rpacked_data.3\\x00Zx\\xda\\xed\\xc3\\x01\\r\\x00\\x00\\x08\"\n b\"\\xc0\\xa0\\xb4&\\xf0\\xfdg\\x10a\\xa3fSUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUU\"\n b\"\\xf5\\xfb\\x03n\\x1c\\x06\\x1e\\x00\\rpacked_data.4\\x00Zx\\xda\\xed\\xc3\\x01\\t\\x00\\x00\\x0c\"\n b\"\\x03\\xa0\\xb4O\\xb0\\xf5gA\\xae`\\xda\\x8b\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\"\n b\"\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\"\n b\"\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\"\n b\"\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xdf\\x0fnI\\x06,\\x00\\rpacked_data.5\"\n b\"\\x00\\x18x\\xdaK-.)I\\xc5\\x8e\\xa7\\xb22@\\xc0\\x94\\xe2\\xb6)z\\x00Z\\x1e\\x0e\\xb6\\x00\\x00\"\n )", "def test_protocols_updated(self):\n assert self.connection_config.protocols == {self.new_protocol_id}", "def test_protocols_updated(self):\n assert self.skill_config.protocols == {self.new_protocol_id}", "def test_new_connection(self):\n with InverterFinder() as finder:\n sock1 = create_connection(('127.0.0.1', 1200))\n sock2, addr = finder.find_inverter()\n # Test if the 2 sockets are paired\n sock2.send(b\"\\x12\")\n self.assertEqual(b\"\\x12\", sock1.recv(1))\n sock1.close()\n sock2.close()", "async def test_invalid_messages(self):\n async with Node() as n:\n reader, writer = await asyncio.open_connection(\n 'localhost', n._port\n )\n writer.write('hello\\n'.encode())\n await writer.drain()\n writer.close()\n self.assertTrue(n.check_alive())\n\n async with Node() as n1:\n async with Node() as 
n2:\n await n2.join_network(n1.nid())\n peer = next(iter(n2._act_set))\n await peer.send_message(f'{constants.JOIN_FOR} hello 42')\n await peer.send_message(f'{constants.SHU_MES} hello 42 world')\n self.assertEqual(n1.num_active(), 1)\n self.assertEqual(n1.num_passive(), 0)\n\n await peer.send_message('hello world')\n await asyncio.sleep(2)\n self.assertEqual(n1.num_active(), 0)\n self.assertEqual(n1.num_passive(), 0)", "def test_envelope_routed(self):\n addr_1 = self.connection_client_1.address\n addr_2 = self.connection_client_2.address\n\n msg = DefaultMessage(\n dialogue_reference=(\"\", \"\"),\n message_id=1,\n target=0,\n performative=DefaultMessage.Performative.BYTES,\n content=b\"hello\",\n )\n envelope = Envelope(\n to=addr_2,\n sender=addr_1,\n protocol_specification_id=DefaultMessage.protocol_specification_id,\n message=DefaultSerializer().encode(msg),\n )\n\n self.multiplexer_client_1.put(envelope)\n delivered_envelope = self.multiplexer_client_2.get(block=True, timeout=20)\n\n assert delivered_envelope is not None\n assert delivered_envelope.to == envelope.to\n assert delivered_envelope.sender == envelope.sender\n assert (\n delivered_envelope.protocol_specification_id\n == envelope.protocol_specification_id\n )\n assert delivered_envelope.message == envelope.message", "def test_envelope_routed(self):\n addr_1 = self.connection_client_1.address\n addr_2 = self.connection_client_2.address\n\n msg = DefaultMessage(\n dialogue_reference=(\"\", \"\"),\n message_id=1,\n target=0,\n performative=DefaultMessage.Performative.BYTES,\n content=b\"hello\",\n )\n envelope = Envelope(\n to=addr_2,\n sender=addr_1,\n protocol_specification_id=DefaultMessage.protocol_specification_id,\n message=DefaultSerializer().encode(msg),\n )\n\n self.multiplexer_client_1.put(envelope)\n delivered_envelope = self.multiplexer_client_2.get(block=True, timeout=20)\n\n assert delivered_envelope is not None\n assert delivered_envelope.to == envelope.to\n assert delivered_envelope.sender == envelope.sender\n assert (\n delivered_envelope.protocol_specification_id\n == envelope.protocol_specification_id\n )\n assert delivered_envelope.message == envelope.message", "def test_star_routing_connectivity(self):\n msg = DefaultMessage(\n dialogue_reference=(\"\", \"\"),\n message_id=1,\n target=0,\n performative=DefaultMessage.Performative.BYTES,\n content=b\"hello\",\n )\n for source in range(len(self.multiplexers)):\n for destination in range(len(self.multiplexers)):\n if destination == source:\n continue\n envelope = Envelope(\n to=self.addresses[destination],\n sender=self.addresses[source],\n protocol_specification_id=DefaultMessage.protocol_specification_id,\n message=DefaultSerializer().encode(msg),\n )\n\n self.multiplexers[source].put(envelope)\n delivered_envelope = self.multiplexers[destination].get(\n block=True, timeout=10\n )\n assert delivered_envelope is not None\n assert delivered_envelope.to == envelope.to\n assert delivered_envelope.sender == envelope.sender\n assert (\n delivered_envelope.protocol_specification_id\n == envelope.protocol_specification_id\n )\n assert delivered_envelope.message == envelope.message", "def testFramepack2(self):\n # Check bad frame generation:\n frame = stomper.Frame()\n frame.cmd = 'DISCONNECT'\n result = frame.pack()\n correct = 'DISCONNECT\\n\\n\\x00\\n'\n self.assertEqual(result, correct)", "def test_supported_protocol(self):\n assert self.handler.SUPPORTED_PROTOCOL is None", "def test_load_protocol():\n\n # version 0.0.0 files\n for i in [0]:\n yield 
load_protocol, (path(__file__).parent /\n path('protocols') /\n path('protocol %d v%s' % (i, Version(0,0,0))))\n\n # version 0.1.0 files\n for i in [0]:\n yield load_protocol, (path(__file__).parent /\n path('protocols') /\n path('protocol %d v%s' % (i, Version(0,1,0))))", "def test_multi_line_mode_03():\n\n for protocol in LEGACY_PROTOCOLS:\n p = protocol([])\n\n\n test_case = [\n \"48 6B 10 43 00 01 02 03 04 05 FF\",\n \"48 6B 10 43 06 07 08 09 0A 0B FF\",\n ]\n\n correct_data = [0x43, 0x00] + list(range(12)) # data is stitched in order recieved\n # ^^^^ this is an arbitrary value in the source code\n\n r = p(test_case)\n assert len(r) == 1\n check_message(r[0], len(test_case), 0x10, correct_data)", "def test_message_sender_receiver_connection(self):\n sender = sample_user()\n receiver_prof = sample_user_second()\n receiver_prof.is_professor = True\n receiver_prof.save()\n receiver_student = sample_user_third()\n receiver_student.is_student = True\n receiver_student.save()\n message_1_text = \"You are awesome\"\n message_2_text = \"I love your classes.\"\n message_3_text = \"Thanks for the notes.\"\n first_message = models.Message.objects.create(text=message_1_text,\n sender=sender,\n receiver=receiver_prof)\n models.Message.objects.create(text=message_2_text,\n sender=sender,\n receiver=receiver_prof)\n models.Message.objects.create(text=message_3_text,\n sender=sender,\n receiver=receiver_student)\n self.assertEqual(str(first_message), first_message.text)\n self.assertEqual(sender.messages_sent.all().count(), 3)\n self.assertEqual(sender.messages_received.all().count(), 0)\n self.assertEqual(receiver_prof.messages_received.all().count(), 2)\n self.assertEqual(receiver_prof.messages_sent.all().count(), 0)", "def test_store(self):\n\n protocol_a, transport_a, tree_a, _ = self.create_protocol('protocol_a')\n protocol_b, transport_b, tree_b, store_b = self.create_protocol('protocol_b')\n\n self.assertTrue(len(protocol_a.messages) == 0)\n\n future = protocol_a.store('testvalue')\n\n # Check that a message has been sent.\n self.assertTrue(transport_a.write.called)\n self.assertTrue(len(protocol_a.messages) == 1)\n\n # Get the message.\n output = transport_a.write.call_args[0][0]\n\n # Feed the message to the other protocol.\n protocol_b.data_received(output)\n\n # Check that the value store is called.\n self.assertTrue(store_b.store.called)\n self.assertTrue(store_b.store.call_args[0][0] == 'testvalue')\n\n # Check that the response is written to the transport.\n self.assertTrue(transport_b.write.called)\n\n # Get the response.\n output = transport_b.write.call_args[0][0]\n\n # Feed the response to the original protocol.\n protocol_a.data_received(output)\n\n # There shouldn't be any messages left.\n self.assertTrue(len(protocol_a.messages) == 0)\n self.assertTrue(future.done())", "def test_validation(protocol_registry):\n\n # pylint: disable=abstract-class-instantiated,function-redefined\n\n class InputsGenerator(protocol_registry, RelaxInputsGenerator):\n\n _calc_types = None\n _relax_types = None\n\n with pytest.raises(TypeError):\n InputsGenerator()\n\n class InputsGenerator(protocol_registry, RelaxInputsGenerator):\n\n _calc_types = {'relax': {}}\n _relax_types = None\n\n def get_builder(self):\n pass\n\n with pytest.raises(RuntimeError):\n InputsGenerator()\n\n class InputsGenerator(protocol_registry, RelaxInputsGenerator):\n\n _calc_types = None\n _relax_types = {RelaxType.ATOMS: 'description'}\n\n def get_builder(self):\n pass\n\n with pytest.raises(RuntimeError):\n 
InputsGenerator()\n\n class InputsGenerator(protocol_registry, RelaxInputsGenerator):\n\n _calc_types = {'relax': {}}\n _relax_types = {'invalid-type': 'description'}\n\n def get_builder(self):\n pass\n\n with pytest.raises(RuntimeError):\n InputsGenerator()", "async def test_plaintext_connection(conn: APIConnection, resolve_host, socket_socket):\n loop = asyncio.get_event_loop()\n protocol = _get_mock_protocol(conn)\n messages = []\n protocol: Optional[APIPlaintextFrameHelper] = None\n transport = MagicMock()\n connected = asyncio.Event()\n\n def _create_mock_transport_protocol(create_func, **kwargs):\n nonlocal protocol\n protocol = create_func()\n protocol.connection_made(transport)\n connected.set()\n return transport, protocol\n\n def on_msg(msg):\n messages.append(msg)\n\n remove = conn.add_message_callback(on_msg, {HelloResponse, DeviceInfoResponse})\n transport = MagicMock()\n\n with patch.object(\n loop, \"create_connection\", side_effect=_create_mock_transport_protocol\n ):\n connect_task = asyncio.create_task(conn.connect(login=False))\n await connected.wait()\n\n protocol.data_received(\n b'\\x00@\\x02\\x08\\x01\\x10\\x07\\x1a(m5stackatomproxy (esphome v2023.1.0-dev)\"\\x10m'\n )\n protocol.data_received(b\"5stackatomproxy\")\n protocol.data_received(b\"\\x00\\x00$\")\n protocol.data_received(b\"\\x00\\x00\\x04\")\n protocol.data_received(\n b'\\x00e\\n\\x12\\x10m5stackatomproxy\\x1a\\x11E8:9F:6D:0A:68:E0\"\\x0c2023.1.0-d'\n )\n protocol.data_received(\n b\"ev*\\x15Jan 7 2023, 13:19:532\\x0cm5stack-atomX\\x03b\\tEspressif\"\n )\n await asyncio.sleep(0)\n await connect_task\n assert conn.is_connected\n assert len(messages) == 2\n assert isinstance(messages[0], HelloResponse)\n assert isinstance(messages[1], DeviceInfoResponse)\n assert messages[1].name == \"m5stackatomproxy\"\n remove()\n await conn.force_disconnect()\n await asyncio.sleep(0)", "def test_multiple_messages_received_at_once(self):\n # Send 2 messages\n self.sock.send(message + message)\n # Receive them back\n ident, payload = self.inverter.receive()\n self.assertEqual(b\"\\x00\\x01\\x02\", ident)\n self.assertEqual(b\"\", payload)\n ident, payload = self.inverter.receive()\n self.assertEqual(b\"\\x00\\x01\\x02\", ident)\n self.assertEqual(b\"\", payload)", "def test_hex_straining():\n\n for protocol in LEGACY_PROTOCOLS:\n p = protocol([])\n\n # single non-hex message\n r = p([\"12.8 Volts\"])\n assert len(r) == 1\n assert r[0].ecu == ECU.UNKNOWN\n assert len(r[0].frames) == 1\n\n\n # multiple non-hex message\n r = p([\"12.8 Volts\", \"NO DATA\"])\n assert len(r) == 2\n\n for m in r:\n assert m.ecu == ECU.UNKNOWN\n assert len(m.frames) == 1\n\n # mixed hex and non-hex\n r = p([\"NO DATA\", \"48 6B 10 41 00 00 01 02 03 FF\"])\n assert len(r) == 2\n\n # first message should be the valid, parsable hex message\n # NOTE: the parser happens to process the valid one's first\n check_message(r[0], 1, 0x10, [0x41, 0x00, 0x00, 0x01, 0x02, 0x03])\n\n # second message: invalid, non-parsable non-hex\n assert r[1].ecu == ECU.UNKNOWN\n assert len(r[1].frames) == 1\n assert len(r[1].data) == 0 # no data", "def test_rpcSendRecv(self):\n cli_send = self.client_msg\n srv_send = self.server_msg\n # Send message to driver\n flag = self.client_comm.send(cli_send)\n assert(flag)\n flag, msg_recv = self.server_comm.recv(timeout=self.timeout)\n assert(flag)\n nt.assert_equal(msg_recv, srv_send)\n # Send response back to instance\n flag = self.server_comm.send(srv_send)\n assert(flag)\n # self.driver.sleep(1)\n flag, msg_recv = 
self.client_comm.recv(timeout=self.timeout)\n assert(flag)\n nt.assert_equal(msg_recv, cli_send)", "def test_get_protocol_version(self):\n server, client = loopback()\n client_protocol_version = client.get_protocol_version()\n server_protocol_version = server.get_protocol_version()\n\n assert isinstance(server_protocol_version, int)\n assert isinstance(client_protocol_version, int)\n\n assert server_protocol_version == client_protocol_version", "def testBinaryProtocolAcceleratedEof(self):\n self.eofTestHelper(TBinaryProtocol.TBinaryProtocolAcceleratedFactory())\n self.eofTestHelperStress(TBinaryProtocol.TBinaryProtocolAcceleratedFactory())", "def test_proto_spec(self):\n proto_name = 'org.xlattice.upax'\n node_reg = reg.NodeReg()\n proto_reg = reg.ProtoReg(proto_name, node_reg)\n msg_reg = reg.MsgReg(proto_reg)\n proto_spec = M.ProtoSpec(proto_name, proto_reg)\n self.assertEqual(proto_name, proto_spec.name)\n parent = M.ProtoSpec(proto_name, proto_reg)\n\n msg_name = 'logEntry'\n # the enum is not used\n enum = M.EnumSpec.create('Joe', [\n ('oh', 92), ('hello', 47), ('there', 322), ])\n fields = [\n # pylint: disable=no-member\n M.FieldSpec(\n msg_reg,\n 'timestamp',\n FieldTypes.F_UINT32,\n Quants.REQUIRED,\n 0),\n M.FieldSpec(\n msg_reg,\n 'node_id',\n FieldTypes.F_BYTES20,\n Quants.REQUIRED,\n 1),\n M.FieldSpec(\n msg_reg,\n 'key',\n FieldTypes.F_BYTES20,\n Quants.REQUIRED,\n 2),\n M.FieldSpec(\n msg_reg,\n 'length',\n FieldTypes.V_UINT32,\n Quants.REQUIRED,\n 3),\n M.FieldSpec(\n msg_reg,\n 'by_',\n FieldTypes.L_STRING,\n Quants.REQUIRED,\n 4),\n M.FieldSpec(\n msg_reg,\n 'path',\n FieldTypes.L_STRING,\n Quants.REQUIRED,\n 5),\n ]\n msg_spec = M.MsgSpec(msg_name, msg_reg, proto_spec)\n self.assertEqual(msg_name, msg_spec.name)\n for file in fields:\n msg_spec.add_field(file)\n\n # proto_spec.add_msg(msg_spec) # correctly commented out\n self.round_trip_poto_spec_via_string(proto_spec) # GEEP", "def test_validation_correct_protocols():\n basic_protocol = yank_load(standard_protocol)\n\n # Alchemical paths\n protocols = [\n {'lambda_electrostatics': [1.0, 0.5, 0.0], 'lambda_sterics': [1.0, 0.5, 0.0]},\n {'lambda_electrostatics': [1.0, 0.5, 0.0], 'lambda_sterics': [1.0, 0.5, 0.0],\n 'lambda_torsions': [1.0, 0.5, 0.0], 'lambda_angles': [1.0, 0.5, 0.0]},\n {'lambda_electrostatics': [1.0, 0.5, 0.0], 'lambda_sterics': [1.0, 0.5, 0.0],\n 'temperature': ['300*kelvin', '340*kelvin', '300*kelvin']},\n 'auto',\n ]\n for protocol in protocols:\n modified_protocol = copy.deepcopy(basic_protocol)\n modified_protocol['absolute-binding']['complex']['alchemical_path'] = protocol\n yield ExperimentBuilder._validate_protocols, modified_protocol\n\n # Try different options both with 'auto' and a path with alchemical functions.\n function_protocol = copy.deepcopy(basic_protocol)\n function_protocol['absolute-binding']['complex']['alchemical_path'] = {\n 'lambda_electrostatics': 'lambda**2',\n 'lambda_sterics': 'sqrt(lambda)',\n 'lambda': [1.0, 0.0]\n }\n auto_protocol = copy.deepcopy(basic_protocol)\n auto_protocol['absolute-binding']['complex']['alchemical_path'] = 'auto'\n\n trailblazer_options = [\n {'n_equilibration_iterations': 1000, 'n_samples_per_state': 100,\n 'thermodynamic_distance': 0.5, 'distance_tolerance': 0.05},\n {'n_equilibration_iterations': 100, 'n_samples_per_state': 10},\n {'thermodynamic_distance': 1.0, 'distance_tolerance': 0.5},\n {'function_variable_name': 'lambda'},\n {'function_variable_name': 'lambda', 'reversed_direction': False}\n ]\n for opts in trailblazer_options:\n # Use 
the function protocol if the function variable is specified.\n if 'function_variable_name' in opts:\n modified_protocol = copy.deepcopy(function_protocol)\n else:\n modified_protocol = copy.deepcopy(auto_protocol)\n modified_protocol['absolute-binding']['complex']['trailblazer_options'] = opts\n yield ExperimentBuilder._validate_protocols, modified_protocol\n\n # Multiple phases.\n alchemical_path = copy.deepcopy(basic_protocol['absolute-binding']['complex'])\n protocols = [\n {'complex': alchemical_path, 'solvent': alchemical_path},\n {'complex': alchemical_path, 'solvent': {'alchemical_path': 'auto'}},\n {'my-complex': alchemical_path, 'my-solvent': alchemical_path},\n {'solvent1': alchemical_path, 'solvent2': alchemical_path},\n {'solvent1variant': alchemical_path, 'solvent2variant': alchemical_path},\n collections.OrderedDict([('a', alchemical_path), ('z', alchemical_path)]),\n collections.OrderedDict([('z', alchemical_path), ('a', alchemical_path)])\n ]\n for protocol in protocols:\n modified_protocol = copy.deepcopy(basic_protocol)\n modified_protocol['absolute-binding'] = protocol\n yield ExperimentBuilder._validate_protocols, modified_protocol\n sorted_protocol = ExperimentBuilder._validate_protocols(modified_protocol)['absolute-binding']\n if isinstance(protocol, collections.OrderedDict):\n assert sorted_protocol.keys() == protocol.keys()\n else:\n assert isinstance(sorted_protocol, collections.OrderedDict)\n first_phase = next(iter(sorted_protocol.keys())) # py2/3 compatible\n assert 'complex' in first_phase or 'solvent1' in first_phase", "def test_outgoing_message_send(self):\n message = self.create_outgoing_message()\n message.send()\n self.assertEqual(self.outbound[0].text, message.text)", "def setup_protocol(self):\n self.protocol = pysubunit.TestProtocolServer(self.client)\n self.protocol.lineReceived(compat._b(\"test mcdonalds farm\\n\"))\n self.test = self.client._events[-1][-1]", "def setup_protocol(self):\n self.protocol = pysubunit.TestProtocolServer(self.client)\n self.protocol.lineReceived(compat._b(\"test mcdonalds farm\\n\"))\n self.test = self.client._events[-1][-1]", "def test_buildProtocolStoresFactory(self):\n xs = self.factory.buildProtocol(None)\n self.assertIdentical(self.factory, xs.factory)", "def test_find_node(self):\n\n protocol_a, transport_a, tree_a, _ = self.create_protocol('protocol_a')\n protocol_b, transport_b, tree_b, _ = self.create_protocol('protocol_b')\n\n self.assertTrue(len(protocol_a.messages) == 0)\n\n protocol_a.find_node('testkey')\n\n # Check that a message has been sent.\n self.assertTrue(transport_a.write.called)\n self.assertTrue(len(protocol_a.messages) == 1)\n\n # Get the message.\n output = transport_a.write.call_args[0][0]\n\n tree_b.find_nodes.return_value = [\n Node('first', '127.0.0.1', 1234),\n Node('second', '127.0.0.1', 5678),\n Node('third', '127.0.0.1', 9100),\n ]\n\n # Feed the message to the other protocol.\n protocol_b.data_received(output)\n\n self.assertTrue(tree_b.find_nodes.called)\n self.assertTrue(tree_b.find_nodes.call_args[0][0] == 'testkey')\n\n # Check that the response is written to the transport.\n self.assertTrue(transport_b.write.called)\n\n # Get the response.\n output = transport_b.write.call_args[0][0]\n\n # Feed the response to the original protocol.\n protocol_a.data_received(output)\n\n self.assertTrue(len(protocol_a.messages) == 0)\n\n self.assertTrue(tree_a.add_node.called)\n self.assertTrue(len(tree_a.add_node.call_args_list) == 3)", "def test_basic_message_passing(self):\n test_data = [\"hello 
world\", 42]\n parent, child = create_psuedo_anonymous_duct_pair()\n bind_address = parent.bind_address\n parent.send(test_data)\n assert_that(child.recv()).is_equal_to(test_data)\n child.close()\n parent.close()\n assert_that(os.path.exists(bind_address)).is_false()", "def test_sendimmessages(self):\n pass", "def test_virtual_service_create_command_for_human_readable_with_protocol_as_udp(\n virtual_service_create_success_udp, virtual_service_success_udp_hr\n):\n resp = prepare_virtual_service_output(virtual_service_create_success_udp)\n assert resp == virtual_service_success_udp_hr", "def test_send(self):\n self.inverter.send(b\"\\x00\\x01\\x02\", b\"\")\n received_message = self.sock.recv(4096)\n self.assertEqual(message, received_message)", "def test_comm_base(comms):\n commsend, commrecv = comms\n\n assert commsend.is_open()\n assert commrecv.is_open()\n\n received_messages = []\n\n def handler(msg_dict, buffer):\n received_messages.append((msg_dict, buffer))\n\n # Register callback\n commrecv._register_message_handler('test_message', handler)\n\n # Send a message\n commsend._send_message('test_message', content='content', data='data')\n assert len(received_messages) == 1\n assert received_messages[0][0]['spyder_msg_type'] == 'test_message'\n assert received_messages[0][0]['content'] == 'content'\n assert received_messages[0][1] == 'data'\n\n # Send another message\n commsend._send_message('test_message', content='content', data='data')\n assert len(received_messages) == 2\n\n # Unregister callback\n commrecv._register_message_handler('test_message', None)\n\n # Send another unhandled message\n commsend._send_message('test_message', content='content', data='data')\n assert len(received_messages) == 2\n\n # Test closing\n commsend.close()\n assert not commsend.is_open()\n assert not commrecv.is_open()", "def test_load_non_existant_protocol():\n Protocol.load(path(__file__).parent /\n path('protocols') /\n path('no protocol'))", "async def test_basic_messaging(self):\n async with Node() as n1:\n async with Node() as n2:\n await n2.join_network(n1.nid())\n\n n1bcast = Network_Message()\n n2bcast = Network_Message()\n n1.attach_broadcast_callback(n1bcast.msg_callback)\n n2.attach_broadcast_callback(n2bcast.msg_callback)\n\n n1dmsg = Network_Message()\n n2dmsg = Network_Message()\n n1.attach_direct_message_callback(n1dmsg.msg_callback)\n n2.attach_direct_message_callback(n2dmsg.msg_callback)\n\n await n1.send_broadcast('hello world')\n msg_succ = await n2bcast.wait_msg()\n self.assertTrue(msg_succ)\n self.assertEqual(n2bcast.nid, n1.nid())\n self.assertEqual(n2bcast.msg, 'hello world')\n\n # nodes shouldnt receive their own broadcast\n msg_succ = await n1bcast.wait_msg(1)\n self.assertFalse(msg_succ)\n\n await n2.send_message(n1.nid(), '42')\n msg_succ = await n1dmsg.wait_msg()\n self.assertTrue(msg_succ)\n self.assertEqual(n1dmsg.nid, n2.nid())\n self.assertEqual(n1dmsg.msg, '42')\n\n # nodes shouldnt receive their own direct meessage\n msg_succ = await n2dmsg.wait_msg(1)\n self.assertFalse(msg_succ)", "def test_handshake_pong(tchannel_pair):\n server, client = tchannel_pair\n\n client.ping()\n with pytest.raises(InvalidMessageException):\n server.await_handshake(headers={})", "async def test_chatroom_commands():\n\n # Login all the users.\n tokens = {}\n for name in USERS:\n username = name\n password = name * 2 + '$12345'\n tokens[name] = await attempt_login(username, password)\n\n # Alice will:\n # 1. Connect and retrieve MOTD.\n # 2. 
List rooms, and expect the four in the example.\n # 3. Join \"family\" room, and receive a success.\n # 4. List rooms, and expect the four ones, with \"family\" having \"joined\": true.\n # 3. Join \"family\" room, and receive an error.\n # 4. List rooms, and expect the four ones, with \"family\" having \"joined\": true.\n alice_communicator = make_communicator(tokens['alice'])\n alice_connected, _ = await alice_communicator.connect()\n motd = await alice_communicator.receive_json_from()\n assert motd['type'] == 'notification'\n assert motd['code'] == 'api-motd'\n await alice_communicator.send_json_to({'type': 'list'})\n list_ = await alice_communicator.receive_json_from()\n assert list_['type'] == 'notification'\n assert list_['code'] == 'list'\n assert list_['list'] == [{'name': 'family', 'joined': False}, {'name': 'forex', 'joined': False}, {'name': 'friends', 'joined': False}, {'name': 'stockmarket', 'joined': False}]\n await alice_communicator.send_json_to({'type': 'join', 'room_name': 'family'})\n joined = await alice_communicator.receive_json_from()\n assert joined['type'] == 'room:notification'\n assert joined['code'] == 'joined'\n assert joined['user'] == 'alice'\n assert joined['you']\n assert joined['room_name'] == 'family'\n await alice_communicator.send_json_to({'type': 'list'})\n list_ = await alice_communicator.receive_json_from()\n assert list_['type'] == 'notification'\n assert list_['code'] == 'list'\n assert list_['list'] == [{'name': 'family', 'joined': True}, {'name': 'forex', 'joined': False}, {'name': 'friends', 'joined': False}, {'name': 'stockmarket', 'joined': False}]\n await alice_communicator.send_json_to({'type': 'join', 'room_name': 'family'})\n error = await alice_communicator.receive_json_from()\n assert error['type'] == 'error'\n assert error['code'] == 'room:already-joined'\n assert error['details']['name'] == 'family'\n await alice_communicator.send_json_to({'type': 'list'})\n list_ = await alice_communicator.receive_json_from()\n assert list_['type'] == 'notification'\n assert list_['code'] == 'list'\n assert list_['list'] == [{'name': 'family', 'joined': True}, {'name': 'forex', 'joined': False}, {'name': 'friends', 'joined': False}, {'name': 'stockmarket', 'joined': False}]\n # Bob will:\n # 1. Connect and retrieve MOTD.\n # 2. Join \"family\" room, and receive a success.\n # 3. Send a message in the \"family\" room: \"Hello Alice\", and receive a success.\n # 4. Leave the room, and receive a success.\n # 5. Leave the room, and receive an error.\n # 6. Disconnect.\n # Alice will:\n # 1. Receive the \"Bob joined\" message.\n # 2. Receive the \"Hello Alice\" message.\n # 3. 
Receive the \"Bob left\" message.\n # ~~ Bob interactions ~~\n bob_communicator = make_communicator(tokens['bob'])\n bob_connected, _ = await bob_communicator.connect()\n motd = await bob_communicator.receive_json_from()\n assert motd['type'] == 'notification'\n assert motd['code'] == 'api-motd'\n await bob_communicator.send_json_to({'type': 'join', 'room_name': 'family'})\n joined = await bob_communicator.receive_json_from()\n assert joined['type'] == 'room:notification'\n assert joined['code'] == 'joined'\n assert joined['user'] == 'bob'\n assert joined['you']\n assert joined['room_name'] == 'family'\n await bob_communicator.send_json_to({'type': 'message', 'room_name': 'family', 'body': 'Hello Alice'})\n message = await bob_communicator.receive_json_from()\n assert message['type'] == 'room:notification'\n assert message['code'] == 'message'\n assert message['you']\n assert message['user'] == 'bob'\n assert message['room_name'] == 'family'\n assert message['body'] == 'Hello Alice'\n await bob_communicator.send_json_to({'type': 'part', 'room_name': 'family'})\n parted = await bob_communicator.receive_json_from()\n assert parted['type'] == 'room:notification'\n assert parted['code'] == 'parted'\n assert parted['user'] == 'bob'\n assert parted['you']\n assert parted['room_name'] == 'family'\n await bob_communicator.send_json_to({'type': 'part', 'room_name': 'family'})\n error = await bob_communicator.receive_json_from()\n assert error['type'] == 'error'\n assert error['code'] == 'room:not-joined'\n assert error['details']['name'] == 'family'\n await bob_communicator.disconnect()\n # ~~ Alice interactions ~~\n joined = await alice_communicator.receive_json_from()\n assert joined['type'] == 'room:notification'\n assert joined['code'] == 'joined'\n assert joined['user'] == 'bob'\n assert not joined['you']\n assert joined['room_name'] == 'family'\n message = await alice_communicator.receive_json_from()\n assert message['type'] == 'room:notification'\n assert message['code'] == 'message'\n assert not message['you']\n assert message['user'] == 'bob'\n assert message['room_name'] == 'family'\n assert message['body'] == 'Hello Alice'\n parted = await alice_communicator.receive_json_from()\n assert parted['type'] == 'room:notification'\n assert parted['code'] == 'parted'\n assert parted['user'] == 'bob'\n assert not parted['you']\n assert parted['room_name'] == 'family'\n await alice_communicator.disconnect()", "def run_protocol_checks(sub: Submission, logger):\n\n protocols = sub.protocol\n\n codes = []\n names = set()\n p_types = set()\n allowed_types = ontology_term(\"protocol_types\")\n mandatory = [label for label, attrib in allowed_types.items()\n if attrib[\"exp_type\"] == \"all\" and\n (attrib[\"mandatory\"] == \"ma\" or attrib[\"mandatory\"] == \"seq\")]\n exclusive = [label for label, attrib in allowed_types.items()\n if attrib[\"exp_type\"] == \"all\" and\n attrib[\"mandatory\"] == \"one of\"]\n found_exclusive = False\n\n if not protocols:\n logger.error(\"Experiment has no protocols. At least one expected.\")\n codes.append(\"PROT-E01\")\n return codes\n for p in protocols:\n if p.alias:\n # Protocol names should be unique.\n if p.alias in names:\n logger.error(\"Protocol name \\\"{}\\\" is not unique.\".format(p.alias))\n codes.append(\"PROT-E04\")\n names.add(p.alias)\n # Protocol must have a name\n else:\n logger.error(\"Protocol found with no name. 
Not checking it further.\")\n codes.append(\"PROT-E02\")\n continue\n if p.description:\n # Protocol description should be longer than 50 characters\n if len(p.description) < 50:\n logger.warning(\"Protocol \\\"{}\\\" is shorter than 50 characters.\".format(p.alias))\n codes.append(\"PROT-W01\")\n # Protocol must have description\n else:\n logger.error(\"Protocol \\\"{}\\\" has no description.\".format(p.alias))\n codes.append(\"PROT-E03\")\n if p.protocol_type:\n # Protocol type must be from controlled vocabulary (EFO)\n p_types.add(p.protocol_type.value)\n if p.protocol_type.value not in allowed_types:\n logger.error(\"Protocol \\\"{}\\\" has a type that is not from controlled vocabulary/EFO: \"\n \"\\\"{}\\\"\".format(p.alias, p.protocol_type.value))\n codes.append(\"PROT-E05\")\n if p.protocol_type.value in exclusive:\n found_exclusive = True\n else:\n # Protocol must have a protocol type\n logger.warn(\"Protocol \\\"{}\\\" has no protocol type.\".format(p.alias))\n codes.append(\"PROT-E07\")\n\n # Mandatory protocol types (for all experiment types) must be present\n for p_type in mandatory:\n if p_type not in p_types:\n logger.error(\"A {} must be included.\".format(p_type))\n codes.append(\"PROT-E06\")\n\n # Every experiment must have at least one growth/treatment/sample collection protocol\n if not found_exclusive:\n logger.error(\"A growth, treatment or sample collection protocol must be included.\")\n codes.append(\"PROT-E07\")\n\n return codes", "async def rfxtrx_dsmr_connection_send_validate_fixture(hass):\n\n transport = MagicMock(spec=asyncio.Transport)\n protocol = MagicMock(spec=RFXtrxDSMRProtocol)\n\n protocol.telegram = {\n EQUIPMENT_IDENTIFIER: CosemObject([{\"value\": \"12345678\", \"unit\": \"\"}]),\n EQUIPMENT_IDENTIFIER_GAS: CosemObject([{\"value\": \"123456789\", \"unit\": \"\"}]),\n P1_MESSAGE_TIMESTAMP: CosemObject([{\"value\": \"12345678\", \"unit\": \"\"}]),\n }\n\n async def connection_factory(*args, **kwargs):\n return (transport, protocol)\n\n connection_factory = MagicMock(wraps=connection_factory)\n\n async def wait_closed():\n if isinstance(connection_factory.call_args_list[0][0][2], str):\n # TCP\n telegram_callback = connection_factory.call_args_list[0][0][3]\n else:\n # Serial\n telegram_callback = connection_factory.call_args_list[0][0][2]\n\n telegram_callback(protocol.telegram)\n\n protocol.wait_closed = wait_closed\n\n with patch(\n \"homeassistant.components.dsmr.config_flow.create_rfxtrx_dsmr_reader\",\n connection_factory,\n ), patch(\n \"homeassistant.components.dsmr.config_flow.create_rfxtrx_tcp_dsmr_reader\",\n connection_factory,\n ):\n yield (connection_factory, transport, protocol)", "async def test_create_engine_with_protocol(\n decoy: Decoy,\n subject: EngineStore,\n json_protocol_source: ProtocolSource,\n) -> None:\n protocol = ProtocolResource(\n protocol_id=\"my cool protocol\",\n protocol_key=None,\n created_at=datetime(year=2021, month=1, day=1),\n source=json_protocol_source,\n )\n\n result = await subject.create(\n run_id=\"run-id\",\n labware_offsets=[],\n protocol=protocol,\n )\n assert subject.current_run_id == \"run-id\"\n assert isinstance(result, StateSummary)\n assert isinstance(subject.runner, JsonRunner)\n assert isinstance(subject.engine, ProtocolEngine)", "def test_envelope_echoed_back_node_agent(self):\n addr_1 = self.connection_client_1.address\n addr_n = self.connection_node_2.address\n\n msg = DefaultMessage(\n dialogue_reference=(\"\", \"\"),\n message_id=1,\n target=0,\n 
performative=DefaultMessage.Performative.BYTES,\n content=b\"hello\",\n )\n original_envelope = Envelope(\n to=addr_n,\n sender=addr_1,\n protocol_specification_id=DefaultMessage.protocol_specification_id,\n message=DefaultSerializer().encode(msg),\n )\n\n self.multiplexer_client_1.put(original_envelope)\n delivered_envelope = self.multiplexer_node_2.get(block=True, timeout=10)\n assert delivered_envelope is not None\n\n delivered_envelope.to = addr_1\n delivered_envelope.sender = addr_n\n\n self.multiplexer_node_2.put(delivered_envelope)\n echoed_envelope = self.multiplexer_client_1.get(block=True, timeout=5)\n\n assert echoed_envelope is not None\n assert echoed_envelope.to == original_envelope.sender\n assert delivered_envelope.sender == original_envelope.to\n assert (\n delivered_envelope.protocol_specification_id\n == original_envelope.protocol_specification_id\n )\n assert delivered_envelope.message == original_envelope.message", "def test_im_chat_messages(self):\n pass", "async def test_websocket_application():\n application = URLRouter([path(\"testws/<str:message>/\", KwargsWebSocketApp())])\n communicator = WebsocketCommunicator(application, \"/testws/test/\")\n connected, subprotocol = await communicator.connect()\n # Test connection\n assert connected\n assert subprotocol is None\n message = await communicator.receive_from()\n assert message == \"test\"\n await communicator.disconnect()", "def test_emirp_check():\r\n pass", "def TestSendRecvMessage(self):\n byte_array_message = bytes(\"\\x01\\x01\\x01\\x01\\x01\\x01\", encoding=DATA_ENCODING)\n txmsg = TxMessage(byte_array_message, num_response_msg=1, expect_eom=True)\n # Send a response back from the server (in advance to avoid potential race condition or timeout\n byte_array_response = bytes('\\xFF\\xFF\\xAB\\xBA\\xBA\\xC1', encoding=DATA_ENCODING)\n self.connection.send(byte_array_response)\n rxmsg = self.txrx.send_recv_message(txmsg)\n # Check the received message is EOM\n self.assertEqual(rxmsg, [(0xFFFF, 0xABBABAC1)])\n # Receive the bytes from our test socket\n msg = self.connection.recv(6)\n # Verify the bytes are the same as those sent\n self.assertEqual(msg, byte_array_message)", "def isProtocolDefined(self) -> bool:\n ...", "def test_connectionLostProtocolDeletion(self):\r\n self.pm.startService()\r\n self.pm.addProcess(\"foo\", [\"foo\"])\r\n self.assertIn(\"foo\", self.pm.protocols)\r\n self.pm.protocols[\"foo\"].transport.signalProcess(\"KILL\")\r\n self.reactor.advance(\r\n self.pm.protocols[\"foo\"].transport._terminationDelay)\r\n self.assertNotIn(\"foo\", self.pm.protocols)", "async def test_chatroom_broadcast():\n\n # Login all the users.\n tokens = {}\n for name in USERS:\n username = name\n password = name * 2 + '$12345'\n tokens[name] = await attempt_login(username, password)\n\n # Alice, Bob, Carl connect to the server.\n communicators = {}\n for name in ['alice', 'bob', 'carl']:\n communicator = make_communicator(tokens[name])\n communicators[name] = communicator\n connected, _ = await communicator.connect()\n assert connected\n motd = await communicator.receive_json_from()\n assert motd['type'] == 'notification'\n assert motd['code'] == 'api-motd'\n await communicator.send_json_to({'type': 'join', 'room_name': 'family'})\n await asyncio.sleep(0.5)\n # Alice expects 3 joins.\n joined = await communicators['alice'].receive_json_from()\n assert joined['type'] == 'room:notification'\n assert joined['code'] == 'joined'\n assert joined['user'] == 'alice'\n assert joined['you']\n assert joined['room_name'] == 
'family'\n joined = await communicators['alice'].receive_json_from()\n assert joined['type'] == 'room:notification'\n assert joined['code'] == 'joined'\n assert joined['user'] == 'bob'\n assert not joined['you']\n assert joined['room_name'] == 'family'\n joined = await communicators['alice'].receive_json_from()\n assert joined['type'] == 'room:notification'\n assert joined['code'] == 'joined'\n assert joined['user'] == 'carl'\n assert not joined['you']\n assert joined['room_name'] == 'family'\n # Bob expects 2 joins.\n joined = await communicators['bob'].receive_json_from()\n assert joined['type'] == 'room:notification'\n assert joined['code'] == 'joined'\n assert joined['user'] == 'bob'\n assert joined['you']\n assert joined['room_name'] == 'family'\n joined = await communicators['bob'].receive_json_from()\n assert joined['type'] == 'room:notification'\n assert joined['code'] == 'joined'\n assert joined['user'] == 'carl'\n assert not joined['you']\n assert joined['room_name'] == 'family'\n # Carl expects 1 join.\n joined = await communicators['carl'].receive_json_from()\n assert joined['type'] == 'room:notification'\n assert joined['code'] == 'joined'\n assert joined['user'] == 'carl'\n assert joined['you']\n assert joined['room_name'] == 'family'\n # Now Alice sends a \"Hello guys\" message, and bob and carl\n # will read it.\n await communicators['alice'].send_json_to({'type': 'message', 'room_name': 'family', 'body': 'Hello guys'})\n message = await communicators['alice'].receive_json_from()\n assert message['type'] == 'room:notification'\n assert message['code'] == 'message'\n assert message['you']\n assert message['user'] == 'alice'\n assert message['room_name'] == 'family'\n assert message['body'] == 'Hello guys'\n message = await communicators['bob'].receive_json_from()\n assert message['type'] == 'room:notification'\n assert message['code'] == 'message'\n assert not message['you']\n assert message['user'] == 'alice'\n assert message['room_name'] == 'family'\n assert message['body'] == 'Hello guys'\n message = await communicators['carl'].receive_json_from()\n assert message['type'] == 'room:notification'\n assert message['code'] == 'message'\n assert not message['you']\n assert message['user'] == 'alice'\n assert message['room_name'] == 'family'\n assert message['body'] == 'Hello guys'\n # Now they all leave the channel.\n for name in ['alice', 'bob', 'carl']:\n await communicators[name].send_json_to({'type': 'part', 'room_name': 'family'})\n await asyncio.sleep(0.5)\n # And they will receive all the part messages.\n parted = await communicators['alice'].receive_json_from()\n assert parted['type'] == 'room:notification'\n assert parted['code'] == 'parted'\n assert parted['user'] == 'alice'\n assert parted['you']\n assert parted['room_name'] == 'family'\n parted = await communicators['bob'].receive_json_from()\n assert parted['type'] == 'room:notification'\n assert parted['code'] == 'parted'\n assert parted['user'] == 'alice'\n assert not parted['you']\n assert parted['room_name'] == 'family'\n parted = await communicators['bob'].receive_json_from()\n assert parted['type'] == 'room:notification'\n assert parted['code'] == 'parted'\n assert parted['user'] == 'bob'\n assert parted['you']\n assert parted['room_name'] == 'family'\n parted = await communicators['carl'].receive_json_from()\n assert parted['type'] == 'room:notification'\n assert parted['code'] == 'parted'\n assert parted['user'] == 'alice'\n assert not parted['you']\n assert parted['room_name'] == 'family'\n parted = await 
communicators['carl'].receive_json_from()\n assert parted['type'] == 'room:notification'\n assert parted['code'] == 'parted'\n assert parted['user'] == 'bob'\n assert not parted['you']\n assert parted['room_name'] == 'family'\n parted = await communicators['carl'].receive_json_from()\n assert parted['type'] == 'room:notification'\n assert parted['code'] == 'parted'\n assert parted['user'] == 'carl'\n assert parted['you']\n assert parted['room_name'] == 'family'\n # And the 3 will disconnect.\n for name in ['alice', 'bob', 'carl']:\n await communicator.disconnect()", "def test_validation_wrong_protocols():\n basic_protocol = yank_load(standard_protocol)\n\n # Alchemical paths\n protocols = [\n {'lambda_electrostatics': [1.0, 0.5, 0.0]},\n {'lambda_electrostatics': [1.0, 0.5, 0.0], 'lambda_sterics': [1.0, 0.5, 'wrong!']},\n {'lambda_electrostatics': [1.0, 0.5, 0.0], 'lambda_sterics': [1.0, 0.5, 11000.0]},\n {'lambda_electrostatics': [1.0, 0.5, 0.0], 'lambda_sterics': [1.0, 0.5, -0.5]},\n {'lambda_electrostatics': [1.0, 0.5, 0.0], 'lambda_sterics': 0.0},\n {'lambda_electrostatics': [1.0, 0.5, 0.0], 'lambda_sterics': [1.0, 0.5, 0.0], 3: 2}\n ]\n for protocol in protocols:\n modified_protocol = copy.deepcopy(basic_protocol)\n modified_protocol['absolute-binding']['complex']['alchemical_path'] = protocol\n yield assert_raises, YamlParseError, ExperimentBuilder._validate_protocols, modified_protocol\n\n # Try different options both with 'auto' and a path with alchemical functions.\n auto_path = 'auto'\n no_lambda_path = {'lambda_electrostatics': 'lambda**2', 'lambda_sterics': 'sqrt(lambda)'}\n hardcoded_path = {'lambda_electrostatics': [1.0, 0.0], 'lambda_sterics': [1.0, 0.0]}\n correct_lambda_path = {'lambda': [1.0, 0.0], **no_lambda_path}\n str_lambda_path = {'lambda': 'string', **no_lambda_path}\n three_lambda_path = {'lambda': [1.0, 0.5, 0.0], **no_lambda_path}\n\n # Each test case is (error_regex, options, alchemical_path)\n trailblazer_options = [\n (\"n_equilibration_iterations:\\n - must be of integer type\",\n {'n_equilibration_iterations': 'bla'}, auto_path),\n (\"Only mathematical expressions have been given with no values for their variables\",\n {}, no_lambda_path),\n (\"Mathematical expressions were detected but no function variable name was given\",\n {}, correct_lambda_path),\n (\"Function variable name 'lambda' is not defined in 'alchemical_path'\",\n {'function_variable_name': 'lambda'}, hardcoded_path),\n (\"Only mathematical expressions have been given with no values for their variables\",\n {'function_variable_name': 'lambda'}, str_lambda_path),\n (\"Only the two end-point values of function variable 'lambda' should be given.\",\n {'function_variable_name': 'lambda'}, three_lambda_path),\n ]\n for regex, opts, alchemical_path in trailblazer_options:\n modified_protocol = copy.deepcopy(basic_protocol)\n modified_protocol['absolute-binding']['complex']['alchemical_path'] = alchemical_path\n modified_protocol['absolute-binding']['complex']['trailblazer_options'] = opts\n yield assert_raises_regexp, YamlParseError, regex, ExperimentBuilder._validate_protocols, modified_protocol\n\n # Phases\n alchemical_path = copy.deepcopy(basic_protocol['absolute-binding']['complex'])\n protocols = [\n {'complex': alchemical_path},\n {2: alchemical_path, 'solvent': alchemical_path},\n {'complex': alchemical_path, 'solvent': alchemical_path, 'thirdphase': alchemical_path},\n {'my-complex-solvent': alchemical_path, 'my-solvent': alchemical_path},\n {'my-complex': alchemical_path, 
'my-complex-solvent': alchemical_path},\n {'my-complex': alchemical_path, 'my-complex': alchemical_path},\n {'complex': alchemical_path, 'solvent1': alchemical_path, 'solvent2': alchemical_path},\n {'my-phase1': alchemical_path, 'my-phase2': alchemical_path},\n collections.OrderedDict([('my-phase1', alchemical_path), ('my-phase2', alchemical_path),\n ('my-phase3', alchemical_path)])\n ]\n for protocol in protocols:\n modified_protocol = copy.deepcopy(basic_protocol)\n modified_protocol['absolute-binding'] = protocol\n yield assert_raises, YamlParseError, ExperimentBuilder._validate_protocols, modified_protocol", "def test_pack_unpack_with_routing_keys(alice, bob):\n route1 = Connection.from_parts(crypto.create_keypair())\n route2 = Connection.from_parts(crypto.create_keypair())\n alice.target.update(routing_keys=[route1.verkey, route2.verkey])\n packed_message = alice.pack({\"@type\": \"doc;protocol/1.0/name\"})\n\n route2_msg = route2.unpack(packed_message)\n assert route2_msg.type == utils.FORWARD\n assert route2_msg[\"to\"] == route1.verkey_b58\n assert route2_msg.mtc.is_anoncrypted()\n assert route2_msg.mtc.sender is None\n\n route1_msg = route1.unpack(route2_msg[\"msg\"])\n assert route1_msg.type == utils.FORWARD\n assert route1_msg[\"to\"] == bob.verkey_b58\n assert route1_msg.mtc.is_anoncrypted()\n assert route1_msg.mtc.sender is None\n\n bob_msg = bob.unpack(route1_msg[\"msg\"])\n assert bob_msg.type == \"doc;protocol/1.0/name\"\n assert bob_msg.mtc.is_authcrypted()\n assert bob_msg.mtc.sender == alice.verkey_b58\n assert bob_msg.mtc.recipient == bob.verkey_b58", "def testEngine(self):\n e = stomper.Engine(testing=True)\n\n # test session connected message:\n msg = \"\"\"CONNECTED\nsession:ID:snorky.local-49191-1185461799654-3:18\n\n\\x00\n\"\"\"\n result = stomper.unpack_frame(msg)\n correct = ''\n returned = e.react(result)\n self.assertEqual(returned, correct)\n\n # test message:\n msg = \"\"\"MESSAGE\ndestination: /queue/a\nmessage-id: some-message-id\n\nhello queue a\n\n\\x00\n\"\"\"\n returned = e.react(msg)\n correct = 'ACK\\nmessage-id: some-message-id\\n\\n\\x00\\n'\n self.assertEqual(returned, correct)\n\n # test error:\n msg = \"\"\"ERROR\nmessage:some error\n\nThere was a problem with your last message\n\n\\x00\n\"\"\"\n returned = e.react(msg)\n correct = 'error'\n self.assertEqual(returned, correct)\n\n # test receipt:\n msg = \"\"\"RECEIPT\nmessage-id: some-message-id\n\n\\x00\n\"\"\"\n returned = e.react(msg)\n correct = 'receipt'\n self.assertEqual(returned, correct)", "def test_messages(self):\n pass", "def test_client_copy_from_both_proto_plus(self):\n destination = ProtoPlusFixture()\n origin = ProtoPlusFixture()\n origin.name = \"Test\"\n\n util.proto_copy_from(destination, origin)\n\n self.assertEqual(destination.name, \"Test\")\n self.assertIsNot(destination, origin)", "def test_message_user():", "def test_sending_sms(self):\n try:\n from django.conf import settings\n except ImportError:\n self.fail(msg=\"No TEST_NUMBER found in settings!\")\n\n from rapidsms.router import send\n from rapidsms.models import Connection, Backend\n from random import randint\n\n b = Backend.objects.get_or_create(name='envaya_nexmo')[0]\n c = Connection.objects.get_or_create(identity = settings.TEST_NUMBER, backend = b)[0]\n msg = \"Hey, this is a test message from NexmoOutgoingBackendTest! 
\\n Your Lucky number is %s\" % (randint(1,42))\n\n send(msg,[c])\n print \"Cannot actually verify whether the message was sent or not because of the limitations of rapdisms framework :-/\"", "def test_handshake_succeeds(self):\n session_mock = Mock()\n t = FakeTransport()\n f = WampRawSocketClientFactory(lambda: session_mock)\n p = WampRawSocketClientProtocol()\n p.transport = t\n p.factory = f\n\n server_session_mock = Mock()\n st = FakeTransport()\n sf = WampRawSocketServerFactory(lambda: server_session_mock)\n sp = WampRawSocketServerProtocol()\n sp.transport = st\n sp.factory = sf\n\n sp.connectionMade()\n p.connectionMade()\n\n # Send the server the client handshake\n sp.dataReceived(t._written[0:1])\n sp.dataReceived(t._written[1:4])\n\n # Send the client the server handshake\n p.dataReceived(st._written)\n\n # The handshake succeeds, a session on each end is created\n # onOpen is called on the session\n session_mock.onOpen.assert_called_once_with(p)\n server_session_mock.onOpen.assert_called_once_with(sp)", "def testFailure(self):\n request = b'hello'\n reply = self.sendAndReceive(request)\n self.assertEqual(2, reply[0])", "def test_mock_transport_without_random_failures(self): # pylint: disable=too-many-locals\n msgcount = 10000\n expected_msgcount = 0\n failurerate = 0\n maxdelay = 1\n\n def genmsg(index):\n nonlocal expected_msgcount\n msg = 'msg-' + str(index)\n if self.cointoss():\n expected_msgcount += 1\n return msg\n # force drop\n return msg + MockTransport.ALWAYSDROP_TEXT\n\n messages = {genmsg(i): conc.AtomicNumber(0) for i in range(msgcount)}\n recvcount = conc.AtomicNumber(0)\n errcount = conc.AtomicNumber(0) # count of unexpected messages recvd\n\n def receivemsg(msg, node):\n # print(\"Received message [{}] from {}\".format(msg, node))\n if str(node) != self.LOCAL_ADDRESS:\n errcount.next()\n recvcount.next()\n counter = messages.get(msg)\n if counter:\n counter.next()\n else:\n errcount.next() # unexpected message\n\n transport = MockTransport(self.LOCAL_ADDRESS)\n transport.set_transport_characteristics(failurerate, maxdelay)\n transport.start_listening(receivemsg)\n node = transport.new_remotenode(self.LOCAL_ADDRESS)\n for msg in messages:\n transport.send(msg, node) # We're sending the keys\n # print(\"Waiting... 
for {} messages\".format(expected_msgcount))\n while recvcount.value() < expected_msgcount and transport.healthy():\n time.sleep(0.1)\n # print(\"Received {} messages\".format(msgcount))\n\n # Need to close before any test failure assertions, otherwise\n # scheduler thread will never exit\n transport.close()\n\n self.assertEqual(errcount.value(), 0)\n self.assertEqual(recvcount.value(), expected_msgcount)\n\n # Check that expected messages are received exactly once, and\n # unexpected messages are not received.\n for msg, counter in messages.items():\n expected = 1\n if MockTransport.ALWAYSDROP_TEXT in msg:\n expected = 0\n self.assertEqual(counter.value(), expected, \"msg count for:\" + msg)\n\n print(\"transport_simple: received {}/{} messages\".format(recvcount, msgcount))", "def test_send_1_1(self):\n netconf = self.generate_all_mocks()\n netconf.current_level = netconf_connection.NETCONF_1_1_CAPABILITY\n netconf.chan.recv = mock.MagicMock(\n return_value=(\n \"\\n#5\\n<rpc>\\n#6\\n</rpc>\\n##\\n\"\n )\n )\n self.assertEqual(\n \"<rpc></rpc>\",\n netconf.send(\"ping\")\n )\n netconf.chan.send.assert_called_with(\n \"\\n#4\\nping\\n##\\n\"\n )\n # check with preloaded\n netconf.buff = \"\\n#25\\n12345678901234567890\"\n netconf.chan.recv = mock.MagicMock(\n return_value=(\n \"12345\\n#5\\n<rpc>\\n#6\\n</rpc>\\n##\\n\"\n )\n )\n self.assertEqual(\n \"1234567890123456789012345<rpc></rpc>\",\n netconf.send(\"ping\")\n )\n netconf.chan.send.assert_called_with(\n \"\\n#4\\nping\\n##\\n\"\n )\n self.assertEqual(\n \"\", netconf.buff\n )\n # check with preloaded\n netconf.buff = \"\\n#5\"\n netconf.chan.recv = mock.MagicMock(\n return_value=(\n \"\\n12345\\n#5\\n<rpc>\\n#6\\n</rpc>\\n##\\n\"\n )\n )\n self.assertEqual(\n \"12345<rpc></rpc>\",\n netconf.send(\"ping\")\n )\n netconf.chan.send.assert_called_with(\n \"\\n#4\\nping\\n##\\n\"\n )\n self.assertEqual(\n \"\", netconf.buff\n )\n # broken package\n netconf.chan.recv = mock.MagicMock(\n return_value=(\n \"\\n1\"\n )\n )\n with self.assertRaises(cfy_exc.NonRecoverableError):\n netconf.send(\"ping\")", "def test_client_copy_from_different_types_protobuf(self):\n destination = ProtoPlusFixture()\n destination = type(destination).pb(destination)\n origin = ProtobufFixture()\n origin.name = \"Test\"\n\n self.assertRaises(TypeError, util.proto_copy_from, destination, origin)", "def test_message_group():", "def test_scenario_2(scenario):\n nt_server, nt_client, nt_client2, st, ct1, ct2 = scenario\n\n nt_client.disconnect()\n nt_client2.disconnect()\n\n ct1.putString(\"Client1Only\", \"11\")\n ct1.putString(\"SC1Shared\", \"11\")\n ct1.putString(\"ClientShared\", \"11\")\n\n ct2.putString(\"Client2Only\", \"12\")\n ct2.putString(\"SC2Shared\", \"12\")\n ct2.putString(\"ClientShared\", \"12\")\n\n st.putString(\"ServerOnly\", \"10\")\n st.putString(\"SC1Shared\", \"10\")\n st.putString(\"SC2Shared\", \"10\")\n\n with nt_server.expect_changes(3):\n with nt_client.expect_changes(3):\n nt_client.start_test()\n\n with nt_server.expect_changes(3):\n with nt_client.expect_changes(3):\n with nt_client2.expect_changes(3):\n nt_client2.start_test()\n\n check_results(\n st,\n ct1,\n ct2,\n ServerOnly=10,\n Client1Only=11,\n Client2Only=12,\n SC1Shared=11,\n SC2Shared=12,\n ClientShared=12,\n )", "def test_send_1_1(self):\n netconf = self.generate_all_mocks()\n netconf.current_level = netconf_connection.NETCONF_1_1_CAPABILITY\n netconf.conn.recv = mock.MagicMock(\n return_value=(\n \"\\n#5\\n<rpc>\\n#6\\n</rpc>\\n##\\n\"\n )\n )\n self.assertEqual(\n 
\"<rpc></rpc>\",\n netconf.send(\"ping\")\n )\n netconf.conn.send.assert_called_with(\n \"\\n#4\\nping\\n##\\n\"\n )\n # check with preloaded\n netconf.buff = \"\\n#25\\n12345678901234567890\"\n netconf.conn.recv = mock.MagicMock(\n return_value=(\n \"12345\\n#5\\n<rpc>\\n#6\\n</rpc>\\n##\\n\"\n )\n )\n self.assertEqual(\n \"1234567890123456789012345<rpc></rpc>\",\n netconf.send(\"ping\")\n )\n netconf.conn.send.assert_called_with(\n \"\\n#4\\nping\\n##\\n\"\n )\n self.assertEqual(\n \"\", netconf.buff\n )\n # check with preloaded\n netconf.buff = \"\\n#5\"\n netconf.conn.recv = mock.MagicMock(\n return_value=(\n \"\\n12345\\n#5\\n<rpc>\\n#6\\n</rpc>\\n##\\n\"\n )\n )\n self.assertEqual(\n \"12345<rpc></rpc>\",\n netconf.send(\"ping\")\n )\n netconf.conn.send.assert_called_with(\n \"\\n#4\\nping\\n##\\n\"\n )\n self.assertEqual(\n \"\", netconf.buff\n )\n # broken package\n netconf.conn.recv = mock.MagicMock(\n return_value=(\n \"\\n1\"\n )\n )\n with self.assertRaises(exceptions.NonRecoverableError):\n netconf.send(\"ping\")\n # empty package with closed connection\n netconf.buff = \"\"\n netconf.conn.recv = mock.MagicMock(return_value=(\"\"))\n self.assertEqual(netconf.send(\"ping\"), \"\")", "def test_received_messages():\n app = HelperApp(server.message_app)\n # Get a list of people to send messages to (everyone who's not Jessie)\n with open(\"passwords.json\") as f:\n passwords = json.load(f)\n people = set(passwords.keys())\n\n # Add a bunch of messages (26)\n for l in string.ascii_lowercase:\n sender = random.choice(list(people))\n receiver = random.choice(list(people - set(sender)))\n app.post('/login/', {'username': sender,\n 'password': passwords[sender]})\n\n app.get('/compose/')\n app.post('/compose/', {'to': receiver,\n 'subject': l, 'body': l.upper()})\n\n received = {}\n for person in people:\n messages = message.load_received_messages(person)\n received[person] = messages\n\n # It's extremely improbable that all of the messages would go to\n # the same person.\n assert len(messages) < 26\n\n for m in messages:\n assert m['to'] == person\n\n # We should have seen 26 total messages\n assert sum(len(l) for l in received.values()) == 26\n\n # We should have 26 unique messages\n assert len(set(x['id'] for l in received.values() for x in l)) == 26", "def test_channelCorrection(self):\n self.client.invite(\"foo\", \"bar\")\n self.assertEqual(self.client.lines, [\"INVITE foo #bar\"])", "def test_subscriber_access_for_two_vsg_services(self):", "def test_two_way_comms(relay_board_name, timeout):\n before = time.time() + timeout\n with connected_socket(relay_board_name, before) as sock:\n timed_write(sock, START_COMMAND + TEST_2_WAY_COMMS, before)\n res = timed_read(sock, before)\n return res2status(res)", "def test_handshakeV12(self):\n \n user=padString('hello')\n host=padString('world')\n\n self.sfact.program= \\\n [('send',CAmessage(dtype=0, count=12)),\n ('recv',CAmessage(dtype=0, count=CA_VERSION)),\n ('recv',CAmessage(cmd=20, size=len(user), body=user)),\n ('recv',CAmessage(cmd=21, size=len(host), body=host)),\n ]\n # since client gets notification before program\n # completes have server do shutdown\n self.sfact.halt=True\n\n d=self.cfact.requestCircuit(self.target)\n\n @d.addCallback\n def postCondition(circ):\n self.assertTrue(circ is not None)\n # we get notification when the first packet is processed\n # the next three may have been received\n self.assertTrue(len(self.sfact.program)<=3)\n self.assertEqual(circ.version,12)\n \n return circ.transport.connector.whenDis\n\n 
@d.addCallback\n def done(circ):\n self.assertEqual(self.sfact.program,[])\n\n return d", "def test_send(self):\n msg_flag = self.instance.send(self.msg_short)\n assert(msg_flag)\n msg_flag, msg_recv = self.driver.recv(self.timeout)\n assert(msg_flag)\n nt.assert_equal(msg_recv, self.msg_short)", "def test_envelope_received(self):\n sender = self.connection_client_2.address\n receiver = self.connection_client_1.address\n envelope = self._make_envelope(sender, receiver)\n\n # make the receive to fail\n with mock.patch.object(\n self.connection_client_1.logger, \"error\"\n ) as _mock_logger, mock.patch.object(\n self.connection_client_1._node_client,\n \"_read\",\n side_effect=ConnectionError(),\n ):\n # this envelope will be lost.\n self.multiplexer_client_2.put(envelope)\n # give time to reconnect\n time.sleep(2.0)\n _mock_logger.assert_has_calls(\n [\n call(\n RegexComparator(\n \"Connection error:.*Try to reconnect and read again\"\n )\n )\n ]\n )\n # proceed as usual. Now we expect the connection to have reconnected successfully\n delivered_envelope = self.multiplexer_client_1.get(block=True, timeout=20)\n\n assert delivered_envelope is not None\n assert delivered_envelope.to == envelope.to\n assert delivered_envelope.sender == envelope.sender\n assert (\n delivered_envelope.protocol_specification_id\n == envelope.protocol_specification_id\n )\n assert delivered_envelope.message == envelope.message", "def test_away(self):\n message = \"Sorry, I'm not here.\"\n self.protocol.away(message)\n expected = [\n \"AWAY :{}\".format(message),\n \"\",\n ]\n self.assertEqualBufferValue(self.transport.value().split(b\"\\r\\n\"), expected)", "def test_message_rx(self):\n\n self.maxDiff = 1000\n session_id = self._open_session()\n\n # Send a data message in our new session\n sent_data_message = {\n \"fqdn\": self.CLIENT_NAME,\n \"type\": \"DATA\",\n \"plugin\": \"test_messaging\",\n \"session_id\": session_id,\n \"session_seq\": 0,\n \"body\": None,\n }\n response = self._post([sent_data_message])\n self.assertResponseOk(response)\n\n forwarded_data_message = self._receive_one_amqp()\n\n self.assertDictEqual(sent_data_message, forwarded_data_message)", "def test_restricted_to_protocols_updated(self):\n assert self.connection_config.restricted_to_protocols == {self.new_protocol_id}", "def test_client_copy_from_different_types_proto_plus(self):\n destination = ProtobufFixture()\n destination = proto.Message.wrap(destination)\n origin = ProtoPlusFixture()\n origin.name = \"Test\"\n\n self.assertRaises(TypeError, util.proto_copy_from, destination, origin)", "def test_save_send(self):\r\n # Don't really know how to test this effectively...\r\n # Would require to simulate a blocking socket on the recipient side...\r\n pass", "def test_portforward(self):\n realServerFactory = protocol.ServerFactory()\n realServerFactory.protocol = lambda: self.serverProtocol\n realServerPort = reactor.listenTCP(0, realServerFactory, interface=\"127.0.0.1\")\n self.openPorts.append(realServerPort)\n self.proxyServerFactory = TestableProxyFactory(\n \"127.0.0.1\", realServerPort.getHost().port\n )\n proxyServerPort = reactor.listenTCP(\n 0, self.proxyServerFactory, interface=\"127.0.0.1\"\n )\n self.openPorts.append(proxyServerPort)\n\n nBytes = 1000\n received = []\n d = defer.Deferred()\n\n def testDataReceived(data):\n received.extend(iterbytes(data))\n if len(received) >= nBytes:\n self.assertEqual(b\"\".join(received), b\"x\" * nBytes)\n d.callback(None)\n\n self.clientProtocol.dataReceived = testDataReceived\n\n def 
testConnectionMade():\n self.clientProtocol.transport.write(b\"x\" * nBytes)\n\n self.clientProtocol.connectionMade = testConnectionMade\n\n clientFactory = protocol.ClientFactory()\n clientFactory.protocol = lambda: self.clientProtocol\n\n reactor.connectTCP(\"127.0.0.1\", proxyServerPort.getHost().port, clientFactory)\n\n return d", "def test_decode_failure(self):\n\n def handle(event):\n return 0x0000, event.attribute_list\n\n self.ae = ae = AE()\n ae.acse_timeout = 5\n ae.dimse_timeout = 5\n ae.network_timeout = 5\n ae.add_supported_context(ModalityPerformedProcedureStep)\n\n handlers = [(evt.EVT_N_CREATE, handle)]\n scp = ae.start_server((\"localhost\", 11112), evt_handlers=handlers, block=False)\n\n ae.add_requested_context(ModalityPerformedProcedureStep)\n assoc = ae.associate(\"localhost\", 11112)\n assert assoc.is_established\n\n class DummyReply:\n def getvalue(self):\n def test():\n pass\n\n return test\n\n class DummyMessage:\n is_valid_response = True\n is_valid_request = False\n AttributeList = DummyReply()\n Status = 0x0000\n STATUS_OPTIONAL_KEYWORDS = []\n\n class DummyDIMSE:\n msg_queue = queue.Queue()\n gotten = False\n\n def send_msg(*args, **kwargs):\n return\n\n def get_msg(self, *args, **kwargs):\n if not self.gotten:\n self.gotten = True\n return 1, DummyMessage()\n return None, None\n\n assoc._reactor_checkpoint.clear()\n while not assoc._is_paused:\n time.sleep(0.01)\n assoc.dimse = DummyDIMSE()\n ds = Dataset()\n ds.PatientName = \"Test^test\"\n status, ds = assoc.send_n_create(\n ds, ModalityPerformedProcedureStep, \"1.2.840.10008.5.1.1.40.1\"\n )\n assert status.Status == 0x0110\n assert ds is None\n assoc.release()\n assert assoc.is_released\n\n scp.shutdown()", "def test_generate_and_send(mock_sr):\n # mock send packets\n mock_sr.return_value = fake_sr_return()\n\n # init generator\n netprobify = NetProbify()\n netprobify.instantiate_generator()\n\n # generate packets\n TARGET.generate_packets(GROUP, netprobify.id_gen)\n assert len(TARGET.packets) == 10\n assert TARGET.packets[0].dst == \"127.0.0.1\"\n assert TARGET.packets[0].sport == 65000\n\n # check number of packets\n assert len(TARGET.packets) == 10\n\n # check if the sport are rotated in the range\n n = 0\n for pkt in TARGET.packets:\n port = n % 2 + 65000\n n += 1\n assert pkt[UDP].sport == port\n assert pkt.id == n\n\n # subnet test\n UDPunreachable(\n \"localhost\",\n active=True,\n description=\"localhost\",\n destination=\"127.0.0.0/30\",\n config_destination=\"127.0.0.0/30\",\n address_family=\"ipv4\",\n dont_fragment=True,\n is_subnet=True,\n nb_packets=1,\n interval=0,\n timeout=1,\n dst_port=0,\n ip_payload_size=0,\n threshold=1,\n state=\"in production\",\n alert_level=\"paging\",\n is_dynamic=False,\n dns_update_interval=0,\n groups={\"test\"},\n lifetime={\"days\": \"1\"},\n creation_date=None,\n )\n\n TARGET.generate_packets(GROUP, netprobify.id_gen)\n ip_addresses = [\"127.0.0.0\", \"127.0.0.1\", \"127.0.0.2\", \"127.0.0.3\"]\n for pkt in TARGET.packets:\n n += 1\n assert pkt.dst in ip_addresses\n assert pkt.id == n\n\n # fake packets sending\n result = []\n TARGET.send_packets(result, \"WARNING\", GROUP)\n\n assert result == [\n {\n 65000: {\"sent\": 1, \"loss\": 1, \"timestamp_ooo\": 0, \"latency\": []},\n 65001: {\"sent\": 1, \"loss\": 0, \"timestamp_ooo\": 0, \"latency\": [0.1]},\n \"name\": \"localhost\",\n \"probing_type\": \"UDPunreachable\",\n \"groups\": {\"test\"},\n \"destination\": \"127.0.0.1\",\n \"address_family\": \"ipv4\",\n \"state\": \"in production\",\n \"alert_level\": 
\"paging\",\n \"ip_payload_size\": 8,\n \"port_mismatch\": 0,\n }\n ]", "def test_client_copy_from_non_proto_message(self):\n destination = ProtoPlusFixture()\n origin = {\"name\": \"Test\"}\n\n self.assertRaises(ValueError, util.proto_copy_from, destination, origin)", "def test_is_simulating(\n decoy: Decoy,\n mock_engine_client: EngineClient,\n subject: ProtocolCore,\n) -> None:\n decoy.when(mock_engine_client.state.config.ignore_pause).then_return(True)\n assert subject.is_simulating()", "def test_basic():\n\n test_extra = 'ugh'\n\n req1 = SentmanRequest(SentmanRequest.ALLOCATE_SENTINEL)\n\n # make sure that we can pack and then recv.\n req1_buf = req1.pack()\n (msgs, buf) = SentmanRequest.recv(req1_buf)\n assert(len(msgs) == 1)\n assert(str(msgs[0]) == str(req1))\n assert(buf == '')\n\n # make sure that we can pack and then recv with a partial\n req1_buf = req1.pack() + test_extra\n (msgs, buf) = SentmanRequest.recv(req1_buf)\n assert(len(msgs) == 1)\n assert(str(msgs[0]) == str(req1))\n assert(buf == test_extra)\n\n # make sure that we can pack and then recv multiple\n req1_buf = req1.pack() * 2\n (msgs, buf) = SentmanRequest.recv(req1_buf)\n assert(len(msgs) == 2)\n assert(str(msgs[0]) == str(req1))\n assert(str(msgs[1]) == str(req1))\n assert(buf == '')\n\n # make sure that we can pack and then recv multiple plus a partial\n req1_buf = (req1.pack() * 2) + test_extra\n (msgs, buf) = SentmanRequest.recv(req1_buf)\n assert(len(msgs) == 2)\n assert(str(msgs[0]) == str(req1))\n assert(str(msgs[1]) == str(req1))\n assert(buf == test_extra)" ]
[ "0.6693443", "0.65527207", "0.64990115", "0.64490074", "0.64464223", "0.6435972", "0.641256", "0.63902915", "0.6338546", "0.6269686", "0.6266628", "0.6260895", "0.6249174", "0.61818177", "0.61632335", "0.61631536", "0.61530423", "0.61333424", "0.6118203", "0.60873365", "0.6057119", "0.60551274", "0.60195684", "0.6013828", "0.60061854", "0.60061854", "0.6005192", "0.5999445", "0.5917562", "0.5914799", "0.5905133", "0.5891651", "0.5882147", "0.58803874", "0.5871866", "0.5871787", "0.58716893", "0.5869242", "0.5861064", "0.58417034", "0.58337563", "0.58262825", "0.5819553", "0.58164", "0.58164", "0.5806963", "0.58047813", "0.58038956", "0.57947934", "0.5788796", "0.5768111", "0.5762061", "0.5758767", "0.57404995", "0.5729344", "0.57210094", "0.57124513", "0.57046825", "0.5702892", "0.56976527", "0.569412", "0.56936675", "0.5689173", "0.56871617", "0.5682271", "0.56808126", "0.56793684", "0.5675246", "0.56721455", "0.5671581", "0.5666115", "0.56552577", "0.56464565", "0.5613103", "0.56108207", "0.55873847", "0.5585169", "0.5582636", "0.5576207", "0.5567409", "0.5559117", "0.5556581", "0.55508286", "0.55462515", "0.5544196", "0.554052", "0.5539544", "0.5536561", "0.5532551", "0.552831", "0.5527236", "0.5526463", "0.5515335", "0.54819053", "0.5479643", "0.5479475", "0.5469505", "0.5466374", "0.5466331", "0.5466022" ]
0.6860271
0
Tear the test down.
def teardown_class(cls):
    os.chdir(cls.cwd)
    try:
        shutil.rmtree(cls.t)
    except (OSError, IOError):
        pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def teardown_test(self):\n self.log.info('Tearing down the test case')\n self.iperf_server.stop()\n self.access_point.bridge.teardown(self.brconfigs)\n self.access_point.close()\n wputils.reset_host_interface(self.pkt_sender.interface)\n self.mon.usb('on')", "def tearDown(self):\n logging.debug('tearing down')", "def tearDown(self):\n logging.debug('tearing down')", "def tearDown(self):\n self.testbed.deactivate()", "def tearDown(self):\n self.testbed.deactivate()", "def tearDown(self):\n test_env_teardown()", "def tearDown(self):\n\n self._tear_down()", "def tearDown(self):\n\n BaseTest.tearDown(self)", "def tearDown(self):\r\n testing.tearDown()", "def teardown_class(self):\n self.log.info('Tearing down the test class')\n self.mon.usb('on')\n self.access_point.close()", "def tearDown(self): # pylint: disable=invalid-name\n self.hass.stop()", "def tearDown(self): # pylint: disable=invalid-name\n self.hass.stop()", "def tearDown(self): # pylint: disable=invalid-name\n self.hass.stop()", "def tearDown(self): # pylint: disable=invalid-name\n self.hass.stop()", "def tearDown(self):\n self.m.shutdown()", "def tearDown(self):\n test_utils.delete_test_config()", "def tearDown(self):\n zope.component.testing.tearDown()", "def teardown(self):\n del self.testInst, self.dname, self.test_vals, self.test_fracs\n\n return", "def tearDown(self):\n self.hass.stop()", "def tearDown(self):\n self.hass.stop()", "def tearDown(self):\n self.hass.stop()", "def tearDown(self):\n self.hass.stop()", "def teardown(self):\n del self.testInst, self.dname\n\n return", "def teardown(self):\n pass", "def teardown(self):\n pass", "def teardown(self):\n pass", "def tearDown(self):\n tests.utils.cleanup_environment()", "def tearDown(self):\n tests.utils.cleanup_environment()", "def teardown(self):\n del self.testInst, self.dname, self.test_val_length\n\n return", "def teardown(self):\n del self.testInst, self.dname, self.test_val_length\n\n return", "def teardown(self) -> None:", "def teardown(self) -> None:", "def teardown(self) -> None:", "def tearDown(self):\n self.teardown_beets()", "def tear_down(self):\n self.driver.close()\n self.driver.quit()", "def teardown(self):\n pass # pylint: disable=unnecessary-pass", "def _tearDown(self):\r\n\r\n if core.FW_conf['connection'].isLeader() and core.FW_conf['settings'].TestRun.BLTEnabledInFollower:\r\n executeInFollower(\"core.FW_conf['blt_ue'].stopCurrentMeasuring()\")\r\n\r\n # stop current measurement if battery is available\r\n if core.FW_conf['connection'].battery is not None and core.FW_conf['connection'].battery.isEnabled():\r\n core.FW_conf['connection'].battery.stopCurrentMeasuring()\r\n\r\n # skip tearDown if systemExit exception has occurred or\r\n # we are stopping execution or teardown skipping is wanted\r\n if not self._raiseSystemExit and not core.FW_conf['should_stop']:\r\n debug.out(\"MarbleTestCase tearDown\")\r\n\r\n self.logApply(core.FW_conf['connection']._tearDown, self)\r\n\r\n for remote in core.FW_conf['remote_connection']:\r\n self.logApply(remote._tearDown, self)", "def tearDown(self):\n\n self.ssx.close()\n\tself.ether_linux.close()", "def teardown(self) -> None:\n pass", "def teardown(self) -> None:\n pass", "def tearDown(self):\n self.api.reset()", "def tearDown(self):\n self.api.reset()", "def tearDown(self):\n self.api.reset()", "def tearDown(self):\n self.api.reset()", "def tear_down_all(self):\n self.dut.send_expect(\"quit\", \"# \")\n time.sleep(2)\n self.dut.kill_all()", "def teardown():\n\n self.zorp_mock.stop()", "def tearDown(self):\n 
self.tmp.cleanup()", "def tearDown(self):\n\t\tprint(\"end test\")\n\t\tpass", "def tear_down(self):\n verdict, msg = TestStepEngine.tear_down(self)\n self._device.inject_device_log(\"i\", \"ACS_TESTCASE\", \"TEARDOWN: %s\" % self._name)\n return verdict, msg", "def teardown(self):\r\n self.driver.quit()", "def tearDown(self):\n self._procfs_mock.stop()", "def tearDown(self):\n self.log.debug(\"Started TearDown\")\n self.driver.quit()\n self.log.debug(\"Completed TearDown\")", "def tearDown(self):\n self.stop_worker()", "def tearDown(self):\n\n self._patcher.stop()", "def teardown(self):", "def teardown(self):", "def teardown(self):", "def tearDown(self):\n\n rmtree(self.test_output_dir)\n\n return", "def tearDown(self):\n self.loop.close()", "def tearDown(self):\n self.loop.close()", "def tearDown(self):\n self.loop.close()", "def tearDown(self):\n self.loop.close()", "def tearDown(self):\n self.popen_patcher.stop()\n self.env_patcher.stop()", "def tear_down(self):\n self.destroy_env()\n self.dut.kill_all()", "def tear_down_cleanup(self):\n self.hass.stop()", "def tearDown(self):\n pass\n # teardown called after each test\n # e.g. maybe write test results to some text file", "def tearDown(self):\n self.supvisors_patcher.stop()", "def tearDown(self) -> None:\n\n del self.checker", "def _tear_down():\n repl._tearDown = self.tearDown", "def teardown(self):\n self.tcex.log.trace('teardown')", "def tearDown(self) -> None:\n os.remove(TestConfigFile.TEST_CONFIG)", "def teardown(self):\n\n\t\tself.shutdown = True\n\t\tself.terminate_process()", "def teardown(self):\n\n del self.testC, self.insts, self.testInst, self.dname, self.test_vals\n\n return", "def teardown_class(self):\n self._tester = None\n self._sut = None", "def tearDown(self) -> None:\n\n self.temp_env_file.close()\n os.remove(self.temp_env_file.name)\n\n del self.temp_env_file\n del self.test_name\n del self.helper", "def teardown_class(self):\n\n # TODO: If environment variable is set keep the workspace\n # and print out the path.\n global TEST_WORKSPACE\n\n check_env = env.import_test_cfg(TEST_WORKSPACE)[\n 'codechecker_cfg']['check_env']\n codechecker.remove_test_package_product(TEST_WORKSPACE, check_env)\n\n print(\"Removing: \" + TEST_WORKSPACE)\n shutil.rmtree(TEST_WORKSPACE, ignore_errors=True)", "def teardown_class(self):\n\n # TODO: If environment variable is set keep the workspace\n # and print out the path.\n global TEST_WORKSPACE\n\n check_env = env.import_test_cfg(TEST_WORKSPACE)[\n 'codechecker_cfg']['check_env']\n codechecker.remove_test_package_product(TEST_WORKSPACE, check_env)\n\n print(\"Removing: \" + TEST_WORKSPACE)\n shutil.rmtree(TEST_WORKSPACE, ignore_errors=True)", "def stopTestRun(self):", "def tearDown(self):\n self._service.tearDown()", "def tearDown(self):\n self._service.tearDown()", "def tearDown(self):\r\n self.app.application_close(self.util.client, self.app_name)\r\n\r\n self.common.generate_report(self.util.client, False)\r\n # Releases the client so that other clients can approach the agent in the near future.\r\n self.common.release_client(self.util.client)\r\n self.logger.info(\"==============Results=================\")\r\n self.logger.info(\"Number of Strings verified: \" + str(len(Config.results_list)/2))\r\n for i in range(0, len(Config.results_list), 2):\r\n self.logger.info(str(Config.results_list[i]) + \"{:>36}\".format('=====> ')\r\n + str(Config.results_list[i+1]))\r\n self.logger.info(\"Testcase tear-down: COMPLETED\")", "def tearDown(self):\n self.teardown_local_site()\n 
self.teardown_remote_site()\n time.sleep(2)", "def tearDown(self):\n # Stop the framework\n pelix.framework.FrameworkFactory.delete_framework()\n\n self.framework = None\n self.dispatcher = None", "def teardown(self):\n if self.ae:\n self.ae.shutdown()", "def tearDown(self):\n\n self.testInit.clearDatabase()\n\n self.testInit.delWorkDir()\n\n EmulatorSetup.deleteConfig(self.configFile)\n\n return", "def tearDownClass(cls):\n cls.container.stop()\n cls.container.remove()\n cls.client.close()", "def tearDown(self):\n self.remove_test_files()", "def tearDownClass(cls):\n\n cls.httpd.shutdown()", "def tearDown(self) -> None:\n self.directory.cleanup()", "def tearDown(self):\n self.popen_patcher.stop()", "def tearDown(self):\n self.popen_patcher.stop()", "def tearDown(self):\n self._s1ap_wrapper.cleanup()", "def tearDown(self):\n self._s1ap_wrapper.cleanup()", "def teardown(self) -> None:\n self._unregister_service()\n self._unregister_agent()", "def teardown(self):\n\n del self.testInst, self.test_bins, self.test_label, self.test_data\n del self.out_keys, self.out_data\n\n return", "def teardown_module():\n with suppress(Exception):\n mock_ad_xapp.stop()", "def teardown(self,**kwargs):\n pass", "def tearDown(self) -> None:\n if not self._outcome.result.wasSuccessful():\n self.driver.get_screenshot_as_file(f\"systemtraytest_failed_test_shot_#{self.id()}.png\")\n self.kded.kill()", "def tearDown(self):\n if hasattr(self.module, '__path__'):\n names = ['teardownPackage', 'teardown_package']\n else:\n names = ['teardownModule', 'teardown_module']\n names += ['tearDown', 'teardown'] \n try_run(self.module, names)", "def teardown_test(self):\n if self._ws:\n if self._ws.connected:\n self._ws.close()\n self._ws = None\n self._builder = None\n self.last_server_message = None\n self.session_refresh_token = None\n self.current_session_token = None", "def _teardown_dut(duthost, ptfhost, request):\n logger.info(\"Teardown SAI tests.\")\n _collect_test_result(duthost, ptfhost, request)\n _cleanup_ptf(ptfhost)" ]
[ "0.81449527", "0.77001065", "0.77001065", "0.7650473", "0.7650473", "0.76173466", "0.7492439", "0.74516493", "0.74354964", "0.7418989", "0.73991543", "0.73991543", "0.73991543", "0.73991543", "0.7376718", "0.7350522", "0.73498565", "0.7335127", "0.73328424", "0.73328424", "0.73328424", "0.73328424", "0.73241085", "0.73136175", "0.73136175", "0.73136175", "0.7287286", "0.7287286", "0.7267638", "0.7267638", "0.7249896", "0.7249896", "0.7249896", "0.72467303", "0.72269595", "0.72217983", "0.7218728", "0.7215892", "0.7213931", "0.7213931", "0.72047985", "0.72047985", "0.72047985", "0.72047985", "0.7200083", "0.71901876", "0.7165363", "0.71588445", "0.714978", "0.71390194", "0.7134412", "0.7132719", "0.7127428", "0.71268255", "0.71131086", "0.71131086", "0.71131086", "0.7105651", "0.70978117", "0.70978117", "0.70978117", "0.70978117", "0.70946634", "0.7089151", "0.7085881", "0.7084263", "0.7079455", "0.7078378", "0.7069652", "0.7065968", "0.70504886", "0.70355594", "0.7029067", "0.7021209", "0.7013328", "0.70092607", "0.70092607", "0.7005478", "0.7003619", "0.7003619", "0.700089", "0.69948804", "0.6988303", "0.69877845", "0.6984034", "0.6975879", "0.6974261", "0.69735944", "0.69726866", "0.6970895", "0.6970895", "0.6969088", "0.6969088", "0.6946697", "0.69422644", "0.69422156", "0.6936472", "0.69298714", "0.6921925", "0.6920193", "0.69190365" ]
0.0
-1
Test _specification_type_to_python_type method unsupported type.
def test__specification_type_to_python_type_unsupported_type(self):
    with self.assertRaises(TypeError):
        _specification_type_to_python_type("unsupported_type")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_type_error(self):\n self._error_test(TypeError)", "def test_raises_type_error(self):\n wrong_type = dict()\n self.assertRaises(\n TypeError, util.convert_protobuf_to_proto_plus, wrong_type\n )", "def test_value_error_for_computing_missing_type():\n with pytest.raises(ValueError):\n compute_type(\"missing_type\", {})", "def _check_type(self, new_value):\n raise NotImplementedError", "def checkType(self, value):\n pass", "def check_type(value: Any, type_spec: computation_types.Type):\n py_typecheck.check_type(type_spec, computation_types.Type)\n value_type = type_conversions.infer_type(value)\n if not type_spec.is_assignable_from(value_type):\n raise TypeError(\n computation_types.type_mismatch_error_message(\n value_type,\n type_spec,\n computation_types.TypeRelation.ASSIGNABLE,\n second_is_expected=True,\n )\n )", "def test_coerce() -> None:\n assert _coerce(\"1.0\") == Version(\"1.0\")\n assert _coerce(1.0) == Version(\"1.0\")\n expected = \"Unable to coerce object type\"\n with pytest.raises(NotImplementedError, match=expected):\n _coerce(type(Version))", "def test_unexpectedType(self):\n self.assertRaises(TypeError, nativeString, 1)", "def test_should_raise_error_if_type_is_invalid(self):\r\n with self.assertRaises(ValueError):\r\n self.spec_parser.parse_statement({'type': 'sugar'})", "def CheckType(self, *args, **kwargs):\n pass", "def check_type(self):\n return True", "def test_ticket_type_change_error_bad_type(self):\n test_name = sys._getframe().f_code.co_name\n rv, output = self._execute('ticket_type change bad_type changed_type')\n self.assertEqual(2, rv)\n self.assertEqual(self.expected_results[test_name], output)", "def _validate_type(self) -> None:\n # TODO: add transformation logic so that we don't have to transform inputs at every place they are used, including v1 back compat support\n if not spec_type_is_parameter(self.type):\n type_utils.validate_bundled_artifact_type(self.type)", "def test_proto_plus_to_protobuf_raises_type_error(self):\n wrong_type = dict()\n self.assertRaises(\n TypeError, util.convert_proto_plus_to_protobuf, wrong_type\n )", "def _validate_type(self):\n # TODO: add transformation logic so that we don't have to transform outputs at every place they are used, including v1 back compat support\n if not spec_type_is_parameter(self.type):\n type_utils.validate_bundled_artifact_type(self.type)", "def test_check_type_1():\r\n hl = hotlist.HotList()\r\n hl._validate_value(1)\r\n hl._validate_value(1L)\r\n hl._validate_value(1.5)\r\n hl._validate_value(\"abc\")\r\n hl._validate_value(u\"abc\")\r\n hl._validate_value((1, 2, 3,))\r\n hl._validate_value((1, \"AAA\", 3,))\r\n hl._validate_value((1, (\"AAA\", 2, 3,) , 3,))\r\n hl._validate_value((1, frozenset([\"AAA\", 2, 3,]) , 3,))\r\n\r\n with pytest.raises(TypeError):\r\n hl._validate_value([ 1, 2, 3,])\r\n\r\n with pytest.raises(TypeError):\r\n hl._validate_value(( 1, 2, [ 3, 4, 5,],))\r\n\r\n with pytest.raises(TypeError):\r\n hl._validate_value({})\r\n\r\n with pytest.raises(TypeError):\r\n hl._validate_value(hotlist.HotList())", "def _assert_type(type):\n if isinstance(type, str):\n o, v, p, t = type.split('.')\n if not ontologies.is_supported(o, v, p, t):\n rt.throw(\"Type {0}.v{1}.{2} is unsupported.\".format(o, v, p, t))\n elif type not in ontologies.get_types():\n rt.throw(\"Type {0} is unsupported.\".format(type))", "def test_incompatible_option_type(key, value):\n wrong_types = {int, str, list, bool} - {type(value)}\n for wrong_type in wrong_types:\n test_value = wrong_type()\n with 
pytest.raises(InputError):\n _check_input_config({key: test_value})", "def _check_type_compatibility(self, type_name1, type_name2,\n operation):\n if type_name1 != type_name2:\n raise TypeCompatibilityError(type_name1, type_name2, operation)", "def try_wrong_types(self, p, name, type_):\n for x in (1, 1.0, \"x\", True, np.ndarray,):\n if type(x) != type_:\n with self.assertRaises(TypeError, msg=f\"{name} {type_} {x}\"):\n setattr(p, name, x)", "def test_types(self):\n assert types.typeClass(\"str\") == str\n\n assert types.isBuiltinType(\"str\")\n\n assert types.isCollectionType(\"map\")\n assert types.isCollectionType(\"seq\")\n assert not types.isCollectionType(\"str\")\n\n assert types.isScalarType(\"str\")\n assert not types.isScalarType(\"seq\")\n assert not types.isScalarType(\"map\")\n\n assert types.isCollection([])\n assert types.isCollection({})\n assert not types.isCollection(\"foo\")\n\n assert types.isScalar(\"\")\n assert types.isScalar(True)\n assert not types.isScalar([])\n\n assert types.isCorrectType(\"\", str)\n assert types.isCorrectType({}, dict)\n\n assert types.isString(\"foo\")\n assert not types.isString([])\n\n assert types.isInt(1)\n assert not types.isInt(\"foo\")\n\n assert types.isBool(True)\n assert not types.isBool(1)\n assert not types.isBool(\"true\")\n\n assert types.isFloat(1.0)\n assert not types.isFloat(\"foo\")\n\n assert types.isNumber(1)\n assert types.isNumber(1.0)\n assert not types.isNumber(\"foo\")\n\n assert types.isText(\"foo\")\n assert types.isText(1)\n assert types.isText(1.0)\n assert not types.isText([])\n assert not types.isText(True)\n\n assert types.isAny(\"foo\")\n assert types.isAny(True)\n assert types.isAny(1)\n assert types.isAny(1.0)\n assert types.isAny({})\n assert types.isAny([])\n\n assert types.isEnum(\"foo\")\n assert not types.isEnum(1)\n\n assert types.isNone(None)\n assert not types.isNone(\"foo\")", "def test_should_return_appropriate_type(self):\r\n assert isinstance(self.spec_parser.parse_statement(self.edge_spec), Edge)\r\n assert isinstance(self.spec_parser.parse_statement(self.property_spec), Property)", "def testTheType(self, theTestType):\n \n pass", "def test_wrong_input_on_creation(self):\r\n\r\n self.assertRaises(TypeError, TypedListType, None)", "def test_should_return_error_if_stmt_contains_no_type(self):\r\n with self.assertRaises(TypeError):\r\n self.spec_parser.parse_statement({'name': 'todd'})", "def TYPE(value):\n raise NotImplementedError()", "def test_data_type(self):\n self.assertTrue(self.tester.data_type(), \"18S\")", "def test_expected_type(val, exp_type):\n\n if not isinstance(val, exp_type):\n return False", "def test_get_types(self):\n pass", "def test_types(self):\n \n self.assertIsInstance(self.detector_type, str)\n self.assertIsInstance(self.psd, dict)\n self.assertIsInstance(self.intensity, dict)\n self.assertIsInstance(self.database, str)\n self.assertIsInstance(self.position, list)\n self.assertIsInstance(self.angle, list)\n self.assertIsInstance(self.linearity_curve, dict)\n self.assertIsInstance(self.FOV, float)\n \n pass", "def test_snmpset_non_existant_type():\n with pytest.raises(SNMPWriteError) as excinfo:\n snmpset(ipaddress=SNMP_SRV_ADDR, community='public',\n oid='SNMPv2-MIB::sysName.0', value_type='z',\n value='Test Description', port=SNMP_SRV_PORT)\n assert str(excinfo.value) == 'The type value you specified does not ' \\\n 'match one of the accepted type codes.\\n' \\\n 'Valid type codes are one of ' \\\n '(i|u|t|a|o|s|x|d|b)'", "def test_datatype():\n\n assert 
isinstance(pf.get_datatype(), torch.dtype)\n assert pf.get_datatype() == torch.float32\n\n pf.set_datatype(torch.float64)\n assert isinstance(pf.get_datatype(), torch.dtype)\n assert pf.get_datatype() == torch.float64\n pf.set_datatype(torch.float32)\n\n with pytest.raises(TypeError):\n pf.set_datatype(\"lala\")", "def test_type_check(ExampleComponentClass):\n\n instance = ExampleComponentClass()\n\n configure(instance, {\"a\": 4.5}, name=\"x\")\n\n # Attempting to access the field should now raise a type error.\n with pytest.raises(\n TypeError,\n match=\"Field 'a' of component 'x' is annotated with type '<class 'int'>', which is not satisfied by value 4.5.\",\n ):\n instance.a", "def test_ensure_valid_model_type(self):\n # Note the \"valid\" type strings for our test\n test_types = [\"bar\", \"foo\", \"Sreeta\", \"Feras\"]\n # Note a set of invalid type strings for the test\n bad_types = [\"Tim\", \"Sam\"]\n\n # Alias the function to be tested\n func = pylogit.pylogit.ensure_valid_model_type\n\n # Make note of part of the error message that should be raised\n partial_error_msg = \"The specified model_type was not valid.\"\n\n # Perform the requisite tests\n for good_example in test_types:\n self.assertIsNone(func(good_example, test_types))\n for bad_example in bad_types:\n self.assertRaisesRegexp(ValueError,\n partial_error_msg,\n func,\n bad_example,\n test_types)\n\n return None", "def test_return_type(self):\n self.assertEqual(type(self.obj.to_json_string(self.valid_ld)), str)", "def test__validate_arg_type(parameter_name, parameter, expected_type, raises) :\n\n if raises is not None : \n # We expect this to raise an error\n with pytest.raises(raises) :\n _validate_arg_type(parameter_name, parameter, expected_type)\n else :\n _validate_arg_type(parameter_name, parameter, expected_type)", "def testWrongTypeAssignment(self):\n self.assertRaises(messages.ValidationError,\n protojson.decode_message,\n MyMessage, '{\"a_string\": 10}')", "def test_etype__invalid(self):\n\n for etype in (\"SyntaxError\", self):\n self.assertRaises(TypeError, encode_string, \"test\", etype=etype)", "def test_invalid_data_types(self):\n response=self.check_invalid_data_type()\n result = json.loads(response.data.decode('utf-8'))\n self.assertEqual(result['Error'],\"Require int or float type\")\n self.assertEqual(response.status_code, 200)", "def testUnsupportedType(self) -> None:\n fake_node = typing.cast(result_output.NodeType, 'a')\n with self.assertRaises(RuntimeError):\n result_output._RecursiveHtmlToFile(fake_node, self.output_file)", "def test_wrong_type_error(self, parse_input_mocked_metadata):\n with pytest.raises(ValueError, match=\"invalid value\"):\n bb = parse_input_mocked_metadata(\n \"for int m in [1, 4.2, 9]\\n\\tMZgate(0, 1) | [0, 1]\"\n )", "def test_datatype(self):\n with Pandas() as pd:\n if pd is None:\n return\n with Numpy() as np: # noqa\n if numpy is None:\n return\n sys.stderr.write(\"\\n\")\n\n df, hist1, hist2, hist3 = get_test_histograms1()\n\n assert hist1.datatype == str\n np.testing.assert_array_equal(hist2.datatype, [numpy.number, str])\n np.testing.assert_array_equal(hist3.datatype, [numpy.datetime64, numpy.number, str])", "def check(self, value: Any) -> None:\n if not isinstance(value, self.oktype):\n raise TypeError(value)", "def _validate_impropertype(contype):\n if contype is None:\n warnings.warn(\"Non-parametrized Improper detected\")\n elif not isinstance(contype, ImproperType):\n raise GMSOError(\"Supplied non-ImproperType {}\".format(contype))\n return contype", "def 
is_valid_type(type):\n return type in type_to_adapter", "def test_create_obj_by_type_from_primitive_type(self):\n test_obj = \"test_primitive\"\n returned_obj = self.tested_class._create_obj_by_type(test_obj)\n self.assertEqual(returned_obj, test_obj)", "def test_new_invalid(self) -> None:\n with pytest.raises(TypeError) as excinfo:\n RunwayTestDefinition({}) # type: ignore\n assert str(excinfo.value).startswith(\"expected data of type\")", "def test_match_stype():\n if backwards.PY2: # pragma: Python 2\n slist = ['hello', bytearray('hello'), unicode('hello')]\n else: # pragma: Python 3\n slist = ['hello', b'hello', bytearray('hello', 'utf-8')]\n for s1 in slist:\n for s2 in slist:\n nt.assert_equal(backwards.match_stype(s1, s2), s1)\n nt.assert_raises(TypeError, backwards.match_stype, 1, 'hello')", "def test_type_validation(self):\r\n with self.assertRaises(ValidationError):\r\n TestSetModel.create(int_set={'string', True}, text_set={1, 3.0})", "def testTypeFancy(self):\n prop = make_prop(kind=config.List(int))\n for value in (1, 'hi', [3, 'test']):\n with self.assertRaises(TypeError):\n prop.interpret(value, {})\n\n self.assertEqual([2, 3], prop.interpret([2, 3], {}))", "def test_validate_type_failure(self, field_type, value):\n opt = scheme.Option('test-option', field_type=field_type)\n with pytest.raises(errors.SchemeValidationError):\n opt.validate('foo', value)", "def test_snmpset_wrong_type():\n with pytest.raises(SNMPWriteError) as excinfo:\n snmpset(ipaddress=SNMP_SRV_ADDR, oid='SNMPv2-MIB::sysName.0',\n value_type='a', value='255.255.255.255', port=SNMP_SRV_PORT)\n assert 'Bad variable type' in str(excinfo.value)", "def test_unknown_type(testdir: Testdir) -> None:\n schema = '''\n datasource db {{\n provider = \"postgres\"\n url = env(\"POSTGRES_URL\")\n }}\n\n generator db {{\n provider = \"coverage run -m prisma\"\n output = \"{output}\"\n {options}\n }}\n\n model User {{\n id String @id\n meta Json\n }}\n '''\n with pytest.raises(subprocess.CalledProcessError) as exc:\n testdir.generate(schema=schema)\n\n assert 'Unknown scalar type: Json' in str(exc.value.output, 'utf-8')", "def test_type_errors():\n\n\ttry:\n\t\ttransmissions = compute_transmissions(cal_directory, lines = 3.0)\n\texcept TypeError:\n\t\ttry:\n\t\t\ttransmissions = compute_transmissions(cal_directory, calibrator = 300.0)\n\t\texcept TypeError:\n\t\t\tassert True\n\t\telse:\n\t\t\tassert False\n\telse:\n\t\tassert False", "def test_version_type(self):\n self.assertIsInstance(get_version(), str)", "def test_no_coercion():\n\n @type_checked(coerce=False)\n def _run_test(something:str): pass\n\n with pytest.raises(TypeError) as error:\n _run_test(1234)\n\n assert \"1234 is of type int, expecting str.\" in error.value.args", "def missing_types():\n\n return ...", "def test_wrong_type_param():\n from scraper import get_inspection_page\n with pytest.raises(TypeError):\n get_inspection_page(Violation_Points=0, City='Seattle')", "def check_type(name: str, val: str, expected_type: Union[List, TypeVar, None]) -> None:\n if not type_matches(val, expected_type):\n raise ValueError(\n \"{} should be '{}' but is {} of type '{}'.\".format(\n name, expected_type.__name__, val, type(val).__name__\n )\n )", "def test_arg_type(args, arg, arg_type):\n try:\n arg_type(args[arg])\n except Exception:\n raise GaiaException('Required argument {} must be of type {}'\n .format(arg, arg_type))", "def get_type_check(self, arg, option):\n pass", "def test_as_python_types(self):\n obs = _as_python_types(self.metadata_map, self.headers)\n exp 
= [[2.1, 3.1, 3],\n ['str1', '200', 'string30'],\n [1, 2, 3]]\n self.assertEqual(obs, exp)", "def test_type(self):\n geometric = [x for x in iterators.GeometricIterator(limit=10, ratio=0.5)]\n type_of_geometric = type(geometric[0])\n self.assertTrue(type_of_geometric == int or type_of_geometric == float)", "def test_wrong_type(self):\n msg = 'Widget type is not valid. Valid widget types are: ' + \\\n 'basic, default, formula, histogram, category, animation, time-series.'\n\n with pytest.raises(ValueError) as e:\n Widget({'type': 'xxx'}).get_info()\n assert str(e.value) == msg", "def test_type(self):\n geometric = [x for x in generators.geometric(1, 10, 0.5)]\n type_of_geometric = type(geometric[0])\n self.assertTrue(type_of_geometric == int or type_of_geometric == float)", "def test_not_supported_requirement(self, space_each_type):\n with pytest.raises(TypeError) as exc:\n build_required_space(space_each_type, type_requirement=\"fasdfasf\")\n assert \"Unsupported\" in str(exc.value)", "def failure(self: _UnwrappableType) -> _SecondType:", "def _expected_type(column_definition):\n try:\n expected_type = column_definition.type.python_type\n except NotImplementedError:\n # Custom column definitions can lack a type.\n # We use custom column definitions for primary keys of type int.\n expected_type = int\n if issubclass(expected_type, Enum):\n # This is an Enum type column, I'm making the simplifying assumption\n # that those will always be string type\n expected_type = str\n return expected_type", "def test_type_validation(self):\r\n with self.assertRaises(ValidationError):\r\n TestListModel.create(int_list=['string', True], text_list=[1, 3.0])", "def is_my_type(type_str):\n raise NotImplementedError()", "def convert_type(self, value, schema_type, **kwargs):", "def test_validate_on_invalid_data_type(self):\n args = (self.bytes_a, 'invalid')\n self.assertRaises(TypeError, objects.OpaqueObject, *args)", "def testGeneratorType(self):", "def test_etype__invalid(self):\n\n for etype in (\"SyntaxError\", self):\n self.assertRaises(TypeError, encode_file_path, \"test\", etype)", "def expected_type(self):\n return self.__expectedType", "def test_type_error(self):\n with self.assertRaises(TypeError):\n function_inclusion_filter_builder(5)", "def test_types(self):\n \n self.assertIsInstance(self.tx_data_in, numpy.ndarray)\n self.assertIsInstance(self.circuit_simulation, bool)\n self.assertIsInstance(self.bypass, bool)\n \n pass", "def test_isinstance_oldbytestrings_bytes(self):\n self.assertTrue(isinstance(b'blah', bytes_types)) # not with the redefined bytes obj\n self.assertTrue(isinstance(u'blah'.encode('utf-8'), bytes_types)) # not with the redefined bytes obj", "def get_check_types():", "def test_type_attribute(self):\n\n self._create_string()\n self.assertEquals(\"%s:%s\" % (\"xs\",\"string\"), self.string.schema_node.get(\"type\"))", "def validate(self, value):\n value = super(Type,self).validate(value)\n if self.type is None:\n return value\n if value is not None and not isinstance(value,self.type):\n try:\n if isinstance(value, list) or isinstance(value, tuple): value = self.type(*value)\n elif isinstance(value, dict): value = self.type(**value)\n else: value = self.type(value)\n except: \n raise BadValueError(\"Cannot coerce: %s to %s\"% (value, self.type))\n return value", "def test_incorrect_arg_type(self):\n\n with pytest.raises(TypeError) as exc_info:\n upper_incomplete_gamma(a='A', z=0.3)\n\n expected_error_msg = (\n 'type of argument \"a\" must be one of (int, float); got str instead'\n 
)\n assert str(exc_info.value) == expected_error_msg", "def test_kind():\n value = 11\n num_a = param.Integer(value=value)\n\n assert num_a.kind == \"Integer\"", "def test_wrong_input_type(self):\n with self.assertRaises(TypeError):\n votes_to_percentages(['not', 'a', 'queryset'])\n with self.assertRaises(TypeError):\n votes_to_percentages(Disposable.objects.all())", "def test_serialize(self):\n self.assert_raises(TypeError, self.instance.serialize, (1,))", "def check_type(value, ty, value_name=\"value\"):\n\n if ty in [str, unicode, int, float, bytes]:\n assert type(value) is ty, \"{} has type {}, not {}\".format(value_name, type(value), ty)\n elif type(ty) is list:\n assert type(value) is list, \"{} has type {}, not {}\".format(value_name, type(value), dict)\n for i in range(len(value)):\n check_type(value[i], ty[0], \"{}[{}]\".format(value_name, i))\n elif type(ty) is dict:\n assert type(value) is dict, \"{} has type {}, not {}\".format(value_name, type(value), dict)\n for k, t in ty.items():\n assert k in value, \"{} is missing key {}\".format(value_name, repr(k))\n check_type(value[k], t, \"{}[{}]\".format(value_name, repr(k)))\n else:\n raise Exception(\"unknown type spec {}\".format(repr(ty)))", "def test_type_builder_raises_exception_on_invalid_schema_item_type():\n\n class UnknownSchemaItem(SchemaItem):\n pass\n\n schema = [\n SchemaObject(\n name=\"FakeObject\", properties=[UnknownSchemaItem(name=\"objectUnknown\")]\n )\n ]\n\n with pytest.raises(ValueError):\n _ = build_types(schema)", "def test_datatype_detection():\n\n grammar = \"\"\"\n IsObjectDatatype: INT | STRING | ID;\n IsIntDatatype: INT;\n IsIdDatatype: ID;\n IsAlsoDatatype: SubDT1 | SubDT2;\n SubDT1: INT;\n SubDT2: STRING;\n \"\"\"\n\n mm = metamodel_from_str(grammar)\n\n IsObjectDatatype = mm['IsObjectDatatype']\n assert isinstance(IsObjectDatatype, ecore.EDataType)\n assert IsObjectDatatype.name == 'IsObjectDatatype'\n assert IsObjectDatatype.eType == object\n\n IsIntDatatype = mm['IsIntDatatype']\n assert isinstance(IsIntDatatype, ecore.EDataType)\n assert IsIntDatatype.name == 'IsIntDatatype'\n assert IsIntDatatype.eType == int\n\n IsIdDatatype = mm['IsIdDatatype']\n assert isinstance(IsIdDatatype, ecore.EDataType)\n assert IsIdDatatype.name == 'IsIdDatatype'\n assert IsIdDatatype.eType == str\n\n IsAlsoDatatype = mm['IsAlsoDatatype']\n assert isinstance(IsAlsoDatatype, ecore.EDataType)\n IsAlsoDatatype = mm['IsAlsoDatatype']\n assert IsAlsoDatatype.eType == object", "def test_exit_on_wrong_type(self):\n with self.assertRaises(SystemExit):\n pyint = Interpreter()\n pyint.run(file=WRONG_TYPE)", "def testProtocolSetBadType(self):\n def setProtocol():\n self.mr.protocol = 12345\n\n self.assertRaises(\n TypeError,\n setProtocol\n )", "def test_ticket_type_remove_error_bad_type(self):\n test_name = sys._getframe().f_code.co_name\n rv, output = self._execute('ticket_type remove bad_type')\n self.assertEqual(2, rv)\n self.assertEqual(self.expected_results[test_name], output)", "def test_get_type():\n formatter = TabularOutputFormatter()\n\n tests = (\n (1, int),\n (2.0, float),\n (b\"binary\", binary_type),\n (\"text\", text_type),\n (None, type(None)),\n ((), text_type),\n )\n\n for value, data_type in tests:\n assert data_type is formatter._get_type(value)", "def check_type(val):\n\n try:\n a = float(val)\n return type(a)\n except ValueError:\n pass\n\n try:\n a = int(val)\n return type(a)\n except ValueError:\n pass\n\n try:\n a = dt.datetime.strptime(val, '%Y-%m-%dT%H:%M:%SZ')\n return type(a)\n except ValueError:\n 
pass\n\n return type(val)", "def validate_instruction_type(parsed_data: Any, expected_type: IntEnum) -> None:\n if parsed_data.instruction_type != expected_type:\n raise ValueError(\n f\"invalid instruction; instruction index mismatch {parsed_data.instruction_type} != {expected_type}\"\n )", "def check_type(instance, type):\n\tif not isinstance(instance, type):\n\t\traise TypeError('Instance expected type {0}, but got: {1}', type(type), type(instance))", "def test_data_type(self):\n with self.assertRaises(TypeError):\n max_integer(None)\n\n with self.assertRaises(TypeError):\n max_integer([\"Hey\", 3, 456, \"ALX\", 65])", "def test_validation_can_fail():\n\n @type_checked\n def _run_test(something:int): pass\n\n with pytest.raises(TypeError) as error:\n _run_test(\"abc\")\n\n assert \"abc is of type str, expecting int.\" in error.value.args", "def test_validate_type_ok(self, field_type, value):\n opt = scheme.Option('test-option', field_type=field_type)\n opt.validate('foo', value)", "def validate_type(in_type: Mapping[str, Any]) -> TypeDescription:\n # Check if all keys are there\n missing_keys = {\"name\", \"weights\", \"access\"}.difference(in_type.keys())\n if missing_keys:\n missing_keys = \", \".join(repr(i) for i in sorted(missing_keys))\n raise InvalidTypeDescription(\"Provided type description is missing some keys: \"\n + missing_keys)\n\n # Check in_type['name']\n if not isinstance(in_type[\"name\"], str):\n got_type = type(in_type[\"name\"]).__name__\n raise InvalidTypeDescription(f\"Value of type['name'] should be str, not {got_type}\")\n\n # Check in_type['weights']\n if not isinstance(in_type[\"weights\"], collections.abc.Mapping):\n got_type = type(in_type[\"weights\"]).__name__\n raise InvalidTypeDescription(\n \"Value of type['weights'] should be an instance of collections.abc.Mapping \"\n f\"(e.g. a dict), but {got_type} was provided\"\n )\n\n # Check in_type['access']\n if not isinstance(in_type[\"access\"], collections.abc.Sequence):\n got_type = type(in_type[\"access\"]).__name__\n raise InvalidTypeDescription(\n \"Value of type['access'] should be an instance of collections.abc.Sequence \"\n f\"(e.g. a list), but {got_type} was provided\"\n )\n\n return in_type # type: ignore", "def test_flow_must_define_type(self):\n flow = MockFlowWithoutType()\n pytest.raises(\n ValueError, flow.authentication_flow_document, 'argument'\n )" ]
[ "0.6947089", "0.6929546", "0.6844311", "0.6795259", "0.66925305", "0.6652292", "0.6652253", "0.6572005", "0.65506715", "0.6511283", "0.64829165", "0.6474207", "0.64679945", "0.64503706", "0.64311326", "0.6370294", "0.6370191", "0.6354397", "0.6286515", "0.6261668", "0.62531507", "0.6238267", "0.6237771", "0.62207377", "0.6207208", "0.6176347", "0.6165806", "0.6164325", "0.6159431", "0.61389667", "0.6122016", "0.6090095", "0.60782486", "0.60526377", "0.6051671", "0.60429984", "0.6042015", "0.6026651", "0.6024614", "0.6001658", "0.5999646", "0.59921664", "0.5982305", "0.597359", "0.5963416", "0.5954669", "0.5954037", "0.5948596", "0.5945822", "0.59451747", "0.5941398", "0.5941136", "0.5936044", "0.5934277", "0.59281963", "0.5927194", "0.59266114", "0.59156907", "0.5912043", "0.5909371", "0.5905061", "0.590137", "0.5894957", "0.5891415", "0.5889819", "0.58838284", "0.5868699", "0.5860349", "0.585657", "0.58400244", "0.5839741", "0.58370495", "0.5831164", "0.5828375", "0.581335", "0.5808163", "0.5803207", "0.58022386", "0.58005965", "0.579952", "0.5795918", "0.5791762", "0.5778064", "0.5773953", "0.5770466", "0.575985", "0.57537764", "0.5746437", "0.5746035", "0.5739645", "0.5724888", "0.5721915", "0.57201654", "0.57182133", "0.5718182", "0.57156503", "0.5709737", "0.5707999", "0.57058346", "0.5696206" ]
0.91027087
0
Test _union_sub_type_to_protobuf_variable_name method tuple.
def test__union_sub_type_to_protobuf_variable_name_tuple(self, mock):
        _union_sub_type_to_protobuf_variable_name("content_name", "Tuple")
        mock.assert_called_once()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _union_sub_type_to_protobuf_variable_name(\n content_name: str, content_type: str\n) -> str:\n if content_type.startswith(\"FrozenSet\"):\n sub_type = _get_sub_types_of_compositional_types(content_type)[0]\n expanded_type_str = \"set_of_{}\".format(sub_type)\n elif content_type.startswith(\"Tuple\"):\n sub_type = _get_sub_types_of_compositional_types(content_type)[0]\n expanded_type_str = \"list_of_{}\".format(sub_type)\n elif content_type.startswith(\"Dict\"):\n sub_type_1 = _get_sub_types_of_compositional_types(content_type)[0]\n sub_type_2 = _get_sub_types_of_compositional_types(content_type)[1]\n expanded_type_str = \"dict_of_{}_{}\".format(sub_type_1, sub_type_2)\n else:\n expanded_type_str = content_type\n\n protobuf_variable_name = \"{}_type_{}\".format(content_name, expanded_type_str)\n\n return protobuf_variable_name", "def _decode_union_old(data_type, obj, alias_validators, strict, for_msgpack):\n val = None\n if isinstance(obj, six.string_types):\n # Union member has no associated value\n tag = obj\n if tag in data_type.definition._tagmap:\n val_data_type = data_type.definition._tagmap[tag]\n if not isinstance(val_data_type, (bv.Void, bv.Nullable)):\n raise bv.ValidationError(\n \"expected object for '%s', got symbol\" % tag)\n else:\n if not strict and data_type.definition._catch_all:\n tag = data_type.definition._catch_all\n else:\n raise bv.ValidationError(\"unknown tag '%s'\" % tag)\n elif isinstance(obj, dict):\n # Union member has value\n if len(obj) != 1:\n raise bv.ValidationError('expected 1 key, got %s' % len(obj))\n tag = list(obj)[0]\n raw_val = obj[tag]\n if tag in data_type.definition._tagmap:\n val_data_type = data_type.definition._tagmap[tag]\n if isinstance(val_data_type, bv.Nullable) and raw_val is None:\n val = None\n elif isinstance(val_data_type, bv.Void):\n if raw_val is None or not strict:\n # If raw_val is None, then this is the more verbose\n # representation of a void union member. 
If raw_val isn't\n # None, then maybe the spec has changed, so check if we're\n # in strict mode.\n val = None\n else:\n raise bv.ValidationError('expected null, got %s' %\n bv.generic_type_name(raw_val))\n else:\n try:\n val = _json_compat_obj_decode_helper(\n val_data_type, raw_val, alias_validators, strict, True,\n for_msgpack)\n except bv.ValidationError as e:\n e.add_parent(tag)\n raise\n else:\n if not strict and data_type.definition._catch_all:\n tag = data_type.definition._catch_all\n else:\n raise bv.ValidationError(\"unknown tag '%s'\" % tag)\n else:\n raise bv.ValidationError(\"expected string or object, got %s\" %\n bv.generic_type_name(obj))\n return data_type.definition(tag, val)", "def union_parts(union: UnionKind, value: dict):\n selector, sub_value = list(value.items())[0]\n final_kind = union.kind_for(selector)\n value = sub_value\n return final_kind, value", "def _decode_union(data_type, obj, alias_validators, strict, for_msgpack):\n val = None\n if isinstance(obj, six.string_types):\n # Handles the shorthand format where the union is serialized as only\n # the string of the tag.\n tag = obj\n if tag in data_type.definition._tagmap:\n val_data_type = data_type.definition._tagmap[tag]\n if not isinstance(val_data_type, (bv.Void, bv.Nullable)):\n raise bv.ValidationError(\n \"expected object for '%s', got symbol\" % tag)\n if tag == data_type.definition._catch_all:\n raise bv.ValidationError(\n \"unexpected use of the catch-all tag '%s'\" % tag)\n else:\n if not strict and data_type.definition._catch_all:\n tag = data_type.definition._catch_all\n else:\n raise bv.ValidationError(\"unknown tag '%s'\" % tag)\n elif isinstance(obj, dict):\n tag, val = _decode_union_dict(\n data_type, obj, alias_validators, strict, for_msgpack)\n else:\n raise bv.ValidationError(\"expected string or object, got %s\" %\n bv.generic_type_name(obj))\n return data_type.definition(tag, val)", "def typeToName(type: int) -> unicode:\n ...", "def method_union_name(self) -> str:", "def get_Union_params(un):\n try:\n return un.__union_params__\n except AttributeError:\n # Python 3.6\n return un.__args__", "def test_frame_variable(self):\n self.build()\n self.common_setup()\n\n # This should display correctly.\n self.expect(\n \"frame variable --show-types -- *my_foo_ptr\",\n VARIABLES_DISPLAYED_CORRECTLY,\n substrs=[\n \"(foo)\",\n \"(sub_foo)\",\n \"other_element = 3\"])", "def _var_name_sub(self, sprintf, quote=False):\n q = ''\n if quote:\n q = \"'\"\n name_list = map(lambda x: q + self.cdict[x][0] + q, sprintf[\"vars\"] )\n return sprintf[\"text\"] % tuple(name_list)", "def _check_typevar(self, name: str, node: nodes.AssignName) -> None:\n if isinstance(node.parent, nodes.Assign):\n keywords = node.assign_type().value.keywords\n args = node.assign_type().value.args\n elif isinstance(node.parent, nodes.Tuple):\n keywords = (\n node.assign_type().value.elts[node.parent.elts.index(node)].keywords\n )\n args = node.assign_type().value.elts[node.parent.elts.index(node)].args\n\n variance = TypeVarVariance.invariant\n name_arg = None\n for kw in keywords:\n if variance == TypeVarVariance.double_variant:\n pass\n elif kw.arg == \"covariant\" and kw.value.value:\n variance = (\n TypeVarVariance.covariant\n if variance != TypeVarVariance.contravariant\n else TypeVarVariance.double_variant\n )\n elif kw.arg == \"contravariant\" and kw.value.value:\n variance = (\n TypeVarVariance.contravariant\n if variance != TypeVarVariance.covariant\n else TypeVarVariance.double_variant\n )\n\n if kw.arg == \"name\" and 
isinstance(kw.value, nodes.Const):\n name_arg = kw.value.value\n\n if name_arg is None and args and isinstance(args[0], nodes.Const):\n name_arg = args[0].value\n\n if variance == TypeVarVariance.double_variant:\n self.add_message(\n \"typevar-double-variance\",\n node=node,\n confidence=interfaces.INFERENCE,\n )\n self.add_message(\n \"typevar-name-incorrect-variance\",\n node=node,\n args=(\"\",),\n confidence=interfaces.INFERENCE,\n )\n elif variance == TypeVarVariance.covariant and not name.endswith(\"_co\"):\n suggest_name = f\"{re.sub('_contra$', '', name)}_co\"\n self.add_message(\n \"typevar-name-incorrect-variance\",\n node=node,\n args=(f'. \"{name}\" is covariant, use \"{suggest_name}\" instead'),\n confidence=interfaces.INFERENCE,\n )\n elif variance == TypeVarVariance.contravariant and not name.endswith(\"_contra\"):\n suggest_name = f\"{re.sub('_co$', '', name)}_contra\"\n self.add_message(\n \"typevar-name-incorrect-variance\",\n node=node,\n args=(f'. \"{name}\" is contravariant, use \"{suggest_name}\" instead'),\n confidence=interfaces.INFERENCE,\n )\n elif variance == TypeVarVariance.invariant and (\n name.endswith(\"_co\") or name.endswith(\"_contra\")\n ):\n suggest_name = re.sub(\"_contra$|_co$\", \"\", name)\n self.add_message(\n \"typevar-name-incorrect-variance\",\n node=node,\n args=(f'. \"{name}\" is invariant, use \"{suggest_name}\" instead'),\n confidence=interfaces.INFERENCE,\n )\n\n if name_arg is not None and name_arg != name:\n self.add_message(\n \"typevar-name-mismatch\",\n node=node,\n args=(name_arg, name),\n confidence=interfaces.INFERENCE,\n )", "def test_get_output_var_names(initialized_bmi):\n names = initialized_bmi.get_output_var_names()\n assert isinstance(names, tuple)\n\n if hasattr(initialized_bmi, \"get_output_var_name_count\"):\n n_names = initialized_bmi.get_output_var_name_count()\n assert len(names) == n_names\n else:\n warnings.warn(\"get_output_var_name_count not implemented\")", "def property_to_py_name(cpp_struct_name):\r\n first_underscore = cpp_struct_name.find('_')\r\n assert first_underscore != -1\r\n return cpp_struct_name[first_underscore + 1:]", "def test_output_named_tuple_vs_dictionary_5():\n assert largest_blood_type == largest_blood_type_d, \"Max Blood Type cannot be different for same group\"", "def gen_type_assertion(var_name: str, ty: type) -> str:\n\n tys = type_str(ty)\n vars = [c for c in 'abcdefghijklmnop' if c != var_name][::-1]\n\n def helper(var_name, tys):\n tys = tys.strip()\n pre_bracket = tys.split(\"[\")[0].lower() # part before [ (or the entire string if no bracket\n ans = f\"type({var_name}) is {pre_bracket}\"\n if \"[\" in tys:\n inside = tys[tys.index(\"[\") + 1:-1]\n new_var = vars.pop()\n if pre_bracket == \"list\" or pre_bracket == \"set\":\n inside_check = helper(new_var, inside)\n # if \" and \" in inside_check:\n # inside_check = \"(\" + inside_check + \")\"\n ans += f\" and all({inside_check} for {new_var} in {var_name})\"\n elif pre_bracket == \"dict\":\n depth = 0\n for i, c in enumerate(inside):\n if c == \"[\":\n depth += 1\n elif c == \"]\":\n depth -= 1\n elif c == \",\" and depth == 0:\n break\n assert depth == 0 and c == \",\", \"Dict[(expecting comma inside)]\"\n key_var = vars.pop()\n key_check = helper(key_var, tys[:i])\n val_check = helper(new_var, tys[i + 1:])\n ans += f\" and all({key_check} and {val_check} for {key_var}, {new_var} in {var_name}.items())\"\n else:\n assert False, f\"Unknown type `{tys}`\"\n return ans\n\n return f\"assert {helper(var_name, tys)}, '{var_name} must be of 
type {tys}'\"", "def gen_type_tuple_string(self, name, node):\n return \"('{}', {})\".format(name, self.gen_type_string(node))", "def test_proto_export_inverse(tmp_path, x, name):\n config = Config()\n typedef, message = x\n with tempfile.NamedTemporaryFile(\n mode=\"r+\", dir=str(tmp_path), suffix=\".proto\", delete=True\n ) as outfile:\n\n typedef_map = {name: typedef}\n\n protofile.export_proto(typedef_map, output_file=outfile)\n outfile.flush()\n\n outfile.seek(0)\n new_typedef_map = protofile.import_proto(config, input_file=outfile)\n\n config.known_types.update(new_typedef_map)\n # validate\n for name, typedef in new_typedef_map.items():\n blackboxprotobuf.validate_typedef(typedef, config=config)\n\n def _check_field_types(typedef1, typedef2):\n for field_num in typedef1.keys():\n # make sure we don't drop keys\n assert field_num in typedef2\n assert typedef1[field_num][\"type\"] == typedef2[field_num][\"type\"]\n if typedef1[field_num][\"type\"] == \"message\":\n message_typedef1 = None\n message_typedef2 = None\n if \"message_typedef\" in typedef1[field_num]:\n message_typedef1 = typedef1[field_num][\"message_typedef\"]\n elif \"message_type_name\" in typedef1[field_num]:\n assert typedef1[field_num][\"message_type_name\"] in typedef_map\n message_typedef1 = typedef_map[\n typedef1[field_num][\"message_type_name\"]\n ]\n if \"message_typedef\" in typedef2[field_num]:\n message_typedef2 = typedef2[field_num][\"message_typedef\"]\n elif \"message_type_name\" in typedef2[field_num]:\n assert (\n typedef2[field_num][\"message_type_name\"] in new_typedef_map\n )\n message_typedef2 = new_typedef_map[\n typedef2[field_num][\"message_type_name\"]\n ]\n\n _check_field_types(message_typedef1, message_typedef2)\n\n note(typedef_map)\n note(new_typedef_map)\n for name, typedef in typedef_map.items():\n _check_field_types(typedef, new_typedef_map[name])\n\n note(new_typedef_map[name])\n # try to actually encode a message with the typedef\n encode_forward = length_delim.encode_message(message, config, typedef_map[name])\n\n config.known_types = new_typedef_map\n encode_backward = length_delim.encode_message(\n message, config, new_typedef_map[name]\n )\n\n decode_forward, _, _, _ = length_delim.decode_message(\n encode_forward, config, new_typedef_map[name]\n )\n decode_backward, _, _, _ = length_delim.decode_message(\n encode_backward, config, typedef_map[name]\n )", "def test_get_name_of_variable(self):\n name = Code()\n self.assertEqual(str(name), 'name')", "def repackage_var(x):\n if type(x) == Variable:\n return Variable(x.data)\n else:\n return tuple(repackage_var(v) for v in x)", "def nameof_both(var, *more_vars):\n result = nameof(var, *more_vars, frame=2)\n\n if not more_vars:\n assert result == bytecode_nameof(frame=2)\n return result", "def _infer_variable_types_from_data(raw_data):\n raise NotImplementedError()", "def subexpr_to_smtlib(expr, pre, suff='', fun_annotate_subexpr = None):\n if fun_annotate_subexpr is not None and pre in PythonOperators.logic_ops:\n return '(! 
(' + pre + ' ' + ExprTranslator.concatenate_args(expr, fun_annotate_subexpr) + suff + \\\n ') :named ' + fun_annotate_subexpr() + ')'\n else:\n return '(' + pre + ' ' + ExprTranslator.concatenate_args(expr, fun_annotate_subexpr) + suff + ')'", "def test_extracting_one_value(self):\n\t\tself.assertEqual([\"b\"], au.extract_variables(bf.Var(\"b\")), \"Invalid variables extracted, expected [b].\")", "def test_get_input_var_names(initialized_bmi):\n names = initialized_bmi.get_input_var_names()\n assert isinstance(names, tuple)\n\n if hasattr(initialized_bmi, \"get_input_var_name_count\"):\n n_names = initialized_bmi.get_input_var_name_count()\n assert len(names) == n_names\n else:\n warnings.warn(\"get_input_var_name_count not implemented\")", "def getunittouu(self, param):\n if type(param) is tuple:\n return tuple([self.getunittouu(val) for val in param])\n try:\n return inkex.unittouu(param)\n except AttributeError:\n return self.unittouu(param)", "def gen_proto_recv(signame, argname, typename, size, is_enum, is_struct, is_varlen):\n add_code = None\n wordoff = word_offset(signame, argname)\n if is_varlen:\n # Array. Logic is identical to send direction; copying\n # is done elsewhere, we just return an offset.\n # The offset's the same for send, so we don't need\n # to generate any code.\n proto_code = None\n copy_code = None\n signature = None\n else:\n signature = mangle_type(typename)\n if is_struct:\n proto_code = \"%s *%s\" % (typename, argname)\n copy_code = \" CCP_%s_%s_GET(pdu, %s);\" % (\n signame.upper(), argname.upper(), argname)\n else:\n proto_code = \"%s *%s\" % (typename, argname)\n cast = \"(%s)\" % (typename)\n copy_code = \" *%s = %sCCP_%s_%s_GET(pdu);\" % (\n argname, cast, signame.upper(), argname.upper())\n return (proto_code, copy_code, add_code, signature)", "def unpack_type_spec_from(\n buffer: bytes, offset: int = 0\n) -> tuple[computation_types.Type, int]:\n length, length_size = _unpack_length_from(buffer, offset=offset)\n offset += length_size\n type_spec_bytes, *_ = struct.unpack_from(f'!{length}s', buffer, offset=offset)\n proto = computation_pb2.Type.FromString(type_spec_bytes)\n type_spec = type_serialization.deserialize_type(proto)\n return type_spec, length_size + length # pytype: disable=bad-return-type", "def __splitVariableNames(self, name, indexes):\n if name == 'x':\n var = self.xCoordinates[indexes[0]][indexes[1]]\n elif name == 'y':\n var = self.yCoordinates[indexes[0]][indexes[1]]\n elif name == 'z':\n var = self.zCoordinates[indexes[0]][indexes[1]]\n elif name == 'colorMap':\n var = self.colorMapCoordinates[indexes[0]][indexes[1]]\n elif name == 'clusterLabels':\n var = self.clusterLabels[indexes[0]][indexes[1]]\n elif name == 'mixtureLabels':\n var = self.mixtureLabels[indexes[0]][indexes[1]]\n elif name == 'mixtureMeans':\n var = self.mixtureMeans[indexes[0]][indexes[1]]\n elif name == 'mixtureCovars':\n var = self.mixtureCovars[indexes[0]][indexes[1]]\n\n # The variable can contain brackets {} (when the symbol \"|\" is present in\n # the variable name), e.g.:\n # DataName|Input|{RavenAuxiliary|variableName|initial_value}\n # or it can look like:\n # DataName|Input|variableName\n\n if var is not None:\n result = [None] * 3\n if '|input|' in var.lower():\n match = re.search(r\"(\\|input\\|)\", var.lower())\n elif '|output|' in var.lower():\n match = re.search(r\"(\\|output\\|)\", var.lower())\n else:\n self.raiseAnError(IOError, f'In Plot {self.name}, the input coordinate {name} has not specified an \"Input\" or \"Output\" (case insensitive). 
e.g., sourceName|Input|aVariable) in {var}')\n startLoc, endLoc = match.start(), match.end()\n result = [var[:startLoc].strip(), var[startLoc+1:endLoc-1].strip(), var[endLoc:].strip()]\n if '{' in result[-1] and '}' in result[-1]:\n locLower, locUpper = result[-1].find(\"{\"), result[-1].rfind(\"}\")\n result[-1] = result[-1][locLower + 1:locUpper].strip()\n else:\n result = None\n\n return result", "def __compile_subroutine_parameters(self):\r\n while self.__tokenizer.keyword() == TYPES_DIC[\"VAR\"]:\r\n self.compile_var_dec()", "def tuple(self, arg: SeField[Any]) -> str:\n if is_bare_tuple(arg.type):\n return arg.varname\n elif is_variable_tuple(arg.type):\n earg = arg[0]\n earg.name = \"v\"\n return f\"tuple({self.render(earg)} for v in {arg.varname})\"\n else:\n rvalues = []\n for i, _ in enumerate(type_args(arg.type)):\n r = arg[i]\n r.name = f\"{arg.varname}[{i}]\"\n rvalues.append(self.render(r))\n return f\"({', '.join(rvalues)},)\" # trailing , is required for single element tuples", "def test_parse_substitution_variable():\n assert parse_substitution_variable(\"${SOME_VAR}\") == \"SOME_VAR\"\n assert parse_substitution_variable(\"$SOME_VAR\") == \"SOME_VAR\"\n assert parse_substitution_variable(\"SOME_STRING\") is None\n assert parse_substitution_variable(\"SOME_$TRING\") is None\n assert parse_substitution_variable(\"${some_var}\") == \"some_var\"\n assert parse_substitution_variable(\"$some_var\") == \"some_var\"\n assert parse_substitution_variable(\"some_string\") is None\n assert parse_substitution_variable(\"some_$tring\") is None\n assert parse_substitution_variable(\"${SOME_$TRING}\") is None\n assert parse_substitution_variable(\"$SOME_$TRING\") == \"SOME_\"", "def test_tuple_identifier(self):\n\n # This will resolve to the Username AVP\n self._compare_avp(\n avp.AVP((avp.VendorId.DEFAULT, 1), 'a username'),\n avp.UTF8StringAVP(\n 1, value='a username', vendor=avp.VendorId.DEFAULT,\n flags=avp.FLAG_MANDATORY,\n name='User-Name',\n ),\n )\n\n self._compare_avp(\n avp.AVP((avp.VendorId.TGPP, 701), b'msisdn'),\n avp.OctetStringAVP(\n 701, value=b'msisdn', vendor=avp.VendorId.TGPP,\n flags=avp.FLAG_MANDATORY | avp.FLAG_VENDOR,\n name='MSISDN',\n ),\n )\n\n # Unknown AVPs default to unknown AVP\n self._compare_avp(\n avp.AVP((0xfac3b00c, 1), b'wut'),\n avp.UnknownAVP(\n 1, value=b'wut', vendor=0xfac3b00c,\n flags=0, name='Unknown-AVP',\n ),\n )", "def tuple_namer(name,tupl):\n tupl_templ = collections.namedtuple(name, 'battery status neighbour')\n named = tupl_templ(battery = tupl[0], status = tupl[1], neighbour = tupl[2])\n return named", "def _split_name(name):\n name_split = name.split('_view_')\n view_num = None\n if(len(name_split) > 1):\n view_num = int(name_split[1])\n optimizer_key = ''\n fp16_key = ''\n if name_split[0].startswith('Moment_1'):\n optimizer_key = 'Moment_1_'\n elif name_split[0].startswith('Moment_2'):\n optimizer_key = 'Moment_2_'\n elif name_split[0].startswith('Update_Count'):\n optimizer_key = 'Update_Count_'\n elif name_split[0].endswith('_fp16'):\n fp16_key = '_fp16'\n param_name = name_split[0]\n if optimizer_key != '':\n param_name = param_name.split(optimizer_key)[1]\n param_name = param_name.split('_fp16')[0]\n return param_name, optimizer_key, view_num, fp16_key", "def gen_load_code(var_name: str, ty: type) -> str:\n\n tys = type_str(ty)\n\n if tys.startswith(\"Set[\"):\n assert tys.endswith(\"]\")\n inside = tys[4:-1]\n ans = f\"{var_name} = set(json.load(sys.stdin))) # convert set (stored as json dictionary)\"\n assertions = 
[f\"all(isinstance(x, {inside}) for x in {var_name})\"]\n else:\n ans = f\"{var_name} = json.load(sys.stdin)\"\n num_lists = tys.count(\"List[\")\n assert tys.startswith(\"List[\" * num_lists) and tys.endswith(\"]\" * num_lists)\n inside = tys[5 * num_lists: len(tys) - num_lists]\n if num_lists == 0:\n assertions = [f\"isinstance({var_name}, {inside})\"]\n else:\n assertions = [f\"isinstance({var_name}, list)\"]\n if num_lists == 1:\n assertions.append(f\"all(isinstance(x, {inside}) for x in {var_name})\")\n else:\n assertions.append(f\"all(isinstance(x, list) for x in {var_name})\")\n if num_lists == 2:\n assertions.append(f\"all(isinstance(y, {inside}) for x in {var_name} for y in x)\")\n elif num_lists == 3:\n assertions += [f\"all(isinstance(y, list) for x in {var_name} for y in x)\",\n f\"all(isinstance(z, {inside}) for x in {var_name} for y in x for z in y)\"]\n else:\n assert False, f'Unknown type {tys}'\n\n assert inside in [\"int\", \"float\", \"bool\", \"str\"], f'Unknown type {tys}'\n return ans + \"\\n\\n\" + \"\\n\".join(f\"assert {a}, 'Type error: expecting `{tys}`'\" for a in assertions)", "def test_proto_spec(self):\n proto_name = 'org.xlattice.upax'\n node_reg = reg.NodeReg()\n proto_reg = reg.ProtoReg(proto_name, node_reg)\n msg_reg = reg.MsgReg(proto_reg)\n proto_spec = M.ProtoSpec(proto_name, proto_reg)\n self.assertEqual(proto_name, proto_spec.name)\n parent = M.ProtoSpec(proto_name, proto_reg)\n\n msg_name = 'logEntry'\n # the enum is not used\n enum = M.EnumSpec.create('Joe', [\n ('oh', 92), ('hello', 47), ('there', 322), ])\n fields = [\n # pylint: disable=no-member\n M.FieldSpec(\n msg_reg,\n 'timestamp',\n FieldTypes.F_UINT32,\n Quants.REQUIRED,\n 0),\n M.FieldSpec(\n msg_reg,\n 'node_id',\n FieldTypes.F_BYTES20,\n Quants.REQUIRED,\n 1),\n M.FieldSpec(\n msg_reg,\n 'key',\n FieldTypes.F_BYTES20,\n Quants.REQUIRED,\n 2),\n M.FieldSpec(\n msg_reg,\n 'length',\n FieldTypes.V_UINT32,\n Quants.REQUIRED,\n 3),\n M.FieldSpec(\n msg_reg,\n 'by_',\n FieldTypes.L_STRING,\n Quants.REQUIRED,\n 4),\n M.FieldSpec(\n msg_reg,\n 'path',\n FieldTypes.L_STRING,\n Quants.REQUIRED,\n 5),\n ]\n msg_spec = M.MsgSpec(msg_name, msg_reg, proto_spec)\n self.assertEqual(msg_name, msg_spec.name)\n for file in fields:\n msg_spec.add_field(file)\n\n # proto_spec.add_msg(msg_spec) # correctly commented out\n self.round_trip_poto_spec_via_string(proto_spec) # GEEP", "def getvarname(self,j_): # 3\n sizename_ = (1 + self.getvarnamelen((j_)))\n arr_name = array.array(\"b\",[0]*((sizename_)))\n memview_arr_name = memoryview(arr_name)\n res,resargs = self.__obj.getvarname(j_,sizename_,memview_arr_name)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n retarg_name = resargs\n retarg_name = arr_name.tobytes()[:-1].decode(\"utf-8\",errors=\"ignore\")\n return retarg_name", "def test_union():\n\n valid_test_cases = [\n {\"one\": 1, \"two\": 2, \"three\": 3},\n {\"one\": 1, \"two\": \"2\", \"three\": 3},\n {\"one\": \"1\", \"two\": 2, \"three\": 3},\n {\"one\": \"1\", \"two\": \"2\", \"three\": 3},\n {\"one\": 1, \"two\": 2, \"three\": None},\n {\"one\": 1, \"two\": \"2\", \"three\": None},\n {\"one\": \"1\", \"two\": 2, \"three\": None},\n {\"one\": \"1\", \"two\": \"2\", \"three\": None},\n {\"one\": 1, \"two\": 2, \"three\": \"3\"},\n {\"one\": 1, \"two\": \"2\", \"three\": \"3\"},\n {\"one\": \"1\", \"two\": 2, \"three\": \"3\"},\n {\"one\": \"1\", \"two\": \"2\", \"three\": \"3\"},\n ]\n\n invalid_test_cases = [\n {\"one\": None, \"two\": 2, \"three\": 3},\n 
{\"one\": 1, \"two\": None, \"three\": 3},\n {\"one\": 1, \"two\": 2, \"three\": BasicUnionClass()},\n ]\n\n for test_case in valid_test_cases:\n instance = deserialize.deserialize(SomeUnionClass, test_case)\n assert test_case[\"one\"] == instance.one\n assert test_case[\"two\"] == instance.two\n assert test_case[\"three\"] == instance.three\n\n for test_case in invalid_test_cases:\n with pytest.raises(deserialize.DeserializeException):\n _ = deserialize.deserialize(SomeUnionClass, test_case)", "def convert_sub(sub):\n\n args = sub.args\n (ref_aa, pos, new_aa) = args\n\n parent_fn_name = sub.parent_function.name_short\n prefix_list = {\"p\": \"p.\", \"r\": \"r.\", \"g\": \"c.\"}\n prefix = prefix_list[parent_fn_name]\n\n new_var_arg = f'\"{prefix}{belspec[\"namespaces\"][\"AminoAcid\"][\"to_short\"][ref_aa.value]}{pos.value}{belspec[\"namespaces\"][\"AminoAcid\"][\"to_short\"][new_aa.value]}\"'\n\n new_var = Function(\"var\", version=version)\n\n new_var.add_argument(StrArg(new_var_arg, new_var))\n\n return new_var", "def test_proto_serialization() -> None:\n\n uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n obj = SpecificLocation(id=uid, name=\"Test\")\n\n blob = SpecificLocation.get_protobuf_schema()(id=sy.serialize(uid), name=\"Test\")\n\n assert sy.serialize(obj, to_proto=True) == blob\n assert sy.serialize(obj, to_proto=True) == blob\n assert sy.serialize(obj, to_proto=True) == blob", "def _get_arg_name(self, arg, variable_name):", "def parse_variable(var):\n var_info = {} \n var_info[\"name\"] = var[\"name\"]\n\n # get the variable type\n raw_base_type = var[\"typeDescriptions\"][\"typeIdentifier\"].split(\"$\")[0]\n base_type = infer_type(raw_base_type)\n\n if (base_type != None):\n var_info[\"type\"] = base_type\n else:\n return None\n\n composite_types = parse_composite_types(var, base_type)\n if (composite_types != None):\n for k, v in composite_types.items():\n var_info[k] = v\n else: \n return None\n\n return var_info", "def interpret_packet_value_pair(data):\n if data is None:\n return None, None\n packet_type = int.from_bytes(data[3:4], 'little')\n name = value = None\n if packet_type == 1:\n name = str(data[17:], 'utf8') \n value = float(ustruct.unpack('<i', data[12:16])[0])\n elif packet_type == 5:\n name = str(data[21:29], 'ascii').strip()\n value = ustruct.unpack('<d', data[12:20])[0]\n else:\n display.scroll('Packet type {} not recognised'.format(packet_type))\n return name, value", "def _var_id_sub(self, sprintf):\n id_list = map(lambda x: self.cdict[x][1], sprintf[\"vars\"] )\n return sprintf[\"text\"] % tuple(id_list)", "def test_rename_visitor_type_coverage(self) -> None:\n type_sets = [\n RenameSchemaTypesVisitor.noop_types,\n RenameSchemaTypesVisitor.rename_types,\n ]\n all_types = {snake_to_camel(node_type) + \"Node\" for node_type in QUERY_DOCUMENT_KEYS}\n type_sets_union: Set[str] = set()\n for type_set in type_sets:\n self.assertTrue(type_sets_union.isdisjoint(type_set))\n type_sets_union.update(type_set)\n self.assertEqual(all_types, type_sets_union)", "def _assert_union_types_equal(self, type1, type2):\n self.assertEqual(type1.name, type2.name)\n self.assertEqual(type1.description, type2.description)\n self._assert_parent_types_equal(type1, type2)", "def test_union_simple():\n valid_test_cases = [\n {\"one\": 1},\n {\"one\": \"1\"},\n ]\n\n invalid_test_cases = [\n {\"one\": 3.1415},\n {\"one\": None},\n {\"one\": BasicUnionClass()},\n ]\n\n for test_case in valid_test_cases:\n instance = deserialize.deserialize(BasicUnionClass, 
test_case)\n assert test_case[\"one\"] == instance.one\n\n for test_case in invalid_test_cases:\n with pytest.raises(deserialize.DeserializeException):\n _ = deserialize.deserialize(BasicUnionClass, test_case)", "def _parse_subroutine_type(self, die):\n prototyped_attribute = die.attributes.get(\"DW_AT_prototyped\")\n prototyped = \"\" if prototyped_attribute is None else \\\n self._get_value_by_attribute(die, prototyped_attribute)\n subroutine_type = Subroutine(prototyped)\n setattr(subroutine_type, \"die\", die)\n self._subroutine_type_by_offset[die.offset] = subroutine_type", "def __init__(self, name: str, owner_subtype_of: Sequence[Any] = ()):\n self.name = name\n self.subtype_of = tuple(owner_subtype_of)", "def testProtobufUnrecognizedField(self):\n decoded = protobuf.decode_message(test_util.OptionalMessage,\n self.unexpected_tag_message)\n self.assertEquals(1, len(decoded.all_unrecognized_fields()))\n self.assertEquals(15, decoded.all_unrecognized_fields()[0])\n self.assertEquals((5, messages.Variant.INT64),\n decoded.get_unrecognized_field_info(15))", "def filterToName(type: int) -> unicode:\n ...", "def _dlu_from_variable(variable):\n return variable.name.split('_')[0]", "def test_members_in_list_tuple_or_dict(self):\r\n\r\n def local_test(x,y):\r\n m1=Module()\r\n m1.x=x()\r\n m1.y=y()\r\n m1.emtpylist = []\r\n m1.lx=[x()]#cast Variable]\r\n m1.ly=[y()]\r\n m1.llx=[[x()]]#cast Variable]\r\n m1.lly=[[y()]]\r\n m1.ltx=[(x(),)]\r\n m1.lty=[(y(),)]\r\n m1.ldx=[{\"x\":x()}]\r\n m1.ldy=[{\"y\":y()}]\r\n m1.tx=(x(),)\r\n m1.ty=(y(),)\r\n m1.tlx=[(x(),)]\r\n m1.tly=[(y(),)]\r\n m1.ttx=((x(),),)\r\n m1.tty=((y(),),)\r\n m1.tdx=({\"x\":x()},)\r\n m1.tdy=({\"y\":y()},)\r\n m1.dx={\"x\":x()}\r\n m1.dy={\"y\":y()}\r\n m1.dlx={\"x\":[x()]}\r\n m1.dly={\"y\":[y()]}\r\n m1.dtx={\"x\":(x(),)}\r\n m1.dty={\"y\":(y(),)}\r\n m1.ddx={\"x\":{\"x\":x()}}\r\n m1.ddy={\"y\":{\"y\":y()}}\r\n\r\n assert isinstance(m1.x,(gof.Variable))\r\n assert isinstance(m1.y,(gof.Variable))\r\n for i, obj in enumerate([\r\n m1.lx[0], #0\r\n m1.llx[0][0],\r\n m1.ltx[0][0],\r\n m1.ldx[0]['x'],\r\n m1.lty[0][0],#5\r\n m1.ldy[0]['y'],\r\n m1.ly[0],\r\n m1.lly[0][0],\r\n m1.tx[0], #8\r\n m1.ty[0], m1.tlx[0][0],\r\n m1.tly[0][0], m1.ttx[0][0], m1.tty[0][0], m1.tdx[0]['x'],\r\n m1.tdy[0]['y'], m1.dx['x'],\r\n m1.dy['y'], m1.dlx['x'][0], m1.dly['y'][0],\r\n m1.dtx['x'][0], m1.dty['y'][0], m1.ddx['x']['x'],\r\n m1.ddy['y']['y']]):\r\n assert isinstance(obj,(gof.Variable))\r\n\r\n\r\n inst=m1.make()\r\n\r\n def get_l():\r\n return [inst.lx, inst.ly, inst.tx, inst.ty, inst.dx, inst.dy, inst.llx, inst.lly, inst.ltx, inst.lty, inst.ldx, inst.ldy, inst.tlx, inst.tly, inst.ttx, inst.tty, inst.tdx, inst.tdy, inst.dly, inst.dlx, inst.dty, inst.dtx, inst.ddy, inst.ddx]\r\n def get_l2():\r\n# return [inst.lx[0], inst.ly[0], inst.tx[0], inst.ty[0], inst.dx['x'], inst.dy['y'], inst.llx[0][0], inst.lly[0][0], inst.ltx[0][0], inst.lty[0][0], inst.ldx[0]['x'], inst.ldy[0]['y'], inst.tlx[0][0], inst.tly[0][0], inst.ttx[0][0], inst.tty[0][0], inst.tdx, inst.tdy, inst.dly, inst.dlx, inst.dty, inst.dtx, inst.ddy, inst.ddx]\r\n return [inst.lx, inst.ly, inst.tx, inst.ty, inst.llx[0], inst.lly[0], inst.ltx[0], inst.lty[0], inst.ldx[0], inst.ldy[0], inst.tlx[0], inst.tly[0], inst.ttx[0], inst.tty[0], inst.tdx[0], inst.tdy[0], inst.dly['y'], inst.dlx['x'], inst.dty['y'], inst.dtx['x']]#, inst.ddy['y'], inst.ddx['x']]\r\n\r\n\r\n #test that we can access the data\r\n inst.x\r\n inst.y\r\n for i in get_l():\r\n assert i\r\n\r\n #test that we can set a 
value to the data the get this value\r\n if not isinstance(m1.x, gof.Constant):\r\n inst.x=-1\r\n inst.y=-2\r\n inst.ldx[0]['x']=-3\r\n inst.ldy[0]['y']=-4\r\n inst.tdx[0]['x']=-5\r\n inst.tdy[0]['y']=-6\r\n inst.ddx['x']['x']=-7\r\n inst.ddy['y']['y']=-8\r\n for i,j in zip(get_l2(),range(len(get_l2()))):\r\n i[0]=j\r\n assert inst.x==-1\r\n assert inst.y==-2\r\n assert inst.ldx[0]['x']==-3\r\n assert inst.ldy[0]['y']==-4\r\n assert inst.tdx[0]['x']==-5\r\n assert inst.tdy[0]['y']==-6\r\n assert inst.ddx['x']['x']==-7\r\n assert inst.ddy['y']['y']==-8\r\n for i,j in zip(get_l2(),range(len(get_l2()))):\r\n assert i[0]==j\r\n\r\n local_test(lambda:T.dscalar(),lambda:T.dscalar())\r\n local_test(lambda:T.constant(1),lambda:T.constant(2))\r\n local_test(lambda:T.constant(1),lambda:T.constant(2))", "def _read_mutation_input_parameter(self, build_context: CRUDBuildContext) -> str:\r\n return f\"{stringcase.camelcase(build_context.django_type.__name__)}Match\"", "def gen_dump_code(var_name: str, ty: type) -> str:\n\n tys = type_str(ty)\n if tys.startswith(\"Set[\"):\n return \"print(json.dumps({k : 1 for k in \" + var_name + \"})) # write sets as dictionaries\\n\"\n return f\"print(json.dumps({var_name}))\\n\"", "def get_data_tuple_names(self):\n return (('test_number',))", "def test_get_call_name1(self):\n tree = ast.parse(\"a.b.c.d(x,y)\").body[0].value\n name = b_utils.get_call_name(tree, {})\n self.assertEqual(\"a.b.c.d\", name)", "def ProtocolType(self) -> ProtocolType:", "def _find_vars(self, varnames, unique=False, evars=False, all_ok=False, \n empty_ok=False, single=False):\n if isinstance(varnames, str):\n varnames = (varnames,)\n elif not isinstance(varnames, collections.Iterable):\n raise TypeError(\"variable names should be str or iterable of str\")\n \n # first split into list of single abbrevs per str\n split_names = []\n for name in varnames:\n if not isinstance(name, str):\n raise TypeError(\"must specify variables as string(s)\")\n split_names += name.split()\n nnames = len(split_names)\n \n # check for _all, check for proper usage, and return copy of varlist\n # if evars==False or ['_dta'] + varlist if evars==True\n all_specified = False\n if '_all' in split_names:\n if not all_ok:\n raise ValueError(\"\\\"_all\\\" not allowed in this context\")\n elif not nnames == 1:\n raise ValueError(\n \"\\\"_all\\\" may not be combined with other names\")\n all_specified = True\n all_names = (['_dta'] if evars else []) + list(self._varlist)\n nnames = len(all_names)\n \n # check that more than 0 names specified if empty_ok==False, and\n # ignore extras (with message) if single==True\n if not empty_ok and nnames == 0:\n raise ValueError(\"no variables specified\")\n if single and nnames > 1:\n if not self._quiet:\n smcl = \"{err}\" if IN_STATA else \"\"\n msg = smcl + \"only one {}varname allowed; ignoring the rest\"\n print(msg.format('e' if evars else ''))\n split_names = split_names[:1]\n \n # if all_specified, return aleady-constructed all_names\n if all_specified:\n return all_names\n \n # Create match list of [abbrev, match1, match2, ...].\n # The loops below identify when exact varname given, but that varname\n # happens to be abbreviation of other varnames.\n varlist = self._varlist\n matches = []\n append = matches.append\n if evars:\n for name in split_names:\n if name == \"_dta\":\n append([name, name])\n else:\n match = [var for var in varlist if var.startswith(name)]\n append([name, name] if name in match else [name] + match)\n else:\n for name in split_names:\n match = [var for 
var in varlist if var.startswith(name)]\n append([name, name] if name in match else [name] + match)\n \n # abbreviation was a good, unambiguous abbreviation if exactly\n # one match found, i.e. if the corresponding entry in -matches- \n # is [abbrev, match1]\n if not all(len(m) == 2 for m in matches):\n # there were unmatched or ambiguous abbreviations\n zeros = \" \".join([m[0] for m in matches if len(m) == 1])\n twos = \" \".join([m[0] for m in matches if len(m) >= 3])\n if zeros != \"\" and twos != \"\":\n msg = \"no variables found for {}; multiple found for {}\"\n raise ValueError(msg.format(zeros, twos))\n if zeros != \"\":\n raise ValueError(\n \"no variables found for {}\".format(zeros, twos))\n # if getting here, twos != \"\" and zeros == \"\"\n raise ValueError(\"multiple variables found for '{}'\".format(twos))\n \n if not unique:\n return [m[1] for m in matches]\n seen = set()\n # if name has not been encountered, add to list and set of encountered\n return [m[1] for m in matches \n if m[1] not in seen and not seen.add(m[1])]", "def make_get_python_out_struct(self):\n res = \\\n\"\"\"DLLEXPORT ___madz_LANG_python_TYPE_* {}_get_out_struct(){{\n return &___madz_LANG_python_OUTPUT;\n}}\n\n\"\"\"\n return res.format(self.python_mangle)", "def _encode_union_old(data_type, obj, alias_validators, for_msgpack):\n if obj._tag is None:\n raise bv.ValidationError('no tag set')\n field_data_type = data_type.definition._tagmap[obj._tag]\n if field_data_type is None:\n return obj._tag\n else:\n if (isinstance(field_data_type, bv.Void) or\n (isinstance(field_data_type, bv.Nullable) and\n obj._value is None)):\n return obj._tag\n else:\n try:\n encoded_val = _json_compat_obj_encode_helper(\n field_data_type, obj._value, alias_validators, True,\n for_msgpack)\n except bv.ValidationError as e:\n e.add_parent(obj._tag)\n raise\n else:\n return {obj._tag: encoded_val}", "def parse_mutation_type(raw_schema: Dict) -> Union[str, None]:\n return Schema.parse_operation_type(raw_schema, \"mutationType\")", "def test_get_call_name2(self):\n tree = ast.parse(\"a.b.c.d(x,y)\").body[0].value\n\n name = b_utils.get_call_name(tree, {\"a\": \"alias.x.y\"})\n self.assertEqual(\"alias.x.y.b.c.d\", name)\n\n name = b_utils.get_call_name(tree, {\"a.b\": \"alias.x.y\"})\n self.assertEqual(\"alias.x.y.c.d\", name)\n\n name = b_utils.get_call_name(tree, {\"a.b.c.d\": \"alias.x.y\"})\n self.assertEqual(\"alias.x.y\", name)", "def _get_su_variable(grouped_data):\n var_name = None\n reference_datasets = None\n for (dataset_name, datasets) in grouped_data.items():\n if len(datasets) != 1:\n raise ValueError(\n f\"Expected exactly one file for dataset '{dataset_name}', got \"\n f\"{len(datasets):d}\")\n new_var_name = datasets[0]['short_name']\n new_reference_datasets = datasets[0].get('reference_dataset')\n if var_name is None:\n var_name = new_var_name\n else:\n if new_var_name != var_name:\n raise ValueError(\n f\"Expected identical 'short_name' for all datasets of Su \"\n f\"et al. (2014) constraint, got '{var_name}' and \"\n f\"'{new_var_name}'\")\n if reference_datasets is None:\n reference_datasets = new_reference_datasets\n else:\n if new_reference_datasets != reference_datasets:\n raise ValueError(\n f\"Expected identical 'reference_dataset' for all datasets \"\n f\"of Su et al. 
(2014) constraint, got \"\n f\"'{reference_datasets}' and '{new_reference_datasets}'\")\n if reference_datasets is None:\n raise ValueError(f\"'reference_dataset' not given for variable \"\n f\"'{var_name}'\")\n logger.info(\n \"Found variable '%s' for Su et al. (2014) constraint\", var_name)\n logger.info(\"Found reference datasets '%s'\", reference_datasets)\n return (var_name, reference_datasets)", "def test_from_name(self, testdata: TestData) -> None:\n for record in testdata['observation_type']:\n assert ObservationType.from_name(record['name']).name == record['name']", "def getOqiVarUnit( self, name ):\n\n if not self.oqiVarNames:\n self.getOqiVarNames( )\n\n if name not in self.oqiVarNames:\n for k, v in self.oqiVarNames.items():\n if name == v:\n name = k\n break\n \n unit = self.getVarUnit( name, \"oqi\" )\n if not unit:\n raise AcuDbAssistError, \"Invalid variable name.\"\n return unit", "def merge_proto_types(tp1, tp2):\n if tp1 is None and tp2 is None:\n return _none_proto_type\n if tp1 is None:\n return tp2\n if tp2 is None:\n return tp1\n if tp1 == _none_proto_type and tp2 == _none_proto_type:\n return _none_proto_type\n if tp2 == _none_proto_type:\n return tp1\n if tp1 == _none_proto_type:\n return tp2\n if tp1 == tp2:\n return tp1\n if tp1.HasField(\"basic_type\") and tp2.HasField(\"basic_type\"):\n assert tp1.basic_type == tp2.basic_type, (tp1, tp2)\n return pb.SQLType(\n basic_type = tp1.basic_type,\n nullable = tp1.nullable or tp2.nullable)\n if tp1.HasField('array_type'):\n assert tp2.HasField('array_type'), (tp1, tp2)\n return pb.SQLType(\n array_type=merge_proto_types(tp1.array_type, tp2.array_type),\n nullable=tp1.nullable or tp2.nullable)\n if tp1.HasField('struct_type'):\n assert tp2.HasField('struct_type'), (tp1, tp2)\n l1 = tp1.struct_type.fields\n l2 = tp2.struct_type.fields\n assert len(l1) == len(l2), (l1, l2)\n l = []\n for (f1, f2) in zip(l1, l2):\n assert f1.field_name == f2.field_name, (f1, f2)\n l.append(pb.StructField(field_name=f1.field_name,\n field_type=merge_proto_types(f1.field_type, f2.field_type)))\n return pb.SQLType(\n struct_type=pb.StructType(fields=l),\n nullable=tp1.nullable or tp2.nullable)\n raise Exception(\"Cannot merge incompatible types %s and %s\" % (tp1, tp2))", "def test_translate_struct_tuple(self):\n root = netapp_api.NaElement('root')\n child = ('e1', 'e2')\n root.translate_struct(child)\n self.assertEqual(len(root.get_children()), 2)\n self.assertIsNone(root.get_child_content('e1'))\n self.assertIsNone(root.get_child_content('e2'))", "def test_conversion_modifiers() -> None:\n animal, name = (\"eel\", \"Bob\")\n assert f\"The {animal!s}'s name is {name!r}\" == \"The eel's name is 'Bob'\"", "def split_Typename(name):\n if name.startswith('Vk') or name.startswith('PFN_vk'):\n return split_CamelCase(name)\n return [name]", "def _tuple_to_cpppo_tag_multiple(cls, what, values=None, serializer=':'):\n tag_string = ''\n\n if values == None:\n for i in range(len(what)):\n tag_string += what[i][0] + EnipProtocol._SERIALIZER + str(what[i][1]) + \" \"\n else:\n for i in range(len(what)):\n tag_string += what[i][0] + EnipProtocol._SERIALIZER + str(what[i][1]) + \"=\" + str(values[i]) + \" \"\n\n return tag_string", "def test_translate_struct_tuple(self):\n root = netapp_api.NaElement('root')\n child = ('e1', 'e2')\n root.translate_struct(child)\n self.assertEqual(2, len(root.get_children()))\n self.assertIsNone(root.get_child_content('e1'))\n self.assertIsNone(root.get_child_content('e2'))", "def infer_union_arg(payload, data_cls):\n\n 
hints = getattr(data_cls, _DREALM_HINTS, None)\n if hints is None:\n hints = build_union_hints(data_cls)\n setattr(data_cls, \"__drealm_hints__\", hints)\n\n simp_types, dobj_field_groups = hints\n\n if isinstance(payload, dict): # maybe a dataclass object\n for fields, owners in dobj_field_groups:\n if any(f in payload for f in fields):\n return owners[0]\n\n if isinstance(payload, dict):\n dict_generic_type = simp_types[1]\n if dict_generic_type is not None:\n alias_origin = get_origin(dict_generic_type)\n if issubclass(alias_origin, dict):\n return dict_generic_type\n\n elif isinstance(payload, (list, tuple)):\n list_generic_type = simp_types[2]\n if list_generic_type is not None:\n alias_origin = get_origin(list_generic_type)\n if issubclass(alias_origin, (list, tuple)):\n return list_generic_type\n else:\n for data_type in simp_types[0]:\n if isinstance(payload, data_type):\n return data_type\n\n raise IncompliantError(f\"The payload's type '{type(payload)}' \"\n f\"is incompliant with {data_cls}\")", "def _tuple_to_cpppo_tag(cls, what, value=None, serializer=':'):\n\n tag_string = ''\n tag_string += str(what[0])\n\n if len(what) > 1:\n for field in what[1:]:\n tag_string += EnipProtocol._SERIALIZER\n tag_string += str(field)\n if value is not None:\n if type(value) is str:\n # TODO: add support for SSTRING tags\n # ''' enip_client -a 192.168.1.20 'README:2[0]=(SSTRING)\"string\"' '''\n pass\n tag_string += '='\n tag_string += str(value)\n # print 'DEBUG _tuple_to_cpppo_tag tag_string: ', tag_string\n\n return tag_string", "def _get_variable_name(self, param_name):\n m = re.match(\"^(.*):\\\\d+$\", param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name", "def _get_variable_name(self, param_name):\n m = re.match(\"^(.*):\\\\d+$\", param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name", "def _get_variable_name(self, param_name):\n m = re.match(\"^(.*):\\\\d+$\", param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name", "def _get_variable_name(self, param_name):\n m = re.match(\"^(.*):\\\\d+$\", param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name", "def _get_variable_name(self, param_name):\n m = re.match(\"^(.*):\\\\d+$\", param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name", "def testtuple ( self ):\r\n\t\tr = re.compile ( 'frac' )\r\n\t\tfor fracTup1, fracTup2, tupleValue in self.knownTupleValues:\r\n\t\t\tfrac1 = eval ( r.sub ( 'frac.frac', fracTup1 ) )\r\n\t\t\tfrac2 = eval ( r.sub ( 'frac.frac', fracTup2 ) )\r\n\t\t\tself.assertEqual ( frac1.tuple (), frac2.tuple () )\r\n\t\t\tself.assertEqual ( frac1.tuple () [0], tupleValue [0] )\r\n\t\t\tself.assertEqual ( frac1.tuple () [1], tupleValue [1] )", "def test_tb12_strings():\n\n err = _do_test_raw(\"\"\"\n var f = \"editImageMapButton.label\";\n var x = \"haveSmtp1.suffix2\";\n \"\"\", versions=TB12_DEFINITION)\n assert err.failed()\n assert err.warnings\n assert err.notices\n assert err.compat_summary[\"errors\"]", "def test_by_variable():\n pass", "def get_variable_full_name(var):\n if var._save_slice_info:\n return var._save_slice_info.full_name\n else:\n return var.op.name", "def _get_python_prop_type(prop_type: Type[Variable]) -> str:\n if prop_type is VariableBool:\n return \"bool\"\n if prop_type in (VariableInt, VariableUInt):\n return \"int\"\n if prop_type is VariableFloat:\n return \"float\"\n if prop_type is VariableString:\n return \"bytes\"\n if prop_type is VariableVec2:\n return \"(float, float)\"\n 
if prop_type is VariableStruct:\n return \"dict[str, Variable]\"\n if prop_type is VariableArray:\n return \"MutableSequence\"\n raise TypeError(\"unexpected variable type\")", "def getVar(tree):\n if(tree.data == \"string_expression\"):\n if(tree.children[0].data == \"string\"):\n return tree.children[0].children[0]\n elif(tree.children[0].data == \"variable\"):\n return getValue(tree.children[0].children[0])\n elif(tree.children[0].data == \"string_expression\"):\n # if the child is a string expression apply getVar again on the child\n if(len(tree.children)== 2):\n return getVar(tree.children[0])+getVar(tree.children[1])\n return getVar(tree.children[0])\n elif(tree.data == \"integer\"):\n return evalInteger(tree) \n \n elif(tree.data == \"string_list\"):\n return getStringInterior(tree.children[0],[])\n return \"ERROR\"", "def test_named_tuple_return_str_push_mode():\n def namedtuple_container(param1: str, param2: float, param3: bool) \\\n -> NamedTuple('Output', [('val1', str), ('val2', int), ('val3', bool)]):\n import collections\n Output = collections.namedtuple('Output', ['val1', 'val2', 'val3'])\n return Output('1', 2, True)\n func_name = inspect.currentframe().f_code.co_name\n container = components.create_component_from_func(namedtuple_container, base_image=config.BASE_IMAGE)\n gen_code = Caching._code_wrapper_generator(user_kwargs=container.component_spec.inputs,\n disdat_kwargs=disdat_kwargs,\n input_artifact_list=container.component_spec.outputs,\n core_code=core_code_str,\n return_signature='''str''',\n generated_func_name='generated_' + func_name)\n user_params = {'param1': '1', 'param2': 2.0, 'param3': True}\n artifacts = {'reserve_disdat_{}'.format(key): '' for key in ['val1', 'val2', 'val3']}\n signature = inspect.signature(gen_code)\n # check signature\n check_signature(signature, container, push_mode=True)\n # check functionality\n user_params.update(artifacts)\n result = json.loads(gen_code(**user_params, **disdat_params))\n assert result['user_kwargs'] == user_params\n assert result['disdat_kwargs'] == disdat_params", "def type(name):", "def decode_tuple1(as_bytes: typing.List[int]) -> tuple:\n raise NotImplementedError()", "def _get_variable_name(param_name):\n m = re.match(\"^(.*):\\\\d+$\", param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name", "def unpack_str_from(buffer: bytes, offset: int = 0) -> tuple[str, int]:\n length, length_size = _unpack_length_from(buffer, offset=offset)\n offset += length_size\n str_bytes, *_ = struct.unpack_from(f'!{length}s', buffer, offset=offset)\n value = str_bytes.decode('utf-8')\n return value, length_size + length", "def _encode_union(data_type, obj, alias_validators, for_msgpack):\n if obj._tag is None:\n raise bv.ValidationError('no tag set')\n field_data_type = data_type.definition._tagmap[obj._tag]\n\n if (isinstance(field_data_type, bv.Void) or\n (isinstance(field_data_type, bv.Nullable) and obj._value is None)):\n return {'.tag': obj._tag}\n else:\n try:\n encoded_val = _json_compat_obj_encode_helper(\n field_data_type, obj._value, alias_validators, False,\n for_msgpack)\n except bv.ValidationError as e:\n e.add_parent(obj._tag)\n raise\n else:\n if isinstance(field_data_type, bv.Nullable):\n # We've already checked for the null case above, so now we're\n # only interested in what the wrapped validator is.\n field_data_type = field_data_type.validator\n if (isinstance(field_data_type, bv.Struct) and\n not isinstance(field_data_type, bv.StructTree)):\n d = collections.OrderedDict()\n d['.tag'] = 
obj._tag\n d.update(encoded_val)\n return d\n else:\n return collections.OrderedDict([\n ('.tag', obj._tag),\n (obj._tag, encoded_val)])", "def test_proto_deserialization() -> None:\n\n uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n obj = SpecificLocation(id=uid)\n\n blob = SpecificLocation.get_protobuf_schema()(id=sy.serialize(uid))\n\n obj2 = sy.deserialize(blob=blob, from_proto=True)\n assert obj == obj2", "def UnionFromMojom(self, union, mojom_type):\n assert mojom_type.tag == mojom_types_mojom.UserDefinedType.Tags.union_type\n mojom_union = mojom_type.union_type\n self.PopulateUserDefinedType(union, mojom_union)\n union.fields = [self.UnionFieldFromMojom(f) for f in mojom_union.fields]", "def fixture_union(name, # type: str\n fixtures, # type: Iterable[Union[str, Callable]]\n scope=\"function\", # type: str\n idstyle='compact', # type: Optional[Union[str, Callable]]\n ids=None, # type: Union[Callable, Iterable[str]]\n unpack_into=None, # type: Iterable[str]\n autouse=False, # type: bool\n hook=None, # type: Callable[[Callable], Callable]\n **kwargs):\n # grab the caller module, so that we can later create the fixture directly inside it\n caller_module = get_caller_module()\n\n # test the `fixtures` argument to avoid common mistakes\n if not isinstance(fixtures, (tuple, set, list)):\n raise TypeError(\"fixture_union: the `fixtures` argument should be a tuple, set or list\")\n\n # unpack the pytest.param marks\n custom_pids, p_marks, fixtures = extract_parameterset_info((name, ), fixtures)\n\n # get all required fixture names\n f_names = [get_fixture_name(f) for f in fixtures]\n\n # create all alternatives and reapply the marks on them\n fix_alternatives = []\n f_names_args = []\n for _idx, (_name, _id, _mark) in enumerate(zip(f_names, custom_pids, p_marks)):\n # create the alternative object\n alternative = UnionFixtureAlternative(union_name=name, alternative_name=_name, alternative_index=_idx)\n\n # remove duplicates in the fixture arguments: each is required only once by the union fixture to create\n if _name in f_names_args:\n warn(\"Creating a fixture union %r where two alternatives are the same fixture %r.\" % (name, _name))\n else:\n f_names_args.append(_name)\n\n # reapply the marks\n if _id is not None or (_mark or ()) != ():\n alternative = pytest.param(alternative, id=_id, marks=_mark or ())\n fix_alternatives.append(alternative)\n\n union_fix = _fixture_union(caller_module, name,\n fix_alternatives=fix_alternatives, unique_fix_alt_names=f_names_args,\n scope=scope, idstyle=idstyle, ids=ids, autouse=autouse, hook=hook, **kwargs)\n\n # if unpacking is requested, do it here\n if unpack_into is not None:\n # Note: we can't expose the `in_cls` argument as we would not be able to output both the union and the\n # unpacked fixtures. However there is a simple workaround for this scenario of unpacking a union inside a class:\n # call unpack_fixture separately.\n _make_unpack_fixture(caller_module, argnames=unpack_into, fixture=name, hook=hook, in_cls=False)\n\n return union_fix", "def parse_hint(hint: Type) -> Tuple[Type, Optional[List]]:\n if hasattr(hint, \"__origin__\"):\n # This is a type hint (eg typing.Union)\n # Filter out TypeVars such as KT & VT_co (they generally\n # indicate that no explicit hint was given)\n hint_args = [a for a in getattr(hint, \"__args__\", []) if not isinstance(a, TypeVar)]\n return hint.__origin__, hint_args or None\n else:\n # This is something other than a type hint\n # (e.g. 
an int or datetime)\n return hint, None", "def parse_variable(self, variable): # pragma: no cover\n raise NotImplementedError('Implemented in child class')", "def testPickle(self):\n global MyEnum\n global AnotherMessage\n global MyMessage\n\n class MyEnum(messages.Enum):\n val1 = 1\n val2 = 2\n\n class AnotherMessage(messages.Message):\n string = messages.StringField(1, repeated=True)\n\n class MyMessage(messages.Message):\n field1 = messages.IntegerField(1)\n field2 = messages.EnumField(MyEnum, 2)\n field3 = messages.MessageField(AnotherMessage, 3)\n\n message = MyMessage(field1=1, field2=MyEnum.val2,\n field3=AnotherMessage(string=['a', 'b', 'c']))\n message.set_unrecognized_field(\n 'exists', 'value', messages.Variant.STRING)\n message.set_unrecognized_field('repeated', ['list', 0, ('test',)],\n messages.Variant.STRING)\n unpickled = pickle.loads(pickle.dumps(message))\n self.assertEquals(message, unpickled)\n self.assertTrue(AnotherMessage.string is unpickled.field3.string.field)\n self.assertTrue('exists' in message.all_unrecognized_fields())\n self.assertEquals(('value', messages.Variant.STRING),\n message.get_unrecognized_field_info('exists'))\n self.assertEquals((['list', 0, ('test',)], messages.Variant.STRING),\n message.get_unrecognized_field_info('repeated'))", "def decode_message(self, buf, message_type=None):\n self.debugStack = 0\n value, typedef, _ = self._decode_message(\"\", buf, message_type)\n return value, typedef", "def make_union(self, *args, **kwargs): # real signature unknown\n pass", "def cpp_type_to_python(self, ot: str):\n t = ot\n t = remove_cvref(t)\n t = self._remove_variable_type_prefix(t)\n try:\n return cpp_base_type_to_python(t)\n except KeyError:\n pass\n if is_function_pointer_type(t):\n func = function_pointer_type_info(t)\n args = \",\".join([self.cpp_type_to_python(arg.type) for arg in func.args])\n return f'Callable[[{args}], {self.cpp_type_to_python(func.ret_type)}]'\n\n if is_function_type(t):\n func = function_type_info(t)\n args = \",\".join([self.cpp_type_to_python(arg.type) for arg in func.args])\n return f'Callable[[{args}], {self.cpp_type_to_python(func.ret_type)}]'\n\n if is_pointer_type(t):\n cpp_base = self.resolve_to_basic_type_remove_const(pointer_base(t))\n if is_pointer_type(cpp_base) or is_array_type(cpp_base):\n return f'\"level 2 pointer:{t}\"' # un-convertible: level 2 pointer\n if cpp_base in ARRAY_BASES:\n return ARRAY_BASES[cpp_base]\n return self.cpp_type_to_python(cpp_base)\n if is_array_type(t):\n b = array_base(t)\n if b in ARRAY_BASES: # special case: string array\n return ARRAY_BASES[b]\n base = self.cpp_type_to_python(b)\n return f'List[{base}]'\n if is_tuple_type(t):\n es = tuple_elements(t)\n bases = [self.cpp_type_to_python(i) for i in es]\n bases_str = \",\".join(bases)\n return f'Tuple[{bases_str}]'\n\n # check classes\n objects = self.objects\n if t in objects:\n o = objects[t]\n if isinstance(o, GeneratorClass) or isinstance(o, GeneratorEnum):\n return t.replace(\"::\", \".\").strip(\" .\") # todo fix this\n if isinstance(o, GeneratorTypedef):\n return self.cpp_type_to_python(o.target)\n\n if t.startswith(\"(anonymous\"):\n return f'\"{t}\"'\n\n # this means this is\n logger.warning(\"%s might be an internal symbol, failed to resolve to basic type\", t)\n return t", "def ShapeVar(name):\n return TypeVar(name, kind=Kind.ShapeVar)" ]
[ "0.7497985", "0.54891527", "0.5454183", "0.5442474", "0.5129124", "0.5115415", "0.51112336", "0.5045163", "0.5033024", "0.5018397", "0.49975044", "0.49971396", "0.49895564", "0.4978763", "0.4951379", "0.49307013", "0.49243486", "0.48797363", "0.48714188", "0.485954", "0.48464656", "0.4840963", "0.48244455", "0.4813817", "0.48101464", "0.47849223", "0.46974272", "0.46804968", "0.46798742", "0.46779147", "0.4664157", "0.46636707", "0.46617192", "0.46586013", "0.4653217", "0.46427134", "0.46365038", "0.46346483", "0.46301013", "0.4629794", "0.4625485", "0.46189365", "0.4614211", "0.4587586", "0.4578775", "0.45732346", "0.45683756", "0.4561455", "0.45568773", "0.45568722", "0.45565236", "0.45542288", "0.45512077", "0.4549421", "0.4532007", "0.45196804", "0.45138666", "0.4503457", "0.44939056", "0.4493088", "0.44928095", "0.44899502", "0.4482687", "0.44821805", "0.44780824", "0.4471034", "0.44680282", "0.4465608", "0.44632494", "0.44626254", "0.44613037", "0.44589338", "0.44579285", "0.4455991", "0.4455991", "0.4455991", "0.4455991", "0.4455991", "0.4455734", "0.44478968", "0.44455746", "0.44440028", "0.4442683", "0.44396532", "0.44395313", "0.4437758", "0.4436612", "0.4433612", "0.44335845", "0.4431472", "0.44146043", "0.44145945", "0.44114923", "0.44112453", "0.43994427", "0.43918014", "0.43858624", "0.4382344", "0.4381004", "0.43803155" ]
0.8391995
0
Test _includes_custom_type method positive result.
def test__includes_custom_type_positive(self, *mocks):
    content_type = "Union[str]"
    result = self.protocol_generator._includes_custom_type(content_type)
    self.assertTrue(result)

    content_type = "Optional[str]"
    result = self.protocol_generator._includes_custom_type(content_type)
    self.assertTrue(result)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _includes_custom_type(content_type: str) -> bool:\n\n if content_type.startswith(\"Optional\"):\n sub_type = _get_sub_types_of_compositional_types(content_type)[0]\n result = _includes_custom_type(sub_type)\n elif content_type.startswith(\"Union\"):\n sub_types = _get_sub_types_of_compositional_types(content_type)\n result = False\n for sub_type in sub_types:\n if _includes_custom_type(sub_type):\n result = True\n break\n elif (\n content_type.startswith(\"FrozenSet\")\n or content_type.startswith(\"Tuple\")\n or content_type.startswith(\"Dict\")\n or content_type in PYTHON_TYPE_TO_PROTO_TYPE.keys()\n ):\n result = False\n else:\n result = True\n return result", "def testTheType(self, theTestType):\n \n pass", "def test_get_types(self):\n pass", "def has_type(self, item_type):\n raise NotImplementedError()", "def is_custom(self):\n return self._is_custom", "def check_type(self):\n return True", "def test(types, _):\n return 'Date' in types and 'Postal Code' in types", "def should_be_included(self):\n return True", "def is_required_data(self, typename):\n return typename in self.required_data_products", "def can_contain(self):\n return False", "def validatePredefinedType(self, type: int) -> bool:\n ...", "def has_custom(self, phrase_string: str, custom_property: str) -> bool:\n print('CUSTOM:', self.custom)\n return phrase_string in self.custom and custom_property in self.custom[phrase_string]", "def test_expected_custom_types(self):\n handler = self.create_handler(\n r'json_list=[\"a\", \"b\"]&'\n r'json_dict={\"a\": 1}&'\n r'datetime=2007-03-04T21:08:12Z&'\n r'date=2007-03-04')\n param_types = {\n 'json_list': 'json',\n 'json_dict': 'json',\n 'datetime': 'datetime',\n 'date': 'date',\n }\n expected_params = {\n u'json_list': [u'a', u'b'],\n u'json_dict': {u'a': 1},\n u'datetime': datetime.datetime(2007, 3, 4, 21, 8, 12),\n u'date': datetime.date(2007, 3, 4),\n }\n self.assertEqual(handler.get_params(param_types), expected_params)", "def is_required_data(self, typename):\r\n return typename in self.required_data_products", "def test_sample_type(self):\r\n \r\n self.assertEqual(self.test_sample.sampleType, 'TUMOUR')", "def tests_ti_document_get_includes(self, request: FixtureRequest):\n super().group_get_includes(request)", "def hasCustomEffect(self, type_):\n for effect in getHandle().effects:\n if CraftPotionUtil == effect.getMobEffect(, type_):\n return True\n return False", "def testContentTypes_Extended(self):\n self.mox.ReplayAll()\n\n mapper = service_handlers.RPCMapper(['GET', 'POST'],\n 'my-content-type',\n self.protocol,\n content_types=['a', 'b'])\n\n self.assertEquals(frozenset(['GET', 'POST']), mapper.http_methods)\n self.assertEquals('my-content-type', mapper.default_content_type)\n self.assertEquals(frozenset(['my-content-type', 'a', 'b']),\n mapper.content_types)\n\n self.mox.VerifyAll()", "def test_type_code(self):\n inv_search = \"collection:review\"\n spi_search = \"find tc review\"\n self._compare_searches(inv_search, spi_search)\n inv_search = \"collection:review\"\n spi_search = \"find ps review\"\n self._compare_searches(inv_search, spi_search)\n inv_search = \"collection:review\"\n spi_search = \"find scl review\"\n self._compare_searches(inv_search, spi_search)", "def test_allow(self, incl, value):\n i = include(*incl)\n assert i(fields(C).a, value) is True", "def test_linked_list_includes_exists():\n assert LinkedList.includes", "def test_publish_with_custom_fields(self):\n class RichField(BaseTextAreaField):\n field_id = 'rich_field'\n\n class 
SpecialRichField(BaseTextAreaField):\n # Exercise special case field name 'text'\n field_id = 'text'\n\n class BasicField(BaseEditableField):\n field_id = 'basic_field'\n\n fieldset = get_review_request_fieldset('main')\n fieldset.add_field(RichField)\n fieldset.add_field(SpecialRichField)\n fieldset.add_field(BasicField)\n\n try:\n draft = self._get_draft()\n review_request = draft.review_request\n\n draft.description = 'New description'\n draft.extra_data['rich_field'] = '**Rich custom text**'\n draft.extra_data['rich_field_text_type'] = 'markdown'\n draft.extra_data['text'] = 'Nothing special'\n draft.extra_data['text_type'] = 'plain'\n draft.extra_data['basic_field'] = 'Basic text'\n draft.target_people.add(review_request.submitter)\n\n draft.publish()\n\n self.assertNotIn('description_text_type',\n review_request.extra_data)\n self.assertIn('rich_field', review_request.extra_data)\n self.assertIn('rich_field_text_type', review_request.extra_data)\n self.assertIn('text', review_request.extra_data)\n self.assertIn('text_type', review_request.extra_data)\n self.assertIn('basic_field', review_request.extra_data)\n self.assertNotIn('basic_field_text_type',\n review_request.extra_data)\n\n self.assertEqual(review_request.description, draft.description)\n self.assertEqual(review_request.extra_data['rich_field'],\n draft.extra_data['rich_field'])\n self.assertEqual(review_request.extra_data['rich_field_text_type'],\n draft.extra_data['rich_field_text_type'])\n self.assertEqual(review_request.extra_data['text'],\n draft.extra_data['text'])\n self.assertEqual(review_request.extra_data['text_type'],\n draft.extra_data['text_type'])\n self.assertEqual(review_request.extra_data['basic_field'],\n draft.extra_data['basic_field'])\n finally:\n fieldset.remove_field(RichField)\n fieldset.remove_field(SpecialRichField)\n fieldset.remove_field(BasicField)", "def custom_added(self, key: _K) -> bool:\n return key in self._customs", "def test_single_named_link_with_custom_type():\n pass", "def use_types( self ) :\n return self._use_types", "def __contains__(self, *args, **kwargs): # real signature unknown\n pass", "def __contains__(self, *args, **kwargs): # real signature unknown\n pass", "def __contains__(self, *args, **kwargs): # real signature unknown\n pass", "def __contains__(self, *args, **kwargs): # real signature unknown\n pass", "def __contains__(self, *args, **kwargs): # real signature unknown\n pass", "def isrequired(self, typename):\n return typename in self.required_products", "def __contains__(self, *args, **kwargs):\n ...", "def __contains__ (self, item):\n return False", "def test_boolean_in_serializer() -> None:\n assert cv.custom_serializer(cv.boolean) == {\n \"type\": \"boolean\",\n }", "def test_is_primitive_returns_true(self):\n for primitive_type in (535, \"test_string\", False, 12.45, u\"test_unicode_stirng\"):\n is_primitive = self.tested_class._is_primitive(primitive_type)\n self.assertTrue(is_primitive)", "def tests_ti_file_get_includes(self, request: FixtureRequest):\n super().indicator_get_includes(request)", "def test_types(self):\n field_types = (\n ('clip_id', int), ('created_at', datetime.datetime),\n ('description', str), ('filename', str),\n ('format', smscsv.MediaFormat), ('media_id', int), ('title', str)\n )\n for item in self.items:\n for name, type_ in field_types:\n self.assertIsInstance(getattr(item, name), type_)", "def test_type(self):\n return self._test_type", "def __contains__(name):", "def test_sample_one_sample_type(self):\r\n 
self.assertEqual(self.test_sample.sampleType, 'TUMOUR')", "def _should_ignore_type(self, typ):\n return typ in self.config.IGNORED_TYPES", "def test_47(self):\n assert 'False' == Api.requestBlock('test-47', CustomFields=True)", "def hasCustomData( self, key ):\n return str(key) in self._customData", "def is_my_case(self, type_):\n return (\n isinstance(self.__apply_sequence(type_), self.declaration_class)\n )", "def test_string_in_serializer() -> None:\n assert cv.custom_serializer(cv.string) == {\n \"type\": \"string\",\n }", "def __contains__(self, name: str) -> bool:\n ...", "def CheckType(self, *args, **kwargs):\n pass", "def test_contains_true(self):\n self.assertTrue('BarcodeSequence' in self.tester)\n self.assertTrue('barcodesequence' in self.tester)", "def missing_types():\n\n return ...", "def test_parse_include(self):\n old_type = Include('include', [\n Relationship('student', PersonSchema(), None),\n Relationship('school', StudentSchema(), None)])\n new_type = self.driver.parse(old_type)\n\n assert old_type.relationships != new_type.relationships\n assert isinstance(new_type.relationships[0], Mapper)", "def hasCustomCategory( self, context ):\n return ObjectHasCustomCategory( context )", "def include_object(obj, name, type_, reflected, compare_to):\n results = signals.alembic_include_object.send(\n obj, name=name, type_=type_, reflected=reflected, compare_to=compare_to\n )\n if results:\n return all([res[1] for res in results])\n\n return True", "def _supports(self, item):\n return type(item) in Result.SUPPORTED_DATA", "def test_blacklisted_types_in_results(self):\n portal = self.layer['portal']\n registry = getUtility(IRegistry)\n search_settings = registry.forInterface(ISearchSchema, prefix=\"plone\")\n q = {'SearchableText': 'spam'}\n res = portal.restrictedTraverse('@@search').results(query=q,\n batch=False)\n self.assertTrue('my-page1' in [r.getId() for r in res],\n 'Test document is not found in the results.')\n\n # Now let's exclude 'Document' from the search results:\n search_settings.types_not_searched = ('Document',)\n res = portal.restrictedTraverse('@@search').results(query=q,\n batch=False)\n self.assertFalse(\n 'my-page1' in [r.getId() for r in res],\n 'Blacklisted type \"Document\" has been found in search results.')", "def test_add_relation_types(self):\n pass", "def test_custom_schema():\n graph = create_object_graph(\"example\", testing=True)\n codec = graph.pubsub_message_schema_registry.find(DerivedSchema.MEDIA_TYPE)\n assert_that(codec.schema, is_(instance_of(DerivedSchema)))", "def test_checkCustoms(self):\n self.failUnlessEqual(self.nice.opts['myflag'], \"PONY!\")\n self.failUnlessEqual(self.nice.opts['myparam'], \"Tofu WITH A PONY!\")", "def _is_simple_type(cls):\n return all([\n AnnotationWrapper(anno).is_simple_in_opt_and_not_opt\n for anno in cls._used_annotations()\n ])", "def is_custom(self, is_custom):\n\n self._is_custom = is_custom", "def is_custom(self, is_custom):\n\n self._is_custom = is_custom", "def test_list_source_type(self):\n\n # check if documentalist has access to list view\n self.login_documentalist()\n response = self.client.get('/types/' )\n\n # 403 = unauthorized\n self.assertEqual(response.status_code, 403)\n\n self.client.logout()\n self.login_admin()\n\n response = self.client.get('/types/')\n self.assertContains(response, \"Base de dados\")", "def test_add_relation_type(self):\n pass", "def test_custom_raw_row_results_all_types(self):\n # Connect using a custom protocol handler that tracks the various types the result message is 
used with.\n session = Cluster(protocol_version=PROTOCOL_VERSION).connect(keyspace=\"custserdes\")\n session.client_protocol_handler = CustomProtocolHandlerResultMessageTracked\n session.row_factory = tuple_factory\n\n colnames = create_table_with_all_types(\"alltypes\", session, 1)\n columns_string = \", \".join(colnames)\n\n # verify data\n params = get_all_primitive_params(0)\n results = session.execute(\"SELECT {0} FROM alltypes WHERE primkey=0\".format(columns_string))[0]\n for expected, actual in zip(params, results):\n self.assertEqual(actual, expected)\n # Ensure we have covered the various primitive types\n self.assertEqual(len(CustomResultMessageTracked.checked_rev_row_set), len(PRIMITIVE_DATATYPES)-1)\n session.shutdown()", "def _contains_op(spec):", "def test_ticket_type_add_ok(self):\n test_name = sys._getframe().f_code.co_name\n self._execute('ticket_type add new_type')\n rv, output = self._execute('ticket_type list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)", "def _AddType(self, entity_type):\n if not entity_type.IsValid():\n self.AddFindings(entity_type.GetFindings())\n return False\n return self.local_namespace.InsertType(entity_type)", "def is_registered(self, type):\n attr = self._type_to_attr(type)\n return getattr(self, attr, None) is not None", "def test_find_relation_types(self):\n pass", "def test_eventtype(\n self,\n splunk_search_util,\n splunk_searchtime_fields_eventtypes,\n record_property,\n caplog,\n ):\n record_property(\n \"eventtype\", splunk_searchtime_fields_eventtypes[\"stanza\"]\n )\n index_list = \"(index=\" + \" OR index=\".join(splunk_search_util.search_index.split(',')) + \")\"\n search = (f\"search {index_list} AND \"\n f\"eventtype=\"\n f\"\\\"{splunk_searchtime_fields_eventtypes['stanza']}\\\"\")\n search += \" | stats count by sourcetype\"\n\n self.logger.info(\n \"Testing eventtype =%s\", splunk_searchtime_fields_eventtypes[\"stanza\"]\n )\n\n self.logger.info(\"Search query for testing =%s\", search)\n\n # run search\n result = splunk_search_util.checkQueryCountIsGreaterThanZero(\n search, interval=splunk_search_util.search_interval, retries=splunk_search_util.search_retry\n )\n record_property(\"search\", search)\n assert result, (\n f\"No result found for the search.\\nsearch={search}\\n\"\n f\"interval={splunk_search_util.search_interval}, retries={splunk_search_util.search_retry}\"\n )", "def test(self) -> Any:\n pass", "def __contains__(self, image: Any) -> bool:\n return isinstance(image, self.native_image_type)", "def contains(self, *args):\n pass", "def test_add_source_type(self):\n # check if documentalist has access to create form\n self.login_documentalist()\n response = self.client.get('/type/new' )\n\n # 403 = unauthorized\n self.assertEqual(response.status_code, 403)\n\n self.client.logout()\n self.login_admin()\n\n form_data = { \n 'status': '0',\n 'acronym': 'site',\n 'name': 'Website',\n 'language' : 'pt-br',\n 'sourcetypelocal_set-TOTAL_FORMS': '0', \n 'sourcetypelocal_set-INITIAL_FORMS': '0',\n }\n\n response = self.client.post('/type/new', form_data, follow=True )\n \n self.assertRedirects(response, '/types')\n self.assertContains(response, \"Website\")", "def __contains__(self, item: Any) -> bool:\n try:\n return item in self.contents\n except TypeError:\n try:\n return item is self.contents\n except TypeError:\n return item == self.contents # type: ignore", "def test_match_types(self):\n f = lws.match_types\n # assert f(str, u'test') is True\n assert f(str, 'test') is True\n 
assert f(int, 123) is True\n assert f(int, 123.00) is False\n assert f(bool, [1, 2, 3]) is False", "def filter(self, record):\n\n if record.exc_info:\n is_included = 0\n else:\n is_included = 1\n return is_included", "def test_create_obj_by_type_from_primitive_type(self):\n test_obj = \"test_primitive\"\n returned_obj = self.tested_class._create_obj_by_type(test_obj)\n self.assertEqual(returned_obj, test_obj)", "def test_positive_time_period_dict_in_serializer() -> None:\n assert cv.custom_serializer(cv.positive_time_period_dict) == {\n \"type\": \"positive_time_period_dict\",\n }", "def on_includes(self, includes):\n pass", "def test_items(self):\n self.assertEqual([(\"described_model_type\", self.expected_described_model)], list(self.mapped_model.items()))", "def test_get_contact_person_types(self):\n pass", "def test_contains_true(self):\n self.assertTrue('1.SKM7.640188' in self.tester)", "def test_contains_true(self):\n self.assertTrue('1.SKM7.640188' in self.tester)", "def test_check_detail_code_returns_true():\n plugin_instance = PluginVipCustomisation()\n assert plugin_instance._check_detail_code('replace me with real xml') == True", "def test_get_item(self):\n self.assertEqual(self.expected_described_model, self.mapped_model[\"described_model_type\"])", "def _include_entity(self, entity: Entity) -> bool:\n impact = entity[\"impact\"]\n if impact and impact not in self._parameter(\"impact\"):\n return False\n element_include_filter = self._parameter(\"element_include_filter\")\n if element_include_filter and not match_string_or_regular_expression(entity[\"element\"], element_include_filter):\n return False\n element_exclude_filter = self._parameter(\"element_exclude_filter\")\n if element_exclude_filter and match_string_or_regular_expression(entity[\"element\"], element_exclude_filter):\n return False\n return True", "def is_my_type(type_str):\n raise NotImplementedError()", "def test_49(self):\n assert 'False' == Api.requestBlock('test-49', CustomFields=True)", "def can_be_registered(self, event_type):\n return True", "def test_json(testapp, item_type):\n res = testapp.get('/' + item_type).follow(status=200)\n assert (item_type + 'Collection') in res.json['@type']", "def contains(*types):\n\n frame = sys._getframe(1)\n f_locals = frame.f_locals\n f_globals = frame.f_globals\n\n if not (f_locals is not f_globals\n and f_locals.get('__module__')\n and f_locals.get('__module__') == f_globals.get('__name__')\n ):\n raise TypeError(\"contains not called from suite\")\n\n def __setitem__(key, value):\n \"\"\"\n This serves as a copy of IContainer.__setitem__ to hold\n the ``precondition`` attribute. 
Note that it replaces a local\n __setitem__ defined before.\n \"\"\"\n __setitem__.__doc__ = IContainer['__setitem__'].__doc__\n __setitem__.precondition = ItemTypePrecondition(\n *types,\n **dict(module=f_globals['__name__'])\n )\n f_locals['__setitem__'] = __setitem__", "def test_resource_and_code_content_type(self):\n\n def do_check(path):\n \"\"\"The contents of the .iml file should certain sourceFolder entries:\n\n <sourceFolder url=\".../testprojects/src/java/org/pantsbuild/testproject/ideacodeandresources/resources_and_code\" isTestSource=\"false\" />\n <sourceFolder url=\".../testprojects/src/java/org/pantsbuild/testproject/ideacodeandresources/resources_only\" type=\"java-resource\" />\n <sourceFolder url=\".../testprojects/src/resources/org/pantsbuild/testproject/ideacodeandresources\" type=\"java-resource\" />\n ...\n \"\"\"\n found = set()\n iml_file = os.path.join(path, 'project.iml')\n self.assertTrue(os.path.exists(iml_file))\n dom = minidom.parse(iml_file)\n for sourceFolder in self._get_sourceFolders(dom):\n url = sourceFolder.getAttribute('url')\n is_test_source = sourceFolder.getAttribute('isTestSource')\n type_attr = sourceFolder.getAttribute('type')\n url = re.sub(r'^.*/testprojects/', 'testprojects/', url)\n found.add(url)\n if url == 'testprojects/src/resources/org/pantsbuild/testproject/ideacodeandresources':\n self.assertEquals('java-resource', type_attr)\n self.assertEquals('False', is_test_source)\n if url == 'testprojects/tests/resources/org/pantsbuild/testproject/ideacodeandresources':\n self.assertEquals('java-test-resource', type_attr)\n self.assertEquals('True', is_test_source)\n if url == 'testprojects/src/java/org/pantsbuild/testproject/ideacodeandresources':\n self.assertEquals('', type_attr)\n self.assertEquals('False', is_test_source)\n if url == 'testprojects/tests/java/org/pantsbuild/testproject/ideacodeandresources':\n self.assertEquals('', type_attr)\n self.assertEquals('True', is_test_source)\n\n self.assertEquals(set([\n 'testprojects/src/resources/org/pantsbuild/testproject/ideacodeandresources',\n 'testprojects/src/java/org/pantsbuild/testproject/ideacodeandresources',\n 'testprojects/tests/resources/org/pantsbuild/testproject/ideacodeandresources',\n 'testprojects/tests/java/org/pantsbuild/testproject/ideacodeandresources',\n ]), found)\n\n self._idea_test([\n 'testprojects/src/java/org/pantsbuild/testproject/ideacodeandresources::',\n 'testprojects/tests/java/org/pantsbuild/testproject/ideacodeandresources::'\n ], check_func=do_check)", "def test_data_type_id(self):\n self.assertTrue(self.tester.data_type(ret_id=True), 2)", "def test_meta_data_is_not_inherited(self):", "def test_import():\n assert hasattr(waves, 'wave_number')", "def __contains__(self, item):\n pass", "def test_superType(self):\n self.assertTrue(ChangeType().superType is not None)", "def __contains__(self, item):\n return item in self.contents", "def is_expected_content_type(\n response_content_type: str, expected_content_type: str\n) -> bool:\n if expected_content_type == \"application/json\":\n return json_re.match(response_content_type) is not None\n return expected_content_type in response_content_type", "def addCustomTests(self, tests):\n pass" ]
[ "0.7456461", "0.6151709", "0.57726073", "0.5564573", "0.5552539", "0.5481914", "0.5474497", "0.54267174", "0.5376972", "0.534768", "0.5330371", "0.5328861", "0.5311295", "0.5270746", "0.52539307", "0.5240947", "0.5239048", "0.5234515", "0.5231774", "0.5193669", "0.5185735", "0.5179525", "0.5166746", "0.51475644", "0.51340014", "0.5124719", "0.5124719", "0.5124719", "0.5124719", "0.5124719", "0.5119233", "0.5118847", "0.51181513", "0.5117686", "0.50999516", "0.50829405", "0.5079793", "0.5077572", "0.50673217", "0.50395745", "0.50342137", "0.5020132", "0.500541", "0.498977", "0.49879414", "0.49806002", "0.4964097", "0.4963661", "0.49625012", "0.49585533", "0.4957017", "0.49536058", "0.49525946", "0.49522766", "0.4944548", "0.49247813", "0.49201918", "0.49198037", "0.49136797", "0.49136797", "0.4909315", "0.4894661", "0.4892704", "0.4889798", "0.48865572", "0.48841822", "0.48834845", "0.48803052", "0.48786584", "0.48775184", "0.48771653", "0.48652577", "0.4860535", "0.48533288", "0.48523772", "0.48511901", "0.4850999", "0.48494107", "0.48403952", "0.48343217", "0.4822592", "0.4819764", "0.4819764", "0.48194623", "0.47970113", "0.4793643", "0.4792279", "0.47899556", "0.478611", "0.47845143", "0.47731712", "0.47728238", "0.4768726", "0.47617006", "0.47573325", "0.47569185", "0.47532183", "0.47502396", "0.4749082", "0.4745769" ]
0.7806248
0
Implement the setup for the handler.
def setup(self) -> None:
    pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup(self, *args, **kwargs):\n pass", "def _setup(self) -> None:\n\t\treturn", "def setUp(self):\n\n installHandler()", "def setUp(self):\n\n installHandler()", "def setup(self,**kwargs):\n pass", "def setup(self):\n raise NotImplementedError(\"Need to be implemented in subclasses\")", "def setup(cls):\n super().setup()\n cls.http_handler = cast(\n HttpHandler, cls._skill.skill_context.handlers.http_handler\n )\n cls.logger = cls._skill.skill_context.logger\n\n cls.http_dialogues = cast(\n HttpDialogues, cls._skill.skill_context.http_dialogues\n )\n\n cls.get_method = \"get\"\n cls.post_method = \"post\"\n cls.url = \"some_url\"\n cls.version = \"some_version\"\n cls.headers = \"some_headers\"\n cls.body = b\"some_body\"\n cls.sender = \"fetchai/some_skill:0.1.0\"\n cls.skill_id = str(cls._skill.skill_context.skill_id)\n\n cls.status_code = 100\n cls.status_text = \"some_status_text\"\n\n cls.content = b\"some_content\"\n cls.list_of_messages = (\n DialogueMessage(\n HttpMessage.Performative.REQUEST,\n {\n \"method\": cls.get_method,\n \"url\": cls.url,\n \"version\": cls.version,\n \"headers\": cls.headers,\n \"body\": cls.body,\n },\n ),\n )", "def _setup(self):\n pass", "def _setup(self):\n pass", "def _setup(self):\n pass", "def _setup(self):\n pass", "def _setup(self):\n pass", "def setup( self ):", "def _setup(self):", "def _setup(self):", "def setup(self):\n raise NotImplementedError", "def setup(self):\n raise NotImplementedError", "def setup(self):\n raise NotImplementedError", "def setup(self):\n raise NotImplementedError", "async def setup(self):\n\t\tlogging.config.dictConfig(self.log_settings['log'])\n\t\tself.logger = logging.getLogger('Responder3')\n\t\tself.create_dir_strucutre()\n\n\t\tif 'handlers' in self.log_settings:\n\t\t\tasync for handlerclass, handler in self.get_handlers():\n\t\t\t\tawait self.start_extension(handlerclass, self.log_settings[self.log_settings['handlers'][handler]])", "async def setup(self, ctx):\n pass", "def setup(self) -> None:\n self.setup_logging()\n self.setup_plugins()\n self.post_setup()", "def setup(self) -> None:", "async def _setup(self):", "def _setup(self):\n raise NotImplementedError()", "def setUp(self):\n h = self.MyTestHandler()\n h.request = Request.blank('/rpc/')\n h.response = Response()\n self.handler = h", "def setup(self):\n pass", "def test_setup(self):\n with pytest.raises(NotImplementedError):\n self.handler.setup()", "def setup(self):\n\t\tpass", "async def _setup(self, *args, **kwargs):\n return self", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\r\n pass", "def setup(self):\n pass", "def setup(self):\n ...", "def setup(self,context,result):\n pass", "def __init__(self, handler):\n self.__handler = handler", "def setup(self):\n self.ae = None", "def setup(self, app_args):\n raise NotImplementedError", "def test_init_adds_handler(self):\n pass", "async def setup(self):\n pass", "def setup_class(cls):\n cls.handler = MyScaffoldHandler(\"handler\", SkillContext())", "def setup(self):", "def setup(self):", "def setup(self):", "def setup(self):", "def setup(self):\n self.build_serverkeyhash()\n self.build_agent_pubkey()\n self.load_registration_key()", "def prepare(self):\n self.uri = self.request.uri\n self.path = 
self.request.uri.split('?')[0]\n self.method = self.path.split('/')[-1]\n self.default_methods = {}\n #\n # You can use the before_handler in a local controller to\n # process your own prepare stuff.\n # a common use case is to call: self.print_debug_info().\n # which then applies only to this specific handler.\n # \n before_handler = getattr(self, \"before_handler\", None)\n print(\"calling before_handler for \" + str(self.__class__))\n if callable(before_handler):\n before_handler()", "def post_setup(self, context):\n pass", "def setUp(self):\n self.simulation = FooSimulation(count=5, g=6, h=9, i=12)\n self.foo = Foo(simulation=self.simulation, name='foo', a=4, b=42, c=\"Hello\")\n self.handler = Handler(ProxyLock(self.simulation),\n ProxyLock(self.foo),\n [Attribute(\"count\"), Attribute(\"g\")],\n [Attribute('b')],\n [Attribute('a')])", "def setup(self):\n pass # pragma: no cover", "def setup(self):\n raise NotImplemented", "def setup_hooks(self):\n pass", "def setup(self, *args, **kwargs):\n return True", "def initialize(self, context: InitCommandContext) -> None:\n super().initialize(context)\n self.handler.initialize(context, self.logger)", "def prepare(self):\n return HandlerReady()", "def setup(self):\n self.log.debug('upm - in upm setup()')\n # Add resource setup code here", "def on_setup(self, request, trigger_context):\n raise NotImplementedError", "def setup_method(self):\n self.ae = None", "def setup(cls):\n super().setup()\n cls.search_behaviour = cast(\n GenericSearchBehaviour, cls._skill.skill_context.behaviours.search\n )\n cls.tx_behaviour = cast(\n GenericTransactionBehaviour, cls._skill.skill_context.behaviours.transaction\n )\n cls.strategy = cast(GenericStrategy, cls._skill.skill_context.strategy)\n\n cls.logger = cls._skill.skill_context.logger", "def _setup(app_obj):", "def __init__(self, handler, **kwargs):\n # Attributes used by the auth mixins (required).\n self.request = RequestAdapter(handler.request)\n self.settings = kwargs\n\n # Attributes used only internally by this class (specific to the webapp\n # implementation).\n self._request = handler.request\n self._response = handler.response", "def setup(self, ctxConfig, drvConfig):\n superClass.setup(self, ctxConfig, drvConfig)\n # TODO Your startup stuff here", "def setup(cls):\n cls.location = {\"longitude\": 0.1270, \"latitude\": 51.5194}\n cls.search_query = {\n \"search_key\": \"intro_service\",\n \"search_value\": \"intro_alice\",\n \"constraint_type\": \"==\",\n }\n cls.search_radius = 5.0\n cls.admin_host = \"127.0.0.1\"\n cls.admin_port = 8021\n cls.ledger_url = \"http://127.0.0.1:9000\"\n config_overrides = {\n \"models\": {\n \"strategy\": {\n \"args\": {\n \"location\": cls.location,\n \"search_query\": cls.search_query,\n \"search_radius\": cls.search_radius,\n \"admin_host\": cls.admin_host,\n \"admin_port\": cls.admin_port,\n \"ledger_url\": cls.ledger_url,\n }\n }\n },\n }\n\n super().setup(config_overrides=config_overrides)\n\n # behaviours\n cls.faber_behaviour = cast(\n FaberBehaviour,\n cls._skill.skill_context.behaviours.faber,\n )\n\n # dialogues\n cls.default_dialogues = cast(\n DefaultDialogues, cls._skill.skill_context.default_dialogues\n )\n cls.http_dialogues = cast(\n HttpDialogues, cls._skill.skill_context.http_dialogues\n )\n cls.oef_search_dialogues = cast(\n OefSearchDialogues, cls._skill.skill_context.oef_search_dialogues\n )\n\n # handlers\n cls.http_handler = cast(HttpHandler, cls._skill.skill_context.handlers.http)\n cls.oef_search_handler = cast(\n OefSearchHandler, 
cls._skill.skill_context.handlers.oef_search\n )\n\n # models\n cls.strategy = cast(Strategy, cls._skill.skill_context.strategy)\n\n cls.logger = cls._skill.skill_context.logger\n\n # mocked objects\n cls.mocked_method = \"SOME_METHOD\"\n cls.mocked_url = \"www.some-url.com\"\n cls.mocked_version = \"some_version\"\n cls.mocked_headers = \"some_headers\"\n cls.body_dict = {\"some_key\": \"some_value\"}\n cls.body_str = \"some_body\"\n cls.body_bytes = b\"some_body\"\n cls.mocked_body_bytes = json.dumps(cls.body_str).encode(\"utf-8\")\n cls.mocked_query = Query(\n [Constraint(\"some_attribute_name\", ConstraintType(\"==\", \"some_value\"))],\n DataModel(\n \"some_data_model_name\",\n [\n Attribute(\n \"some_attribute_name\",\n str,\n False,\n \"Some attribute descriptions.\",\n )\n ],\n ),\n )\n cls.mocked_proposal = Description(\n {\n \"contract_address\": \"some_contract_address\",\n \"token_id\": \"123456\",\n \"trade_nonce\": \"876438756348568\",\n \"from_supply\": \"543\",\n \"to_supply\": \"432\",\n \"value\": \"67\",\n }\n )\n\n # list of messages\n cls.list_of_http_messages = (\n DialogueMessage(\n HttpMessage.Performative.REQUEST,\n {\n \"method\": cls.mocked_method,\n \"url\": cls.mocked_url,\n \"headers\": cls.mocked_headers,\n \"version\": cls.mocked_version,\n \"body\": cls.mocked_body_bytes,\n },\n is_incoming=False,\n ),\n )\n\n cls.list_of_oef_search_messages = (\n DialogueMessage(\n OefSearchMessage.Performative.SEARCH_SERVICES,\n {\"query\": cls.mocked_query},\n ),\n )", "def setup(self): \n pass", "async def async_setup(self):\n pass", "def setUp(self):\n\n self._set_up()", "def __init__(self, handler):\n\n self.event_handler = handler", "def setUp(self):\n class TestHandler(SimpleHandler):\n view = mock.MagicMock()\n\n self.route = mock.Mock()\n self.request = mock.Mock()\n self.handler = TestHandler(self.route)\n self.route.get_handler.return_value = self.handler\n self.view = TestHandler.view", "async def setup(self):\n load_base_templates()\n uris = URI.gather()\n for uri, resource in uris.items():\n methods = resource.methods\n if \"get\" not in methods:\n methods[\"get\"] = None\n\n for method in methods.keys():\n self.app.add_routes([\n getattr(aioweb, method)(uri, resource.process)\n ])\n self.app.add_routes([aioweb.get(\"/hello\", hello)])\n\n # TMP code\n max_age = 3600 * 24 * 365 # 1 year\n setup(self.app, PonyStorage(max_age=max_age))\n self.preparing_task = asyncio.create_task(self.prepare_web())", "def _handler_init(self):\r\n\t\tself._handlers[\"player-join\"] = FunctionDelegate()\r\n\t\tself._handlers[\"player-quit\"] = FunctionDelegate()\r\n\t\tself._handlers[\"game-start\"] = FunctionDelegate()\r\n\t\tself._handlers[\"game-stop\"] = FunctionDelegate()", "def __init__(self):\n self.setup_called = False", "def _setup(self):\n if self.image_blob_key:\n self.fullsize_url = self.get_image_url()\n self.thumbnail_url = self.get_image_url(self.DEFAULT_THUMBNAIL_SIZE)\n if self.is_saved():\n key = self.key()\n self.num_votes = Vote.all().filter(\"photo_id =\", key.id()).count()\n template = '%s/index.html?photoId=%s%s'\n self.vote_cta_url = template % (\n handlers.get_base_url(), key.id(), '&action=VOTE')\n template = '%s/photo.html?photoId=%s'\n self.photo_content_url = template % (\n handlers.get_base_url(), key.id())\n else:\n self.num_votes = 0", "def setup(self, rc):\n pass", "def setup(self):\n self.log.debug('RFSwitch - in RFSwitch setup()')\n # Add resource setup code here\n print(\"Calling RFSwitch:setup\")", "def test_setup(self):\n assert 
self.http_handler.setup() is None\n self.assert_quantity_in_outbox(0)", "def _configure(self):\n pass", "def Setup(cls):\n\t\tif cls.SINGLETON is None:\n\t\t\tcls.SINGLETON = Signals()\n\t\tcls.SINGLETON.setup()", "def __init__(self, handler):\n self._handler = handler\n self._storage = []", "def setup(self):\n self.create_songs_index()\n self.create_fingerprints_index()\n self.delete_unfingerprinted()", "def __init__(self, handler_factory):\n self.handler_factory = handler_factory", "def setUpClass(cls):\n cls.setup_log()\n cls.setup_conn()\n cls.setup_cache()\n cls.setup_params()", "def setup_script(self, *args, **kwargs):\n pass", "async def setup(self, context: InjectionContext):", "def setup(self):\r\n \r\n if self.requestedAction == admin.ACTION_EDIT or self.requestedAction == admin.ACTION_CREATE:\r\n \r\n # Set the required parameters\r\n for arg in RadiusAuthRestHandler.REQUIRED_PARAMS:\r\n self.supportedArgs.addReqArg(arg)\r\n \r\n # Set up the valid parameters\r\n for arg in RadiusAuthRestHandler.VALID_PARAMS:\r\n if arg not in RadiusAuthRestHandler.REQUIRED_PARAMS:\r\n self.supportedArgs.addOptArg(arg)", "async def prepare(self):\n pass", "def __init__(self):\n from django.core.handlers.wsgi import WSGIHandler as djhandler\n self._handler = djhandler()", "def _setup(self) -> None:\n self._api = get_api(\n self._password,\n self._host,\n self._username,\n self._port,\n self._ssl,\n )\n\n self._info = self._api.get_info()\n self.device_name = self._info.get(\"DeviceName\", DEFAULT_NAME)\n self.model = self._info.get(\"ModelName\")\n self.firmware_version = self._info.get(\"Firmwareversion\")\n\n for model in MODELS_V2:\n if self.model.startswith(model):\n self._method_version = 2" ]
[ "0.74647546", "0.7427192", "0.74090225", "0.74090225", "0.73212504", "0.7227166", "0.7225722", "0.7213479", "0.7213479", "0.7213479", "0.7213479", "0.7213479", "0.7210512", "0.7194558", "0.7194558", "0.7176307", "0.7176307", "0.7176307", "0.7176307", "0.7132219", "0.7116914", "0.7105827", "0.70878446", "0.7074322", "0.70640355", "0.70131993", "0.70002794", "0.6995695", "0.69829434", "0.6973218", "0.69602334", "0.69602334", "0.69602334", "0.69602334", "0.69602334", "0.69602334", "0.69602334", "0.69602334", "0.69602334", "0.69602334", "0.69602334", "0.69602334", "0.6956978", "0.6893244", "0.6817163", "0.6798899", "0.6763085", "0.6737236", "0.67066085", "0.67063206", "0.6677959", "0.6656652", "0.6650302", "0.6650302", "0.6650302", "0.6650302", "0.66454536", "0.66397196", "0.66157866", "0.6608052", "0.6595438", "0.65815175", "0.6567989", "0.65664196", "0.65442306", "0.6527076", "0.651796", "0.65130967", "0.64878744", "0.6479978", "0.6473933", "0.64683807", "0.6462236", "0.6455409", "0.645479", "0.64181376", "0.6382526", "0.6377296", "0.6373025", "0.6363469", "0.635759", "0.63569665", "0.6345585", "0.63451546", "0.6320587", "0.6311438", "0.63064384", "0.6300261", "0.62953866", "0.628488", "0.62824607", "0.6281564", "0.62660784", "0.6263719", "0.6263057", "0.6260547", "0.6224438", "0.6217898" ]
0.7172614
20
Implement the reaction to a message.
def handle(self, message: Message) -> None:
    self.handled_message = message
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def custom_mqtt_reaction(self, topic, message):\n raise NotImplementedError(\"Must override method custom_mqtt_reaction\")", "async def process(self, chan_id: str, msg_id: str, emoji: str, member: discord.Member, add: bool):\n logger.debug(f\"Processing reaction: [ add: {add}, msg_id: {msg_id}, emoji: {emoji}, member: {member} ]\")\n\n if isinstance(chan_id, int):\n chan_id = str(chan_id)\n \n if not isinstance(chan_id, str):\n raise TypeError(f\"Arg 1: Expected a channel id (str)! (got {type(chan_id)} instead)\")\n \n if isinstance(msg_id, int):\n msg_id = str(msg_id)\n \n if not isinstance(msg_id, str):\n raise TypeError(f\"Arg 2: Expected a message id (str)! (got {type(msg_id)} instead)\")\n\n\n if not chan_id in self._messages:\n logger.debug(\"No message is listened to in this channel.\")\n return\n\n if not msg_id in self._messages[chan_id]:\n logger.debug(\"The message was not listened to.\")\n return\n \n if not emoji in self._messages[chan_id][msg_id][\"reactions\"]:\n logger.debug(\"The emoji wasn't listened to.\")\n return\n \n logger.debug(\"The reaction is listened to! Calling callbacks!\")\n\n if add:\n callbacks = self._messages[chan_id][msg_id][\"reactions\"][emoji][\"add_callbacks\"]\n else:\n callbacks = self._messages[chan_id][msg_id][\"reactions\"][emoji][\"rm_callbacks\"]\n \n for callback in callbacks:\n await callback(msg_id, emoji, member)", "async def handle(self, message: discord.Message):\n raise NotImplementedError()", "def onMessage(self, message):\n raise NotImplementedError", "async def on_raw_reaction_add(self, payload):\n\n\t\tguild = self.bot.get_guild(payload.guild_id)\n\t\tif guild is not None:\n\t\t\tchannel = guild.get_channel(payload.channel_id)\n\t\t\tmessage = await channel.fetch_message(payload.message_id)\n\t\t\tuser = guild.get_member(payload.user_id)\n\n\t\t\t# Update cached leaderboards\n\t\t\tif not payload.member.bot:\n\t\t\t\tif payload.message_id in self.cachedMessages:\n\t\t\t\t\tif payload.emoji.name == \"➡️\":\n\t\t\t\t\t\tawait self.update_leaderboard_message(message, 1)\n\t\t\t\t\t\tawait message.remove_reaction(\"➡️\", user)\n\t\t\t\t\telif payload.emoji.name == \"⬅️\":\n\t\t\t\t\t\tawait self.update_leaderboard_message(message, -1)\n\t\t\t\t\t\tawait message.remove_reaction(\"⬅️\", user)\n\n\t\t\t# Update reaction leaderboards\n\t\t\tif not payload.member.bot:\n\t\t\t\treactionLeaderboard = self.leaderboards[str(payload.guild_id)][\"reactionLeaderboard\"]\n\n\t\t\t\tif payload.emoji.id is not None:\n\t\t\t\t\tfor guildEmoji in guild.emojis:\n\t\t\t\t\t\tif payload.emoji.id == guildEmoji.id:\n\t\t\t\t\t\t\tif (\"<:\" + str(payload.emoji.name) + \":\" + str(payload.emoji.id) + \">\") not in reactionLeaderboard:\n\t\t\t\t\t\t\t\treactionLeaderboard[\"<:\" + str(payload.emoji.name) + \":\" + str(payload.emoji.id) + \">\"] = 1\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\treactionLeaderboard[\"<:\" + str(payload.emoji.name) + \":\" + str(payload.emoji.id) + \">\"] += 1\n\n\n\n\t\t\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tif payload.emoji.name not in reactionLeaderboard:\n\t\t\t\t\t\treactionLeaderboard[str(payload.emoji.name)] = 1\n\t\t\t\t\telse:\n\t\t\t\t\t\treactionLeaderboard[str(payload.emoji.name)] += 1\n\n\t\t\t\tif str(payload.emoji.id) in self.leaderboards[str(payload.guild_id)][\"emojiLeaderboard\"]:\n\t\t\t\t\tself.leaderboards[str(payload.guild_id)][\"emojiLeaderboard\"][str(payload.emoji.id)] += 1", "def addReaction(self, *args):\n return _libsbml.Model_addReaction(self, *args)", "def react(self, request, *args, **kwargs):\n 
comment = self.get_object()\n serializer = ReactionCommentModelSerializer(\n data=request.data, context={'user': request.user, 'comment': comment})\n \n try:\n serializer.is_valid(raise_exception=True)\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n except AssertionError:\n return Response(\n {'message': \"The comment's reaction has been delete.\"}, status=status.HTTP_200_OK)", "async def reaction_on_reschedule_message(self, ctx, emoji, reschedule_message):\n if ctx.author.id != reschedule_message.opponent_user_id:\n return\n try:\n tournament = self.get_tournament(ctx.guild.id)\n tournament.current_bracket_id = reschedule_message.bracket_id\n if not tournament.current_bracket:\n raise tosurnament.UnknownError(\"Bracket not found\")\n if emoji.name == \"👍\":\n await self.agree_to_reschedule(ctx, reschedule_message, tournament)\n else:\n self.bot.session.delete(reschedule_message)\n ally_to_mention = None\n if reschedule_message.ally_team_role_id:\n ally_to_mention = tosurnament.get_role(ctx.guild.roles, reschedule_message.ally_team_role_id)\n if not ally_to_mention:\n ally_to_mention = ctx.guild.get_member(reschedule_message.ally_user_id)\n if ally_to_mention:\n await self.send_reply(ctx, \"refused\", ally_to_mention.mention, reschedule_message.match_id)\n else:\n raise tosurnament.OpponentNotFound(ctx.author.mention)\n except Exception as e:\n await self.on_cog_command_error(ctx, e)", "def messageReceived(self, message):\n raise NotImplementedError(self)", "async def route_message(self, msg):\n raise NotImplementedError", "def receive_message(self, message):\r\n return", "def _handle_message(self, msg):\n self.event('message', msg)", "async def on_raw_reaction_add(self, payload: discord.RawReactionActionEvent):\n # Get the message id of the message that the user reacted to.\n message_id = payload.message_id\n\n # Get the message id of the message we want the user to react to.\n actual_message_id = constants.MessageIDs.RULES_MSGID\n\n # Compare that id's match, and if true continue to give the role.\n if message_id == actual_message_id:\n guild_id = payload.guild_id\n guild = self.bot.get_guild(guild_id)\n role = get(payload.member.guild.roles, name='Not Verified')\n\n if role is not None:\n member = get(guild.members, id=payload.user_id)\n if member is not None:\n await payload.member.add_roles(role)\n print(f\"Added role to {member}\")\n else:\n print(\"User not found . . .\")\n else:\n print(\"Role not found . . 
.\")", "def message_react(channel, message, reaction):\n\n if not settings.SLACK_TOKEN:\n return {'ok': False, 'error': 'config_error'}\n\n client = WebClient(token=settings.SLACK_TOKEN)\n\n try:\n response = client.reactions_add(channel=channel, timestamp=message, name=reaction)\n assert response['ok'] is True\n return response\n except SlackApiError as e:\n assert e.response['ok'] is False\n return e.response", "def receive_message(self, message):", "def received(self, message):\n raise NotImplementedError()", "def handle_message(self, message):", "def process(self, msg):\n raise NotImplemented", "def handle_message(self, msg):\n pass", "async def on_raw_reaction_add(self, payload: discord.RawReactionActionEvent):\n \n guild = self.bot.get_guild(payload.guild_id)\n user = discord.utils.get(guild.members, id=payload.user_id)\n # Skipping bot reactions\n if user.bot:\n return\n\n # Search the event by message id\n message_id = payload.message_id\n channel_id = payload.channel_id\n guild_id = payload.guild_id\n event = connector.getEventByMessage(guild_id, message_id, channel_id)\n if event is None:\n return\n\n role = discord.utils.get(payload.member.guild.roles, id=event.role_id)\n\n if (payload.event_type == \"REACTION_ADD\") and (event.emoji == str(payload.emoji)):\n # Adding role to user\n await payload.member.add_roles(role)\n elif (payload.event_type == \"REACTION_REMOVE\") and (event.emoji == str(payload.emoji)) and (role in payload.member.roles):\n # Remove role from user\n await payload.member.remove_roles(role)", "def on_receive(self, msg):\n raise NotImplementedError", "def msg(self, message, **kwargs):\n self.crafter.msg(message, {\"type\": \"crafting\"})", "def handle(self, message):", "def receive_message(self, message):\r\n self.state.receive_message(message)\r\n return", "async def process(self, message):\n return await self.dispatcher.dispatch(message)", "async def react_with_action(\n pre_command, message: Message, is_private: bool, guild_id: int, author_id: int\n):\n # Change nickname\n if (\n is_whitelisted(\"nickname_auto_change\", guild_id)\n and message.author.permissions_in(message.channel).change_nickname\n ):\n try:\n if len(message.content.split(\" \")) > 2:\n if (\n message.content.split(\" \")[0] == \"i\",\n message.content.split(\" \")[1],\n ) == (\"i\", \"am\"):\n new_name = message.content.partition(\" \")[2].partition(\" \")[2]\n await change_nickname_with(pre_command, message, new_name)\n return {ACTION: True}\n if (\n message.content.split(\" \")[0] == \"i\"\n and message.content.split(\" \")[1] == \"am\"\n ):\n new_name = message.content.partition(\" \")[2].partition(\" \")[2]\n if len(new_name) <= 32:\n await change_nickname_with(pre_command, message, new_name)\n return {ACTION: True}\n if len(message.content.split(\" \")) > 1:\n if message.content.split(\" \")[0] in [\"i'm\", \"im\"]:\n new_name = message.content.partition(\" \")[2]\n if len(new_name) <= 32:\n await change_nickname_with(pre_command, message, new_name)\n return {ACTION: True}\n except Forbidden:\n pass\n\n # Add reaction\n if (\n message.author.id\n in [constants.KAPPAid, constants.RAZid, constants.POLYid, constants.NYAid]\n and is_whitelisted(\"uumuu_reaction\", message.guild.id)\n and message.content.lower() in [\"owo\", \"uwu\", \"umu\"]\n ):\n if await pre_command(\n message=message,\n channel=message.channel,\n command=\"uumuu_reaction\",\n delete_message=False,\n is_typing=False,\n ):\n await message.add_reaction(\":uumuu:715594968328175687\")\n return {ACTION: True}\n return {}", 
"def on_action(self, message):\n with self.handler.wrapee as wrapee:\n log.debug(\"Calling {method} on {name}\", method=message['action'], name=self.name)\n try:\n func = getattr(wrapee, message['action'])\n except AttributeError as ex:\n log.warn(\"Trying to call a method {method} that does not exsist!\",\n method=ex.args[0])\n return\n res, msg = func(*message['args'])\n if not res:\n log.warn(\"Error while calling {method}: {msg}\", msg=msg,\n method=message['action'])\n else:\n log.debug(\"Called method succesfully\")\n for protocol in self.service.protocols:\n protocol.send_packet()\n if msg != '':\n protocol.send_news(msg)", "def _add_message(self, chan_id: str, msg_id: str):\n if not msg_id in self._messages[chan_id]:\n self._messages[chan_id][msg_id] = {\n \"reactions\": {}\n }\n else:\n raise ValueError(\"ReactionListener tried to create space for an already listened message!\")", "def _intent(self) -> MessageIntent:\r\n pass", "def run(self):\n alogger.info(\"Recieved message from %s, Message: (%d) %s\" % (self.client.getaddress(), self.action_type, self.message))\n \n #Try to call th function associated with this message type.\n #format = \"handle_<type>\" (eg: handle_100)\n fn = globals().get(\"handle_\" + str(self.action_type))\n if fn and callable(fn):\n fn(self.message, self.address, self.client)\n else:\n alogger.info(\"Received unknown message from %d, type: %d\" % (self.client.getaddress(), self.action_type))", "def receive(self, message):", "async def on_raw_reaction_add(self, payload):\n emoji = str(payload.emoji)\n member = payload.member\n\n if member.bot:\n return\n\n channel = await self.bot.fetch_channel(payload.channel_id)\n message = await channel.fetch_message(payload.message_id)\n\n if emoji != settings.get_ticket_create_emoji():\n return\n \n if len(message.embeds) == 0 or message.embeds[0].title != settings.get_ticket_panel_embed().title:\n return\n \n await message.remove_reaction(emoji, member)\n await self.create_ticket(member,message.guild)", "async def edit(self, ctx: commands.Context, message_id: int):\n\n # Standard wait_for check function for message inputs, makes sure the command user's messages in command channel are considered\n def message_check(m: discord.Message):\n return m.author == ctx.author and m.channel == ctx.channel\n\n # Standard reaction check that ensures no duplicate reacrole entry, just name the relevant message 'm' before adding this one to check kwarg in wait_for\n def reaction_check_nd(_r: discord.Reaction, _u):\n return _u == ctx.author and _r.message == m and str(_r.emoji) not in self._cache[ctx.guild.id][PM.id]\n\n if message_id in self._cache[ctx.guild.id]:\n\n # Not actually channel id int but I decided to name it that way anyway\n chanid = await self.bot.pool.fetchrow(\"SELECT channelid FROM selfrole_lookup WHERE messageid = $1\", message_id)\n chan: discord.TextChannel = ctx.guild.get_channel(chanid['channelid'])\n\n # Currently need message content for title, might start saving title in db to avoid this api call idk\n try:\n PM: discord.Message = await chan.fetch_message(message_id)\n except discord.NotFound:\n await ctx.send(\"It would seem that the message for the role menu you're trying to edit has been deleted, please try creating a new one\")\n return\n\n buttons = [\"\\U0001f1e6\", \"\\U0001f1e7\", \"\\U0001f1e8\", \"\\U0001f1e9\"]\n\n e1 = discord.Embed(title=\"What aspect of the menu do you wish to change?\",\n description=\"\\U0001f1e6 - Add a role\\n\\n\"\n \"\\U0001f1e7 - Remove existing role\\n\\n\"\n 
\"\\U0001f1e8 - Edit the reaction of a role\\n\\n\"\n \"\\U0001f1e9 - Change the title\",\n colour=discord.Colour.blue())\n # Send the initial menu\n menu = await ctx.send(embed=e1)\n\n for button in buttons:\n await menu.add_reaction(button)\n\n # We need the first reaction where the emoji is one of the buttons\n def button_check(_r, _u):\n return _u == ctx.author and _r.message == menu and str(_r.emoji) in buttons\n # Get the option the user chose\n try:\n r, u = await self.bot.wait_for('reaction_add', check=button_check, timeout=20)\n except asyncio.TimeoutError:\n await ctx.send(\"Timed out\")\n return\n\n # If user wanted to add a new role to the menu\n if str(r.emoji) == buttons[0]:\n await menu.clear_reactions()\n await menu.edit(content=\"What role do you wish to be added? Enter its mention, id, or name\", embed=None)\n\n # Get the role object for the new role to be added\n try:\n m = await self.bot.wait_for('message', check=message_check, timeout=30)\n newrole = await self.rc.convert(ctx, m.content)\n\n if newrole.id in self._cache[ctx.guild.id][PM.id].values():\n await ctx.send(\"Error: role already exists in the menu, perhaps you meant to edit it?\")\n return\n except asyncio.TimeoutError:\n await ctx.send(\"Timed out\")\n return\n except commands.BadArgument:\n await ctx.send(\"Role not found, please try again\")\n return\n\n m = await ctx.send(f\"React on this message with the reaction that will correspond to the role `{newrole}`\")\n\n # Get the reaction/emoji that will correspond to the new role and yank everything into db\n try:\n r, u = await self.bot.wait_for('reaction_add', check=reaction_check_nd, timeout=30)\n self._cache[ctx.guild.id][PM.id][str(r.emoji)] = newrole.id\n\n query = \"\"\"\n INSERT INTO selfrole (messageid, emoji, roleid)\n VALUES ($1, $2, $3)\n \"\"\"\n\n await self.bot.pool.execute(query, PM.id, str(r.emoji), newrole.id)\n\n # Standard way of getting the embed description of the role menu\n newmenudesc = \"\\n\\n\".join([f\"{k} - {ctx.guild.get_role(v)}\" for k, v in self._cache[ctx.guild.id][PM.id].items()])\n\n newembed = discord.Embed(title=PM.embeds[0].title,\n description=newmenudesc,\n colour=discord.Colour.blue())\n\n await PM.edit(embed=newembed)\n await PM.add_reaction(r.emoji)\n await ctx.send(\"Menu edit completed successfully, you may now delete the messages other than the menu itself\")\n\n except asyncio.TimeoutError:\n await ctx.send(\"Timed out\")\n\n elif str(r.emoji) == buttons[1]:\n # Gotta yank the buttons to make everything squeaky clean\n await menu.clear_reactions()\n await menu.edit(content=\"Enter the role you wish to remove from the menu, can be mention, id or name\",\n embed=None)\n\n try:\n # Get role from user\n m = await self.bot.wait_for('message', check=message_check, timeout=20)\n role = await self.rc.convert(ctx, m.content)\n\n # If user trying to edit reaction to role that wasn't even in the menu to begin with\n if role.id not in self._cache[ctx.guild.id][PM.id].values():\n raise commands.BadArgument(\"Role not in cache\")\n\n # Get the key to delete using the old fashioned way, and subsequently delete it\n targetkey = \"\"\n for key, value in self._cache[ctx.guild.id][PM.id].items():\n if value == role.id:\n targetkey = key\n break\n self._cache[ctx.guild.id][PM.id].pop(targetkey)\n\n # After everything is done and dusted, make database entry and edit the menu\n query = \"\"\"\n DELETE FROM selfrole WHERE messageid = $1 AND roleid = $2\n \"\"\"\n await self.bot.pool.execute(query, PM.id, role.id)\n newmenudesc = 
\"\\n\\n\".join(\n [f\"{k} - {ctx.guild.get_role(v)}\" for k, v in self._cache[ctx.guild.id][PM.id].items()])\n\n newembed = discord.Embed(title=PM.embeds[0].title,\n description=newmenudesc,\n colour=discord.Colour.blue())\n await PM.edit(embed=newembed)\n await PM.clear_reaction(targetkey)\n await ctx.send(\n \"Menu edit completed successfully, you may now delete the messages other than the menu itself\")\n except asyncio.TimeoutError:\n await ctx.send(\"Timed out\")\n return\n except commands.BadArgument:\n await ctx.send(\"I don't think that role exists in that menu, run the command again\")\n return\n\n elif str(r.emoji) == buttons[2]:\n # Same drill, remove buttons to make it look clean\n await menu.clear_reactions()\n await menu.edit(embed=None, content=\"Enter the role for which you wish to change the reaction.\")\n\n try:\n m = await self.bot.wait_for('message', check=message_check, timeout=20)\n role = await self.rc.convert(ctx, m.content)\n\n if role.id not in self._cache[ctx.guild.id][PM.id].values():\n raise commands.BadArgument(\"Role not in cache\")\n\n except asyncio.TimeoutError:\n await ctx.send(\"Timed out\")\n return\n except commands.BadArgument:\n await ctx.send(\"Couldn't find the role you wished to edit in the menu\")\n return\n\n # Get the reaction/emoji that will correspond to the new role and yank everything into db\n m = await ctx.send(f\"React on this message with the new reaction that will correspond to the role {role}\")\n try:\n r, u = await self.bot.wait_for('reaction_add', check=reaction_check_nd, timeout=30)\n\n # Can only delete entry if have the key so....\n TargetKey = \"\" # Set default value so IDE stops screaming\n for k, v in self._cache[ctx.guild.id][PM.id].items():\n if v == role.id:\n TargetKey = k\n\n # Make new entry and delete the old one\n self._cache[ctx.guild.id][PM.id][str(r.emoji)] = role.id\n self._cache[ctx.guild.id][PM.id].pop(TargetKey)\n\n # After everything is done and dusted, at last update the database entry\n await self.bot.pool.execute(\"UPDATE selfrole SET emoji = $1 WHERE roleid = $2 AND messageid = $3\", str(r.emoji), role.id, PM.id)\n\n # Hehehehehehe\n newmenudesc = \"\\n\\n\".join(\n [f\"{k} - {ctx.guild.get_role(v)}\" for k, v in self._cache[ctx.guild.id][PM.id].items()])\n\n newembed = discord.Embed(title=PM.embeds[0].title,\n description=newmenudesc,\n colour=discord.Colour.blue())\n\n await PM.edit(embed=newembed)\n await PM.clear_reaction(TargetKey)\n await PM.add_reaction(str(r.emoji))\n await ctx.send(\n \"Menu edit completed successfully, you may now delete the messages other than the menu itself\")\n except asyncio.TimeoutError:\n await ctx.send(\"Timed out\")\n return\n\n elif str(r.emoji) == buttons[3]:\n # This one speaks for itself I think.\n await menu.clear_reactions()\n await menu.edit(embed=None, content=\"Enter the new title you want the menu to have\")\n try:\n m = await self.bot.wait_for('message', check=message_check, timeout=30)\n e = discord.Embed(title=f\"Role menu: {m.content}\",\n description=PM.embeds[0].description,\n colour=PM.embeds[0].colour)\n await PM.edit(embed=e)\n except asyncio.TimeoutError:\n await ctx.send(\"Timed out\")\n return\n\n else:\n await ctx.send(\"Menu not found in this server, double check if the id was entered correctly\")", "def handle(self, message):\n print(\"You received a message:\")\n print(message)\n # Overwrite this function to do something with the message!", "def consume_user_message(self, message):\n pass", "async def update_reactions(self, message, data):\n\n 
emojis = []\n\n for role in data.roles.all():\n if role.emoji.startswith(\":\") and role.emoji.endswith(\":\"):\n em = discord.utils.get(message.guild.emojis, name=role.emoji[1:-1])\n emojis.append(em)\n else:\n emojis.append(role.emoji)\n\n for emoji in emojis:\n await message.add_reaction(emoji)\n\n for reaction in message.reactions:\n if reaction.emoji not in emojis:\n await message.clear_reaction(reaction.emoji)", "def createReaction(self):\n return _libsbml.Model_createReaction(self)", "def Message(self, *args, **kwargs):\n pass", "def reply(self, message):\n self.logger.info(\"message came as {}\".format(message))\n message = message.lower()\n if message in [\"start over\", \"get started\", \"hello\", \"hi\", \"say hello\"]:\n self.params = \"\"\n self.readyseteatparams = \"\"\n # self.api.send_text_facebook(\n # self.user_id,\n # 'What type of recipe would you like to make? You can type \"start over\" at any time'\n # )\n # return self.api.send_facebook(self.user_id, self.config.QUESTION_MAIN)\n self.send_welcome_messages()\n return self.api.send_facebook(self.user_id, self.config.QUICK_REPLY_MAIN)\n if message in [\"more\", \"show more\"] and self.data:\n self.index += 5\n m_data = self.config.DEFAULT_TEMPLATE.copy()\n m_data[\"message\"][\"attachment\"][\"payload\"][\"elements\"] = self.data[self.index:self.index + 3]\n return self.api.send_facebook(self.user_id, m_data)\n if message == \"ask-tomorrow-payload\":\n self.usersModule.makeNotificationDaily(self.user_id)\n return self.api.send_text_facebook(self.user_id, \"This notification has been set up.\")\n if message == \"ask-week-payload\":\n self.usersModule.makeNotificationWeekly(self.user_id)\n return self.api.send_text_facebook(self.user_id, \"This notification has been set up.\")\n if message == \"activate notifications\":\n self.usersModule.makeNotificationDaily(self.user_id)\n return self.api.send_text_facebook(self.user_id, \"Notification has been activated.\")\n if message in [\"do-nothing\", \"payload_unsubscribe\"]:\n if message == \"payload_unsubscribe\":\n self.usersModule.deactivateNotification(self.user_id)\n return self.api.send_text_facebook(\n self.user_id,\n 'Notification has been deactivated. 
You can type \"start over\" anytime.')\n else:\n return self.api.send_text_facebook(\n self.user_id,\n 'You can type \"start over\" when you are looking for new recipes.')\n\n try:\n title, choice = message.split(\"_\")\n except:\n title = None\n choice = message\n\n if title == \"category\":\n self.params = \"\"\n self._type = choice\n if choice == \"dinner\":\n self.params += \"&category=89\"\n self.readyseteatparams += \"&category=89\"\n # self.api.send_text_facebook(self.user_id, \"Select a main ingredient:\")\n # return self.api.send_facebook(self.user_id, self.config.DINNER_INGREDIENTS)\n return self.api.send_facebook(self.user_id, self.config.DINNER_GUICK_REPLY)\n elif choice == \"dessert\":\n self.params += \"&category=88\"\n self.readyseteatparams += \"&category=88\"\n # self.api.send_text_facebook(self.user_id, \"What kind of dessert would you like to make?\")\n # return self.api.send_facebook(self.user_id, self.config.DESSERTS)\n return self.api.send_facebook(self.user_id, self.config.DESSERTS_QUICK_REPLY)\n elif choice == \"breakfast\":\n self.params += \"&category=87\"\n self.readyseteatparams += \"&category=87\"\n # self.api.send_text_facebook(self.user_id, \"What kind of breakfast do you want?\")\n # return self.api.send_facebook(self.user_id, self.config.BREAKFAST_QUESTION)\n return self.api.send_facebook(self.user_id, self.config.BREAKFAST_QUICK_REPLY)\n elif choice == \"appetizer\":\n self.params += \"&category=85\"\n self.readyseteatparams += \"&category=85\"\n # self.api.send_text_facebook(self.user_id, \"What kind of appetizer or snack sounds good?\")\n # return self.api.send_facebook(self.user_id, self.config.APPETIZER_QUESTION)\n return self.api.send_facebook(self.user_id, self.config.APPETIZER_QUICK_REPLY)\n elif choice == \"side dish\":\n self.params += \"&category=95\"\n self.readyseteatparams += \"&category=95\"\n # self.api.send_text_facebook(self.user_id, \"Select a main ingredient\")\n # return self.api.send_facebook(self.user_id, self.config.SIDE_DISH_QUESTION)\n return self.api.send_facebook(self.user_id, self.config.SIDE_DISH_QUICK_REPLY)\n else:\n return self.api.send_text_facebook(self.user_id,\n \"I don't know answer that belongs to {} yet\".format(message))\n\n if title == \"main-ingredient\":\n self.mainIngredient = choice\n if choice == \"chicken\":\n self.params += \"&mainingredient=76\"\n self.readyseteatparams += \"&mainingredient=76\"\n elif choice == \"beef\":\n self.params += \"&mainingredient=70\"\n self.readyseteatparams += \"&mainingredient=70\"\n elif choice == \"pork\":\n self.params += \"&mainingredient=249\"\n self.readyseteatparams += \"&mainingredient=249\"\n elif choice == \"seafood\":\n self.params += \"&mainingredient=73\"\n self.readyseteatparams += \"&mainingredient=73\"\n elif choice == \"pasta\":\n self.params += \"&mainingredient=272\"\n self.readyseteatparams += \"&mainingredient=272\"\n elif choice == \"vegetarian\":\n self.params += \"&lifestyle=299\"\n self.readyseteatparams += \"&lifestyle=299\"\n return self.api.send_facebook(self.user_id, self.config.TIME_QUICK_REPLY)\n if title == \"bre-time\":\n self.breakfastTime = choice\n if choice == \"15\":\n self.params += \"&totaltime=15\"\n self.readyseteatparams += \"&totaltime=15\"\n elif choice == \"30\":\n self.params += \"&totaltime=30\"\n self.readyseteatparams += \"&totaltime=30\"\n elif choice == \"45\":\n pass\n recipes = self.api.getRecipes(self.params)\n if not recipes:\n return self.send_no_results()\n viewMoreUrl = 
self.api.prepareViewMoreUrl(self.readyseteatparams)\n elems = self.api.prepareRecipes(recipes, viewMoreUrl)\n self.data = elems\n m_data = self.config.DEFAULT_TEMPLATE.copy()\n m_data[\"message\"][\"attachment\"][\"payload\"][\"elements\"] = self.data[self.index:self.index + 3]\n r = self.api.send_facebook(self.user_id, m_data)\n self.logger.warning(r)\n return\n if title == \"time\":\n self.time = choice\n self.params += \"&totaltime={}\".format(choice)\n self.readyseteatparams += \"&totaltime={}\".format(choice)\n # self.api.send_text_facebook(self.user_id, \"What sounds Good?\")\n # return self.api.send_facebook(self.user_id, self.config.REGION_DINNER_QUESTION)\n return self.api.send_facebook(self.user_id, self.config.REGION_QUICK_REPLY)\n\n if title == \"region\":\n self.region = choice\n if choice == \"asian\":\n self.params += \"&cuisine=44\"\n self.readyseteatparams += \"&cuisine=44\"\n elif choice == \"italian\":\n self.params += \"&cuisine=46\"\n self.readyseteatparams += \"&cuisine=46\"\n elif choice == \"mediterranean\":\n self.params += \"&cuisine=367\"\n self.readyseteatparams += \"&cuisine=367\"\n elif choice == \"mexican\":\n self.params += \"&cuisine=45\"\n self.readyseteatparams += \"&cuisine=45\"\n elif choice == \"american\":\n self.params += \"&suppresstraits=44,35,355,46,367,45,356,261\"\n\n recipes = self.api.getRecipes(self.params)\n if not recipes:\n return self.send_no_results()\n viewMoreUrl = self.api.prepareViewMoreUrl(self.readyseteatparams)\n elems = self.api.prepareRecipes(recipes, viewMoreUrl)\n self.data = elems\n m_data = self.config.DEFAULT_TEMPLATE.copy()\n m_data[\"message\"][\"attachment\"][\"payload\"][\"elements\"] = self.data[self.index:self.index + 3]\n r = self.api.send_facebook(self.user_id, m_data)\n self.logger.warning(r)\n return\n\n if title == \"dessert\":\n self.dessert = choice\n if choice == \"cookies\":\n self.params += \"&trait=48,10,20,110&suppresstraits=22,24&keywords=cookies\"\n self.readyseteatparams += \"&trait=48,10,20,110&keywords=cookies\"\n elif choice == \"cakes\":\n self.params += \"&suppresstraits=24&keywords=cake\"\n self.readyseteatparams += \"&keywords=cake\"\n elif choice == \"pies\":\n self.params = \"sortby=season,rating&order=desc,desc&negativeingredientkeyword=pieces&keywords=pie&suppresstraits=24&category=88\"\n self.readyseteatparams = \"&negativeingredientkeyword=pieces&keywords=pie&category=88\"\n elif choice == \"healthier\":\n self.params += \"&goodforyou=257&goodforyou=258&goodforyou=260\"\n self.readyseteatparams += \"&goodforyou=257&goodforyou=258&goodforyou=260\"\n elif choice == \"seasonal\":\n self.params = \"sortby=season,newest,rating,publisheddate&order=desc,desc,desc,desc&category=88&season=330\"\n self.readyseteatparams = \"&category=88&season=330\"\n elif choice == \"quick\":\n self.params = \"&totaltime=30\"\n self.readyseteatparams = \"&totaltime=30\"\n\n recipes = self.api.getRecipes(self.params)\n if not recipes:\n return self.send_no_results()\n viewMoreUrl = self.api.prepareViewMoreUrl(self.readyseteatparams)\n elems = self.api.prepareRecipes(recipes, viewMoreUrl)\n self.data = elems\n m_data = self.config.DEFAULT_TEMPLATE.copy()\n m_data[\"message\"][\"attachment\"][\"payload\"][\"elements\"] = self.data[self.index:self.index + 3]\n r = self.api.send_facebook(self.user_id, m_data)\n self.logger.warning(r)\n return\n\n if title == \"breakfast\":\n self.breakfastIngredient = choice\n if choice == \"eggs\":\n self.params += \"&mainingredient=72\"\n self.readyseteatparams += 
\"&mainingredient=72\"\n self.params += \"&trait=9\"\n self.readyseteatparams += \"&trait=9\"\n elif choice == \"casserole\":\n self.params += \"&keywords=casserole\"\n self.readyseteatparams += \"&keywords=casserole\"\n elif choice == \"healthier\":\n self.params += \"&goodforyou=260&goodforyou=258\"\n self.readyseteatparams += \"&goodforyou=260&goodforyou=258\"\n elif choice == \"sweet\":\n self.params += \"&trait=22\"\n self.readyseteatparams += \"&trait=22\"\n # will add something sweet\n pass\n return self.api.send_facebook(self.user_id, self.config.BREAKFAST_TIME_QUICK_REPLY)\n\n if title == \"appetizer\":\n self.appetizerIng = choice\n if choice == \"cheesy\" or choice == \"meaty\":\n if choice == \"cheesy\":\n self.params += \"&keywords=cheese\"\n self.readyseteatparams += \"&keywords=cheese\"\n elif choice == \"meaty\":\n self.params += \"&mainingredient=70&mainingredient=76&mainingredient=249\"\n self.readyseteatparams += \"&mainingredient=70&mainingredient=76&mainingredient=249\"\n recipes = self.api.getRecipes(self.params)\n if not recipes:\n return self.send_no_results()\n viewMoreUrl = self.api.prepareViewMoreUrl(self.readyseteatparams)\n elems = self.api.prepareRecipes(recipes, viewMoreUrl)\n self.data = elems\n m_data = self.config.DEFAULT_TEMPLATE.copy()\n m_data[\"message\"][\"attachment\"][\"payload\"][\"elements\"] = self.data[self.index:self.index + 3]\n r = self.api.send_facebook(self.user_id, m_data)\n self.logger.warning(r)\n return\n elif choice == \"veggies\" or choice == \"healthier\":\n if choice == \"veggies\":\n self.params += \"&mainingredient=77&mainingredient=310\"\n self.readyseteatparams += \"&mainingredient=77&mainingredient=310\"\n elif choice == \"healthier\":\n self.params += \"&goodforyou=260\"\n self.readyseteatparams += \"&goodforyou=260\"\n return self.api.send_facebook(self.user_id, self.config.HOT_OR_COLD_QUICK_REPLY)\n\n if title == \"hot-cold\":\n self.appetizerType = choice\n if choice == \"hot\":\n self.params += \"&suppresstraits=252\"\n elif choice == \"cold\":\n self.params += \"&cookingmethod=252\"\n self.readyseteatparams += \"&cookingmethod=252\"\n\n recipes = self.api.getRecipes(self.params)\n if not recipes:\n return self.send_no_results()\n viewMoreUrl = self.api.prepareViewMoreUrl(self.readyseteatparams)\n elems = self.api.prepareRecipes(recipes, viewMoreUrl)\n self.data = elems\n m_data = self.config.DEFAULT_TEMPLATE.copy()\n m_data[\"message\"][\"attachment\"][\"payload\"][\"elements\"] = self.data[self.index:self.index + 3]\n r = self.api.send_facebook(self.user_id, m_data)\n self.logger.warning(r)\n return\n\n if title == \"side-dish\":\n self.sideDish = choice\n if choice == \"potato\":\n self.params += \"&mainingredient=298\"\n self.readyseteatparams += \"&mainingredient=298\"\n elif choice == \"vegetable\":\n self.params += \"&mainingredient=77\"\n self.readyseteatparams += \"&mainingredient=77\"\n elif choice == \"rice\":\n self.params += \"&mainingredient=272\"\n self.readyseteatparams += \"&mainingredient=272\"\n elif choice == \"pasta\":\n self.params += \"&mainingredient=75\"\n self.readyseteatparams += \"&mainingredient=75\"\n elif choice == \"salad\":\n self.params = \"sortby=season,newest,rating,publisheddate&order=desc,desc,desc,desc&category=95&mainingredient=77\"\n self.readyseteatparams = \"&category=95&mainingredient=77&trait=92\"\n elif choice == \"beans\":\n self.params += \"&mainingredient=310\"\n self.readyseteatparams += \"&mainingredient=310\"\n\n recipes = self.api.getRecipes(self.params)\n if not 
recipes:\n return self.send_no_results()\n viewMoreUrl = self.api.prepareViewMoreUrl(self.readyseteatparams)\n elems = self.api.prepareRecipes(recipes, viewMoreUrl)\n self.data = elems\n m_data = self.config.DEFAULT_TEMPLATE.copy()\n m_data[\"message\"][\"attachment\"][\"payload\"][\"elements\"] = self.data[self.index:self.index + 3]\n r = self.api.send_facebook(self.user_id, m_data)\n self.logger.warning(r)\n return\n isParamInMessage = self.fetch_parameters(message)\n if isParamInMessage:\n recipes = self.api.getRecipes(self.params)\n if not recipes:\n return self.send_no_results()\n viewMoreUrl = self.api.prepareViewMoreUrl(self.readyseteatparams)\n elems = self.api.prepareRecipes(recipes, viewMoreUrl)\n self.data = elems\n m_data = self.config.DEFAULT_TEMPLATE.copy()\n m_data[\"message\"][\"attachment\"][\"payload\"][\"elements\"] = self.data[self.index:self.index + 3]\n r = self.api.send_facebook(self.user_id, m_data)\n self.logger.warning(r)\n return\n return self.api.send_text_facebook(self.user_id, \"You can write ‘start over’ to go to the first step\")", "async def on_raw_reaction_add(self, payload: discord.RawReactionActionEvent):\n\n # Skipping bot reactions\n if payload.member.bot:\n return\n\n # Search the event by message id\n message_id = payload.message_id\n channel_id = payload.channel_id\n guild_id = payload.guild_id\n event_type = connector.getEventTypeByMessage(guild_id, message_id, channel_id)\n if event_type is None:\n return\n\n role = discord.utils.get(payload.member.guild.roles, id=event_type.role_id)\n\n if (payload.event_type == \"REACTION_ADD\") and (event_type.emoji == str(payload.emoji)):\n # Adding role to user\n await payload.member.add_roles(role)", "async def redo(self, ctx: commands.Context):\n ref = ctx.message.reference\n if not ref:\n return\n try:\n message = await ctx.channel.fetch_message(ref.message_id)\n except NotFound:\n return await ctx.reply(\"Couldn't find that message\")\n if message.author != ctx.author:\n return\n await self.bot.process_commands(message)", "def addReactionGlyph(self, *args):\n return _libsbml.Layout_addReactionGlyph(self, *args)", "def cast(self, message):\n self._mailbox.append(message)\n # if this is the only message, the coro could be waiting\n if len(self._mailbox) == 1:\n self._event.send()", "async def message_edit_button(self, payload: discord.RawReactionActionEvent) -> None:\n\n self.bits = flip_action_bits(LoggingActions.MESSAGE_EDIT, self.bits)\n await self.update_embed()", "async def reactions(self, ctx):\n\n\t\tawait self.message_leaderboard(ctx, \"reactions\")", "def setReaction(self, *args):\n return _libsbml.FluxBound_setReaction(self, *args)", "def OnChanActionMessage(self, msg):\n self.handle_inbound_irc_msg(\"OnChanActionMessage\", msg)\n return znc.CONTINUE", "def RIPReaction(sc, event):\n sc.api_call('reactions.add', as_user='true', channel=event['channel'],\n timestamp=event['ts'], name='rip')", "def message_callback(self, message):\n pass", "async def handle_role_reaction_press(interaction: disnake.MessageInteraction):\n if interaction.message not in await ReactionRoleMessage.get_all():\n return\n\n role_id = int(interaction.component.custom_id)\n member: disnake.Member = interaction.author\n user = await User.get(member.id)\n role = member.get_role(role_id)\n if role:\n await member.remove_roles(role, reason=\"Reaction Role Message\")\n await send_message(user=user, key=\"role_removed\", inter=interaction, ephemeral=True)\n else:\n role = interaction.guild.get_role(role_id)\n if role:\n try:\n await 
member.add_roles(role, reason=\"Reaction Role Message\")\n await send_message(user=user, key=\"role_added\", inter=interaction, ephemeral=True)\n except disnake.errors.Forbidden as e:\n await send_message(user=user, key=\"no_permissions\", inter=interaction, ephemeral=True)\n else:\n await send_message(user=user, key=\"role_not_found\", inter=interaction, ephemeral=True)", "def process_message(self, message):\n self.post_to_redis(message)\n return", "def received_message(self, m):\n self.receiver.handle_message(m)", "def _process_msg(cls, msg):\n raise NotImplementedError", "def receive(self, msg):\n pass", "async def on_raw_reaction_remove(self, payload):\n\n\t\tguild = self.bot.get_guild(payload.guild_id)\n\t\tif guild is not None:\n\t\t\t# Update reaction leaderboards\n\t\t\treactionLeaderboard = self.leaderboards[str(payload.guild_id)][\"reactionLeaderboard\"]\n\n\t\t\tif payload.emoji.id is not None:\n\t\t\t\tfor guildEmoji in guild.emojis:\n\t\t\t\t\tif payload.emoji.id == guildEmoji.id:\n\t\t\t\t\t\treactionLeaderboard[\"<:\" + str(payload.emoji.name) + \":\" + str(payload.emoji.id) + \">\"] -= 1\n\t\t\t\t\t\tbreak\n\n\t\t\telse:\n\t\t\t\treactionLeaderboard[str(payload.emoji.name)] -= 1\n\n\t\t\tif str(payload.emoji.id) in self.leaderboards[str(payload.guild_id)][\"emojiLeaderboard\"]:\n\t\t\t\tself.leaderboards[str(payload.guild_id)][\"emojiLeaderboard\"][str(payload.emoji.id)] -= 1", "def _message(self, msg):\n\n self.log('Message received:', msg['body'], pretty=True)\n\n if msg['type'] in ('chat', 'normal'):\n body = str(msg['body'])\n if body.startswith('/'):\n cmd, arg_string = body.split(' ', maxsplit=1)\n cmd = cmd.lstrip('/')\n\n if arg_string:\n args = arg_string.split(' ')\n else:\n args = None\n\n self.log('IRC remote command received:', cmd, args)\n return\n else:\n if True:\n msg.reply(\"Sorry, I did not understand that:\\n%s\" % body).send()", "def handle(self, message: discord.Message, intent: Intent) -> Optional[str]:\n pass", "def msg(self, chan, msg):\n self._msg(chan, msg)", "def handleMessage(msg):", "def dispatch_message(self, message):\n\n self.log.debug(\"Incoming message %r\", message)\n if message.code.is_request():\n # Responses don't get deduplication because they \"are idempotent or\n # can be handled in an idempotent fashion\" (RFC 7252 Section 4.5).\n # This means that a separate response may get a RST when it is\n # arrives at the aiocoap client twice. 
Note that this does not\n # impede the operation of observations: Their token is still active\n # so they are ACK'd, and deduplication based on observation numbers\n # filters out the rest.\n #\n # This saves memory, and allows stateful transports to be shut down\n # expeditiously unless kept alive by something else (otherwise,\n # they'd linger for EXCHANGE_LIFETIME with no good reason).\n if self._deduplicate_message(message) is True:\n return\n\n if message.mtype in (ACK, RST):\n self._remove_exchange(message)\n\n if message.code is EMPTY and message.mtype is CON:\n self._process_ping(message)\n elif message.code is EMPTY and message.mtype in (ACK, RST):\n pass # empty ack has already been handled above\n elif message.code.is_request() and message.mtype in (CON, NON):\n # the request handler will have to deal with sending ACK itself, as\n # it might be timeout-related\n self._process_request(message)\n elif message.code.is_response() and message.mtype in (CON, NON, ACK):\n success = self._process_response(message)\n if success:\n if message.mtype is CON:\n self._send_empty_ack(message.remote, message.mid, reason=\"acknowledging incoming response\")\n else:\n # A peer mustn't send a CON to multicast, but if a malicious\n # peer does, we better not answer\n if message.mtype == CON and not message.remote.is_multicast_locally:\n self.log.info(\"Response not recognized - sending RST.\")\n rst = Message(mtype=RST, mid=message.mid, code=EMPTY, payload='')\n rst.remote = message.remote.as_response_address()\n self._send_initially(rst)\n else:\n self.log.info(\"Ignoring unknown response (which is not a unicast CON)\")\n else:\n self.log.warning(\"Received a message with code %s and type %s (those don't fit) from %s, ignoring it.\", message.code, message.mtype, message.remote)", "def handle_message(self, message):\n\n\t\tself.log.debug(\"%s handle_message %s\", self.name, message)\n\n\t\tif message[\"Type\"] == \"command\":\n\t\t\ttry:\n\t\t\t\tcommand_callable = \"command_%s\" % message[\"Message\"][\"command\"]\n\t\t\t\tif hasattr(self, command_callable) and callable(getattr(self, command_callable)):\n\t\t\t\t\tcall = getattr(self, command_callable)\n\t\t\t\t\tcall(message[\"Message\"][\"arguments\"])\n\t\t\texcept Exception as e:\n\t\t\t\tself.log.error(\"%s invalid command %s %s\", self.name, message, e)", "def addReaction(self, *args, **kwargs):\n if isinstance(args[0], Reaction):\n reaction = args[0]\n else:\n reaction = self._sim.reaction(*args, **kwargs)\n\n self._sim.assignReaction(reaction, self)\n return self", "def message_routed(self, message):\n \n # Send it through the transport\n self.send_message(message = message)", "async def _on_raw_reaction(\n self,\n payload: RawReactionActionEvent,\n reaction_type: EnumReactionType,\n ) -> None:\n if self.__is_self(payload.user_id):\n print(\"reaction added by the bot itself\")\n return\n\n guild = self._client.get_guild(payload.guild_id)\n\n if reaction_type == EnumReactionType.ADD:\n await self.__roles.add_role(\n guild, payload.message_id,\n payload.emoji, payload.user_id,\n\n )\n elif reaction_type == EnumReactionType.REMOVE:\n await self.__roles.remove_role(\n guild, payload.message_id,\n payload.emoji, payload.user_id,\n )\n else:\n raise InvalidReactionType", "def handle_message(self, message):\n print \"[WARNING] No message handling implemented!\"", "def dispatch(self, message):\n data = ujson.loads(message)\n command = data.get(\"command\", \"no command field!\")\n if command in self._command_hash_views:\n 
self._command_hash_views[command](self, data)\n else:\n # handler.send(\"404 Error\")\n logger.warning(\"[Local] System don't understand command[%s]\" % command)", "async def send_react(self, reactions, *args, **kwargs):\n message = await self.send(*args, **kwargs)\n if isinstance(reactions, str): # Handle two-character emojis\n reactions = (reactions,)\n for reaction in reactions:\n await self.add_reaction(message, reaction)\n return message", "async def redaction_state(message: Message, state: FSMContext):\n await state.finish()\n await message.bot.send_message(\n chat_id=message.bot.config.REDACTION_CHAT,\n **MessageFromUser(message.from_user, message.text).as_dict()\n )\n await message.answer(**MessageForwarded('редакции').as_dict())\n await message.answer(**Start(message.from_user.first_name).as_dict())", "async def addreact(self, ctx, word, emoji):\n guild = ctx.message.guild\n message = ctx.message\n emoji = https://i.imgur.com/CWeQ620.jpg", "def reply(cls, user, context, message, reply_message):\r\n pass", "def processMessage(self, *args, **kwargs):\r\n pass", "async def on_raw_reaction_add(self, payload):\n\n # Don't accept DMs\n if not payload.guild_id:\n return\n\n # Ignore Bot\n if payload.user_id == self.bot.user.id:\n return\n\n if payload.emoji.name not in {'\\U00002705', '\\U0000274C', '\\U0001FA91'}: # Green Check, X, Chair\n return\n\n user = await self.bot.fetch_user(payload.user_id)\n if user.bot:\n return\n\n # U+2705 (:white_check_mark: ), U+2611(:ballot_box_with_check:) ,U+1FA91(:chair:),\n # U+1F1FD(:regional_indicator_x:), U+1F1E7(:regional_indicator_b:), U+274C(:x:)\n\n # Is this ID attached to a raid message? (Also technically checks if this is the right channel)\n message_id = payload.message_id\n s = search_format('messageID', 'equals', str(message_id))\n s = \"\".join(str(s).split())\n r = requests.get(self.bot.raidAPI + '?constraints=[' + s + ']')\n raidData = r.json()['response']['results']\n\n if raidData:\n raid_id = raidData[0]['_id']\n else:\n print(\"User liked a post that isn't a raid\" + payload.member.name + '#' + str(payload.member.discriminator))\n return # Returns if messageID isn't attached to a Raid in DB\n\n # UserName Checks\n discord_name = payload.member.name\n discord_suffix = payload.member.discriminator\n discord_full = quote(discord_name + ' #' + discord_suffix)\n\n s = search_format('DiscordID', 'equals', discord_full)\n s = \"\".join(str(s).split())\n r = requests.get(self.bot.discordAPI + '?constraints=[' + s + ']')\n userData = r.json()['response']['results']\n\n if userData:\n RR_id = userData[0]['UserID']\n else:\n # DMs User to update RR account\n dmchannel = await payload.member.create_dm()\n print(\"This user liked a post and was told he wasn't signed up:\" + discord_name + '%20%23' + str(\n discord_suffix) + ', Full:' + discord_full)\n await dmchannel.send(\n \"Error! 
Please Link Your Discord Account to ReadyRaider Here: https://www.readyraider.com/profile2\")\n\n # Removes Wrong Reaction\n channel = self.bot.get_channel(payload.channel_id)\n msg = await channel.fetch_message(message_id)\n user = await self.bot.fetch_user(payload.user_id)\n await msg.remove_reaction(payload.emoji.name, user)\n return\n\n if payload.emoji.name == '\\U00002705': # GREEN CHECK\n signAPI = self.bot.signAPI\n\n elif payload.emoji.name == '\\U0000274C': # 'X'\n signAPI = self.bot.declineAPI\n\n elif payload.emoji.name == '\\U0001FA91': # CHAIR\n signAPI = self.bot.benchAPI\n else:\n signAPI = self.bot.declineAPI\n\n headers = {\"Authorization\": \"Bearer \" + self.bot.api_key}\n body = {\"rid\": str(raid_id), \"raider\": str(RR_id)}\n requests.post(signAPI, data=body, headers=headers)\n\n s = search_format('messageID', 'equals', str(message_id))\n s = \"\".join(str(s).split())\n r = requests.get(self.bot.raidAPI + '?constraints=[' + s + ']')\n raidData = r.json()['response']['results']\n await self.raidUpdate(raidData[0], payload.channel_id, payload.guild_id)", "async def on_raw_reaction_remove(self, payload: discord.RawReactionActionEvent):\n\n guild = self.bot.get_guild(payload.guild_id)\n user = guild.get_member(payload.user_id)\n # Skipping bot reactions\n if user.bot:\n return\n\n # Search the event by message id\n message_id = payload.message_id\n channel_id = payload.channel_id\n guild_id = payload.guild_id\n event = connector.getEventByMessage(guild_id, message_id, channel_id)\n if event is None:\n return\n\n emoji_id = str(payload.emoji).split(':')[2][:-1]\n role = discord.utils.get(guild.roles, id=event.role_id)\n if (payload.event_type == \"REACTION_REMOVE\") and (event.emoji.split(':')[2][:-1] == emoji_id) and (role in user.roles):\n # Remove role from user\n await user.remove_roles(role)", "def receive_message(self, context, message):\r\n pass", "def handle_msg(self, state_id, msg):\n pass", "async def on_raw_reaction_remove(self, payload: discord.RawReactionActionEvent):\n\n guild = self.bot.get_guild(payload.guild_id)\n user = guild.get_member(payload.user_id)\n # Skipping bot reactions\n if user.bot:\n return\n\n # Search the event by message id\n message_id = payload.message_id\n channel_id = payload.channel_id\n guild_id = payload.guild_id\n event_type = connector.getEventTypeByMessage(guild_id, message_id, channel_id)\n if event_type is None:\n return\n\n emoji_id = str(payload.emoji).split(':')[2][:-1]\n role = discord.utils.get(guild.roles, id=event_type.role_id)\n if (payload.event_type == \"REACTION_REMOVE\") and (event_type.emoji.split(':')[2][:-1] == emoji_id) and (role in user.roles):\n # Remove role from user\n await user.remove_roles(role)", "async def on_raw_reaction_add(self, payload):\n\n # exclude all reactions which are not the original message\n if str(payload.message_id) != self.message_id:\n return\n\n # exclude the bot\n if payload.user_id == self.bot.user.id:\n return\n\n else:\n # get the model data for the role assigner object\n data = await self.get_objects(\n model=RoleAssigner, filter={\"bot__name\": str(self.bot_name)}\n )\n\n # role assigner object\n data = data[0]\n\n guild = self.get_guild(guild_id=payload.guild_id)\n\n user = self.get_user(guild=guild, user_id=payload.user_id)\n\n for db_role in data.roles.all():\n\n if db_role.emoji.startswith(\":\") and db_role.emoji.endswith(\":\"):\n\n ce = db_role.emoji[1:-1]\n\n else:\n ce = db_role.emoji\n\n if str(payload.emoji.name) == str(ce):\n\n role = self.get_role(guild, 
int(db_role.uid))\n\n if user not in role.members:\n\n await user.add_roles(role)\n\n print(\"Added \" + str(user) + \" to role: \" + str(role) + \"!\")\n\n else:\n print(\n \"User \" + str(user) + \" already in role: \" + str(role) + \"!\"\n )\n\n pass", "def dispatch_message(self, addr, message_dict, kind):\n try:\n yield from self.dispatcher.dispatch_message(addr, message_dict, kind)\n except Exception as e:\n self.logger.error(\n \"Failed to dispatch mochad message {}: {}\".format(\n message_dict, e))", "def process(self, message: Message, **kwargs: Any) -> None:", "def handle_message(self, data, channel):\n pass", "def handle_message(self, msg):\n self.messages.append({\n 'type': msg.category,\n 'module': msg.module,\n 'obj': msg.obj,\n 'line': msg.line,\n 'column': msg.column,\n 'path': msg.path,\n 'symbol': msg.symbol,\n 'message': msg.msg,\n 'message-id': msg.msg_id,\n })", "async def ri(self, ctx: commands.Context, *, msg: str):\n\n def mapper(s: str):\n if s.startswith('<'):\n return s\n return '\\u200b'.join(self.characters.get(c.lower(), c) for c in s)\n\n strings = re.split(r\"(<\\S*>)\", msg)\n\n new_msg = ''.join(map(mapper, strings))\n\n await ctx.message.edit(content=new_msg)", "def receiveMessage(self, user, message):\n pass", "def process(self, message: Message, **kwargs: Any) -> None:\n pass", "def message(self, msg):\n if (AZMessage.is_agilezen_xmpp_message(msg)):\n try:\n az_message = AZMessage(msg)\n except (MessageCreationException, api.APIException) as ex:\n print ex\n return None\n for handler in self.handlers:\n handler.handle(az_message)", "def reply(cls, user, context, message, reply_message):\n pass", "def callback_message(self, message):\n pass", "def callback_message(self, message):\n pass", "def handle_msg(msg):\n if comm._msg_callback:\n comm._msg_callback(msg)", "def acknowledgement(self, message: Message[ValueType]):", "def handle_message(event):\n intention = parse_intention(event.message.text)\n if intention == config.QUERY_INTENTION:\n handle_query_weather_message(event)\n elif intention == config.SUBSCRIBE_INTENTION:\n handle_subscribe_message(event)\n else:\n handle_unknown_message(event)", "def message(self, msg):\n self._message = msg", "def send(self, msg):\n return self._channel_action(msg, 1)", "def notify(cls, self, message):\n pass", "async def on_reaction_add(reaction, user):\n if reaction.message.content.startswith('http'):\n curator = re.sub(r'\\d|\\W|(TravelFeed)','',str(user),re.IGNORECASE|re.DOTALL)\n if not user.id in discordcuratorlist and not user.id == botid:\n \"\"\"Checks if user who added reaction is a curator\"\"\"\n await loop.create_task(send_discord(\"Curator unauthorised: \"+curator, logchannel))\n return\n else:\n author, permlink = resolve_authorperm(reaction.message.content)\n post = Comment(construct_authorperm(author, permlink))\n if reaction.emoji == '🌍':\n await bot.add_reaction(reaction.message, \"⏳\")\n actionqueue.put(Post_Action(post, \"tf100\", curator, reaction.message))\n elif reaction.emoji == '🌐': \n await bot.add_reaction(reaction.message, \"⏳\") \n actionqueue.put(Post_Action(post, \"tf50\", curator, reaction.message))\n elif reaction.emoji == '👥':\n await bot.add_reaction(reaction.message, \"⏳\")\n actionqueue.put(Post_Action(post, \"coop100\", None, reaction.message))\n elif reaction.emoji == '👋':\n await bot.add_reaction(reaction.message, \"⏳\")\n actionqueue.put(Post_Action(post, \"ad10\", curator, reaction.message))\n elif reaction.emoji == '📏':\n await bot.add_reaction(reaction.message, \"⏳\")\n 
actionqueue.put(Post_Action(post, \"short0\", None, reaction.message))\n elif reaction.emoji == '🇬🇧':\n await bot.add_reaction(reaction.message, \"⏳\")\n actionqueue.put(Post_Action(post, \"lang0\", None, reaction.message))\n elif reaction.emoji == '📝':\n await bot.add_reaction(reaction.message, \"⏳\")\n actionqueue.put(Post_Action(post, \"copyright0\", None, reaction.message))", "async def run(self, message: discord.Message) -> None:\n await message.edit(content=self.current(), view=self)", "def handle_message(self, message):\n print(f\"Got message {message}\")\n if message >> 7 == 1:\n # String\n self.receive_char_array(message)\n elif message >> 3 == 0b00000:\n # Servo position\n self.receive_servo_position(message)\n elif message == 0b00001000:\n # All servo positions\n self.receive_all_servo_positions()\n elif message == 0b00001001:\n # All servo limits\n self.receive_all_servo_limits()\n elif message == 0b00001010:\n # Raw force reading\n self.receive_raw_force()\n print(f\"Handled message {message}\")", "def handle_message(self, message):\n\n\t\tself.console.handle_message(message)", "def message_unreact(channel, message, reaction):\n\n if not settings.SLACK_TOKEN:\n return {'ok': False, 'error': 'config_error'}\n\n client = WebClient(token=settings.SLACK_TOKEN)\n\n try:\n response = client.reactions_remove(channel=channel, timestamp=message, name=reaction)\n assert response['ok'] is True\n return response\n except SlackApiError as e:\n assert e.response['ok'] is False\n return e.response", "def comsume_msg(self, msg_type):" ]
[ "0.6869177", "0.6487918", "0.63162875", "0.6300903", "0.6280189", "0.6270176", "0.6200644", "0.61323017", "0.613172", "0.6130621", "0.6107559", "0.6087531", "0.6074806", "0.60717124", "0.6063858", "0.6055714", "0.60538733", "0.6034891", "0.6023585", "0.60027444", "0.5996093", "0.5994024", "0.5983939", "0.5955966", "0.59559005", "0.59528965", "0.5919636", "0.59173197", "0.59153545", "0.5893467", "0.5853518", "0.5847134", "0.58343136", "0.5822588", "0.5812419", "0.58028793", "0.5793747", "0.57673234", "0.5759072", "0.57562727", "0.57558304", "0.5752882", "0.57432514", "0.57429415", "0.57425314", "0.5738211", "0.5734135", "0.5724122", "0.57147735", "0.5711033", "0.5708248", "0.5693282", "0.56899136", "0.5687087", "0.568562", "0.5674516", "0.5672874", "0.56721526", "0.5657641", "0.564565", "0.56398714", "0.5639739", "0.5629927", "0.5625185", "0.56209594", "0.5620075", "0.5619464", "0.56136143", "0.5612525", "0.56087995", "0.560568", "0.56039405", "0.56023073", "0.5599863", "0.55955523", "0.5590654", "0.5567404", "0.5561185", "0.55556107", "0.554391", "0.55382216", "0.5532761", "0.5531185", "0.55205005", "0.5515751", "0.5512881", "0.5505076", "0.5505076", "0.5496806", "0.5480437", "0.5468919", "0.54595953", "0.54583406", "0.5452015", "0.5451227", "0.54502505", "0.54473746", "0.54353356", "0.54326147", "0.543206" ]
0.55897665
76
Implement the handler teardown.
def teardown(self) -> None:
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_teardown(self):\n with pytest.raises(NotImplementedError):\n self.handler.teardown()", "def _teardown(self):\n # No-op base implementation", "def teardown(self, event):\n pass", "def cleanup(self) -> None:\n self.handler.cleanup()\n super().cleanup()", "def teardown(self,**kwargs):\n pass", "def teardown(self) -> None:\n pass", "def teardown(self) -> None:\n pass", "def teardown(self):\n raise NotImplementedError", "def teardown(self):\n raise NotImplementedError", "def teardown(self):\n pass", "def teardown(self):\n pass", "def teardown(self):\n pass", "def teardown(self):\n pass # pylint: disable=unnecessary-pass", "def test_teardown(self):\n assert self.http_handler.teardown() is None\n self.assert_quantity_in_outbox(0)", "def teardown(self):", "def teardown(self):", "def teardown(self):", "def teardown(self, rc):\n pass", "def teardown_class(self):\n pass", "def teardown_method(self):", "def teardown(self):\n self.tcex.log.trace('teardown')", "def teardown(self, exception):", "def teardown_method(self):\n if self.ae:\n self.ae.shutdown()", "def teardown_method(self):\n if self.ae:\n self.ae.shutdown()", "def teardown_method(self):\n if self.ae:\n self.ae.shutdown()", "def teardown_method(self):\n if self.ae:\n self.ae.shutdown()", "def teardown_method(self):\n if self.ae:\n self.ae.shutdown()", "def teardown_method(self):\n if self.ae:\n self.ae.shutdown()", "def teardown(self):\n # self.in_kwargs, self.ref_time,\n del self.warn_msgs, self.war\n return", "def teardown_provider(self):\n pass", "def teardown(self):\n try:\n self.loop.run_until_complete(self.webhook_connection.disconnect())\n except Exception:\n print_exc()\n raise", "def teardown(self):\n if self.ae:\n self.ae.shutdown()", "def teardown(self):\n\n self.dummy.set_current()\n self.endpoints.lock()", "def tearDown(self):\n super(TestCase, self).tearDown()\n self._context.check_done()", "def tearDown(self):\n\n self._tear_down()", "def __exit__(self, *args):\n if self.teardown:\n super().__exit__(*args)", "def tearDown(self):\n del self.output\n del self.input_stream\n del self.error_listener", "def teardown_method(self, method) -> None:", "def cleanup(self):\r\n logging.info(\"entered the cleanup\")", "def teardown(self):\n del self.testInst, self.dname\n\n return", "def teardown(self):\n\n self._periodic_refresh_subs.stop()\n self._interaction_subscriber.dispose()\n\n yield None", "def teardown(self, log, info):\n raise NotImplementedError", "def teardown_method(self, test_method):\n self.wo_obj = None\n self.config_data = None", "def teardown_method(self):\n world.clear_paths()\n print(\"\\nEnd of tests in: %s\\n-------------------\\n\" % __name__)\n self.bigml = {}", "def teardown(self, closer):\n def actual_closer(exception):\n value = self._value\n if value is not None:\n closer(value)\n APP.teardown_appcontext(actual_closer)", "def teardown(self):\n self._loop.stop()\n self._loop.close()\n super().teardown()", "def cleanup(self, *args, **kwargs):", "def teardown():\n\n self.zorp_mock.stop()", "def cleanup (self):\n pass", "def tearDown(self):\n self.segment = None", "def cleanup(self):\r\n pass", "def __del__(self):\n AppHelper.stopEventLoop()", "def __del__(self):\n AppHelper.stopEventLoop()", "def cleanup(self):", "def cleanup(self):", "def cleanup(self):", "def cleanup(self):\r\n pass", "def cleanup(self):\r\n pass", "def tearDown(self):\n pass #because we dont have anything to tearDown.", "def cleanup(self):\r\n print(\"Cleanup not implemented\")", "def teardown_method(self, method):\n pass", "def 
teardown_method(self, method):\n pass", "def tearDown(self):\n pass\n # teardown called after each test\n # e.g. maybe write test results to some text file", "def teardown(self):\n storage.close()", "def teardown(self):\n storage.close()", "def teardown(self):\n storage.close()", "def teardown(self):\n storage.close()", "def on_cleanup(self):\n raise NotImplementedError", "def tearDown(self):\n self.teardown_beets()", "def teardown_method(self):\n self.hass.stop()", "def teardown_method(self):\n self.hass.stop()", "def teardown_method(self):\n self.hass.stop()", "def tearDown(self):\n self.api_context.pop()\n self.api_test_client = None", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def _tear_down():\n repl._tearDown = self.tearDown", "def teardown(self):\n\n del self.testInst, self.test_bins, self.test_label, self.test_data\n del self.out_keys, self.out_data\n\n return", "def cleanup(self):\n\n pass", "def teardown_class(self):\n self._tester = None\n self._sut = None", "def tearDown(self):\n\n BaseTest.tearDown(self)", "def tearDown(self):\n print('Calling \\'tearDown\\'')", "def dm_teardown(self):\n try:\n dispatcher.disconnect(\n self.dequeue_next_page_requests,\n signal=signals.spider_idle\n )\n except DispatcherKeyError:\n pass", "def cleanup(self):\n raise NotImplementedError", "def tearDownClass(cls):\n cls.context.close()", "def tearDown(self):\n test_env_teardown()", "def tearDown(self):\n self.tmp.cleanup()", "def tearDown(self):\n self.logger.info(\"tearDown begin\")\n self.logger.info(\"tearDown end\\n\")", "def teardown(self):\n del self.testInst, self.dname, self.test_val_length\n\n return", "def teardown(self):\n del self.testInst, self.dname, self.test_val_length\n\n return" ]
[ "0.8065864", "0.7863261", "0.78516686", "0.7751276", "0.7695875", "0.76800025", "0.76800025", "0.76762605", "0.76762605", "0.76578486", "0.76578486", "0.76578486", "0.7586377", "0.75539047", "0.7453597", "0.7453597", "0.7453597", "0.7220783", "0.7112508", "0.70983917", "0.7087754", "0.7085874", "0.7001528", "0.7001528", "0.7001528", "0.7001528", "0.7001528", "0.7001528", "0.69789845", "0.6975809", "0.6935623", "0.69172424", "0.6860113", "0.6859062", "0.6837898", "0.68220747", "0.68152905", "0.68058115", "0.679552", "0.6787844", "0.6747079", "0.67388815", "0.6701475", "0.6698241", "0.66952205", "0.66891843", "0.6683073", "0.6676516", "0.6662325", "0.6656234", "0.6649907", "0.6636182", "0.6636182", "0.6635551", "0.6635551", "0.6635551", "0.6631788", "0.6631788", "0.660411", "0.6604081", "0.660382", "0.660382", "0.6597178", "0.65948063", "0.65948063", "0.65948063", "0.65948063", "0.6593608", "0.6587096", "0.6581607", "0.6581607", "0.6581607", "0.65734476", "0.6572769", "0.6572769", "0.6572769", "0.6572769", "0.6572769", "0.6572769", "0.6572769", "0.6572769", "0.6572769", "0.6572769", "0.6572769", "0.6569569", "0.6565425", "0.6564269", "0.65548694", "0.65425795", "0.65370864", "0.6536476", "0.6530922", "0.652666", "0.65252733", "0.65201366", "0.6513735", "0.64996207", "0.64996207" ]
0.7647432
13
Implement the setup for the handler.
def setup(self) -> None: pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup(self, *args, **kwargs):\n pass", "def _setup(self) -> None:\n\t\treturn", "def setUp(self):\n\n installHandler()", "def setUp(self):\n\n installHandler()", "def setup(self,**kwargs):\n pass", "def setup(self):\n raise NotImplementedError(\"Need to be implemented in subclasses\")", "def setup(cls):\n super().setup()\n cls.http_handler = cast(\n HttpHandler, cls._skill.skill_context.handlers.http_handler\n )\n cls.logger = cls._skill.skill_context.logger\n\n cls.http_dialogues = cast(\n HttpDialogues, cls._skill.skill_context.http_dialogues\n )\n\n cls.get_method = \"get\"\n cls.post_method = \"post\"\n cls.url = \"some_url\"\n cls.version = \"some_version\"\n cls.headers = \"some_headers\"\n cls.body = b\"some_body\"\n cls.sender = \"fetchai/some_skill:0.1.0\"\n cls.skill_id = str(cls._skill.skill_context.skill_id)\n\n cls.status_code = 100\n cls.status_text = \"some_status_text\"\n\n cls.content = b\"some_content\"\n cls.list_of_messages = (\n DialogueMessage(\n HttpMessage.Performative.REQUEST,\n {\n \"method\": cls.get_method,\n \"url\": cls.url,\n \"version\": cls.version,\n \"headers\": cls.headers,\n \"body\": cls.body,\n },\n ),\n )", "def _setup(self):\n pass", "def _setup(self):\n pass", "def _setup(self):\n pass", "def _setup(self):\n pass", "def _setup(self):\n pass", "def setup( self ):", "def _setup(self):", "def _setup(self):", "def setup(self):\n raise NotImplementedError", "def setup(self):\n raise NotImplementedError", "def setup(self):\n raise NotImplementedError", "def setup(self):\n raise NotImplementedError", "async def setup(self):\n\t\tlogging.config.dictConfig(self.log_settings['log'])\n\t\tself.logger = logging.getLogger('Responder3')\n\t\tself.create_dir_strucutre()\n\n\t\tif 'handlers' in self.log_settings:\n\t\t\tasync for handlerclass, handler in self.get_handlers():\n\t\t\t\tawait self.start_extension(handlerclass, self.log_settings[self.log_settings['handlers'][handler]])", "async def setup(self, ctx):\n pass", "def setup(self) -> None:\n self.setup_logging()\n self.setup_plugins()\n self.post_setup()", "def setup(self) -> None:", "async def _setup(self):", "def _setup(self):\n raise NotImplementedError()", "def setUp(self):\n h = self.MyTestHandler()\n h.request = Request.blank('/rpc/')\n h.response = Response()\n self.handler = h", "def setup(self):\n pass", "def test_setup(self):\n with pytest.raises(NotImplementedError):\n self.handler.setup()", "def setup(self):\n\t\tpass", "async def _setup(self, *args, **kwargs):\n return self", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\r\n pass", "def setup(self):\n pass", "def setup(self):\n ...", "def setup(self,context,result):\n pass", "def __init__(self, handler):\n self.__handler = handler", "def setup(self):\n self.ae = None", "def setup(self, app_args):\n raise NotImplementedError", "def test_init_adds_handler(self):\n pass", "async def setup(self):\n pass", "def setup_class(cls):\n cls.handler = MyScaffoldHandler(\"handler\", SkillContext())", "def setup(self):", "def setup(self):", "def setup(self):", "def setup(self):", "def setup(self):\n self.build_serverkeyhash()\n self.build_agent_pubkey()\n self.load_registration_key()", "def prepare(self):\n self.uri = self.request.uri\n self.path = 
self.request.uri.split('?')[0]\n self.method = self.path.split('/')[-1]\n self.default_methods = {}\n #\n # You can use the before_handler in a local controller to\n # process your own prepare stuff.\n # a common use case is to call: self.print_debug_info().\n # which then applies only to this specific handler.\n # \n before_handler = getattr(self, \"before_handler\", None)\n print(\"calling before_handler for \" + str(self.__class__))\n if callable(before_handler):\n before_handler()", "def post_setup(self, context):\n pass", "def setUp(self):\n self.simulation = FooSimulation(count=5, g=6, h=9, i=12)\n self.foo = Foo(simulation=self.simulation, name='foo', a=4, b=42, c=\"Hello\")\n self.handler = Handler(ProxyLock(self.simulation),\n ProxyLock(self.foo),\n [Attribute(\"count\"), Attribute(\"g\")],\n [Attribute('b')],\n [Attribute('a')])", "def setup(self):\n pass # pragma: no cover", "def setup(self):\n raise NotImplemented", "def setup_hooks(self):\n pass", "def setup(self, *args, **kwargs):\n return True", "def initialize(self, context: InitCommandContext) -> None:\n super().initialize(context)\n self.handler.initialize(context, self.logger)", "def prepare(self):\n return HandlerReady()", "def setup(self):\n self.log.debug('upm - in upm setup()')\n # Add resource setup code here", "def on_setup(self, request, trigger_context):\n raise NotImplementedError", "def setup_method(self):\n self.ae = None", "def setup(cls):\n super().setup()\n cls.search_behaviour = cast(\n GenericSearchBehaviour, cls._skill.skill_context.behaviours.search\n )\n cls.tx_behaviour = cast(\n GenericTransactionBehaviour, cls._skill.skill_context.behaviours.transaction\n )\n cls.strategy = cast(GenericStrategy, cls._skill.skill_context.strategy)\n\n cls.logger = cls._skill.skill_context.logger", "def _setup(app_obj):", "def __init__(self, handler, **kwargs):\n # Attributes used by the auth mixins (required).\n self.request = RequestAdapter(handler.request)\n self.settings = kwargs\n\n # Attributes used only internally by this class (specific to the webapp\n # implementation).\n self._request = handler.request\n self._response = handler.response", "def setup(self, ctxConfig, drvConfig):\n superClass.setup(self, ctxConfig, drvConfig)\n # TODO Your startup stuff here", "def setup(cls):\n cls.location = {\"longitude\": 0.1270, \"latitude\": 51.5194}\n cls.search_query = {\n \"search_key\": \"intro_service\",\n \"search_value\": \"intro_alice\",\n \"constraint_type\": \"==\",\n }\n cls.search_radius = 5.0\n cls.admin_host = \"127.0.0.1\"\n cls.admin_port = 8021\n cls.ledger_url = \"http://127.0.0.1:9000\"\n config_overrides = {\n \"models\": {\n \"strategy\": {\n \"args\": {\n \"location\": cls.location,\n \"search_query\": cls.search_query,\n \"search_radius\": cls.search_radius,\n \"admin_host\": cls.admin_host,\n \"admin_port\": cls.admin_port,\n \"ledger_url\": cls.ledger_url,\n }\n }\n },\n }\n\n super().setup(config_overrides=config_overrides)\n\n # behaviours\n cls.faber_behaviour = cast(\n FaberBehaviour,\n cls._skill.skill_context.behaviours.faber,\n )\n\n # dialogues\n cls.default_dialogues = cast(\n DefaultDialogues, cls._skill.skill_context.default_dialogues\n )\n cls.http_dialogues = cast(\n HttpDialogues, cls._skill.skill_context.http_dialogues\n )\n cls.oef_search_dialogues = cast(\n OefSearchDialogues, cls._skill.skill_context.oef_search_dialogues\n )\n\n # handlers\n cls.http_handler = cast(HttpHandler, cls._skill.skill_context.handlers.http)\n cls.oef_search_handler = cast(\n OefSearchHandler, 
cls._skill.skill_context.handlers.oef_search\n )\n\n # models\n cls.strategy = cast(Strategy, cls._skill.skill_context.strategy)\n\n cls.logger = cls._skill.skill_context.logger\n\n # mocked objects\n cls.mocked_method = \"SOME_METHOD\"\n cls.mocked_url = \"www.some-url.com\"\n cls.mocked_version = \"some_version\"\n cls.mocked_headers = \"some_headers\"\n cls.body_dict = {\"some_key\": \"some_value\"}\n cls.body_str = \"some_body\"\n cls.body_bytes = b\"some_body\"\n cls.mocked_body_bytes = json.dumps(cls.body_str).encode(\"utf-8\")\n cls.mocked_query = Query(\n [Constraint(\"some_attribute_name\", ConstraintType(\"==\", \"some_value\"))],\n DataModel(\n \"some_data_model_name\",\n [\n Attribute(\n \"some_attribute_name\",\n str,\n False,\n \"Some attribute descriptions.\",\n )\n ],\n ),\n )\n cls.mocked_proposal = Description(\n {\n \"contract_address\": \"some_contract_address\",\n \"token_id\": \"123456\",\n \"trade_nonce\": \"876438756348568\",\n \"from_supply\": \"543\",\n \"to_supply\": \"432\",\n \"value\": \"67\",\n }\n )\n\n # list of messages\n cls.list_of_http_messages = (\n DialogueMessage(\n HttpMessage.Performative.REQUEST,\n {\n \"method\": cls.mocked_method,\n \"url\": cls.mocked_url,\n \"headers\": cls.mocked_headers,\n \"version\": cls.mocked_version,\n \"body\": cls.mocked_body_bytes,\n },\n is_incoming=False,\n ),\n )\n\n cls.list_of_oef_search_messages = (\n DialogueMessage(\n OefSearchMessage.Performative.SEARCH_SERVICES,\n {\"query\": cls.mocked_query},\n ),\n )", "def setup(self): \n pass", "async def async_setup(self):\n pass", "def setUp(self):\n\n self._set_up()", "def __init__(self, handler):\n\n self.event_handler = handler", "def setUp(self):\n class TestHandler(SimpleHandler):\n view = mock.MagicMock()\n\n self.route = mock.Mock()\n self.request = mock.Mock()\n self.handler = TestHandler(self.route)\n self.route.get_handler.return_value = self.handler\n self.view = TestHandler.view", "async def setup(self):\n load_base_templates()\n uris = URI.gather()\n for uri, resource in uris.items():\n methods = resource.methods\n if \"get\" not in methods:\n methods[\"get\"] = None\n\n for method in methods.keys():\n self.app.add_routes([\n getattr(aioweb, method)(uri, resource.process)\n ])\n self.app.add_routes([aioweb.get(\"/hello\", hello)])\n\n # TMP code\n max_age = 3600 * 24 * 365 # 1 year\n setup(self.app, PonyStorage(max_age=max_age))\n self.preparing_task = asyncio.create_task(self.prepare_web())", "def _handler_init(self):\r\n\t\tself._handlers[\"player-join\"] = FunctionDelegate()\r\n\t\tself._handlers[\"player-quit\"] = FunctionDelegate()\r\n\t\tself._handlers[\"game-start\"] = FunctionDelegate()\r\n\t\tself._handlers[\"game-stop\"] = FunctionDelegate()", "def __init__(self):\n self.setup_called = False", "def _setup(self):\n if self.image_blob_key:\n self.fullsize_url = self.get_image_url()\n self.thumbnail_url = self.get_image_url(self.DEFAULT_THUMBNAIL_SIZE)\n if self.is_saved():\n key = self.key()\n self.num_votes = Vote.all().filter(\"photo_id =\", key.id()).count()\n template = '%s/index.html?photoId=%s%s'\n self.vote_cta_url = template % (\n handlers.get_base_url(), key.id(), '&action=VOTE')\n template = '%s/photo.html?photoId=%s'\n self.photo_content_url = template % (\n handlers.get_base_url(), key.id())\n else:\n self.num_votes = 0", "def setup(self, rc):\n pass", "def setup(self):\n self.log.debug('RFSwitch - in RFSwitch setup()')\n # Add resource setup code here\n print(\"Calling RFSwitch:setup\")", "def test_setup(self):\n assert 
self.http_handler.setup() is None\n self.assert_quantity_in_outbox(0)", "def _configure(self):\n pass", "def Setup(cls):\n\t\tif cls.SINGLETON is None:\n\t\t\tcls.SINGLETON = Signals()\n\t\tcls.SINGLETON.setup()", "def __init__(self, handler):\n self._handler = handler\n self._storage = []", "def setup(self):\n self.create_songs_index()\n self.create_fingerprints_index()\n self.delete_unfingerprinted()", "def __init__(self, handler_factory):\n self.handler_factory = handler_factory", "def setUpClass(cls):\n cls.setup_log()\n cls.setup_conn()\n cls.setup_cache()\n cls.setup_params()", "def setup_script(self, *args, **kwargs):\n pass", "async def setup(self, context: InjectionContext):", "def setup(self):\r\n \r\n if self.requestedAction == admin.ACTION_EDIT or self.requestedAction == admin.ACTION_CREATE:\r\n \r\n # Set the required parameters\r\n for arg in RadiusAuthRestHandler.REQUIRED_PARAMS:\r\n self.supportedArgs.addReqArg(arg)\r\n \r\n # Set up the valid parameters\r\n for arg in RadiusAuthRestHandler.VALID_PARAMS:\r\n if arg not in RadiusAuthRestHandler.REQUIRED_PARAMS:\r\n self.supportedArgs.addOptArg(arg)", "async def prepare(self):\n pass", "def __init__(self):\n from django.core.handlers.wsgi import WSGIHandler as djhandler\n self._handler = djhandler()", "def _setup(self) -> None:\n self._api = get_api(\n self._password,\n self._host,\n self._username,\n self._port,\n self._ssl,\n )\n\n self._info = self._api.get_info()\n self.device_name = self._info.get(\"DeviceName\", DEFAULT_NAME)\n self.model = self._info.get(\"ModelName\")\n self.firmware_version = self._info.get(\"Firmwareversion\")\n\n for model in MODELS_V2:\n if self.model.startswith(model):\n self._method_version = 2" ]
[ "0.74647546", "0.7427192", "0.74090225", "0.74090225", "0.73212504", "0.7227166", "0.7225722", "0.7213479", "0.7213479", "0.7213479", "0.7213479", "0.7213479", "0.7210512", "0.7194558", "0.7194558", "0.7176307", "0.7176307", "0.7176307", "0.7176307", "0.7132219", "0.7116914", "0.7105827", "0.70878446", "0.7074322", "0.70640355", "0.70131993", "0.70002794", "0.6995695", "0.69829434", "0.6973218", "0.69602334", "0.69602334", "0.69602334", "0.69602334", "0.69602334", "0.69602334", "0.69602334", "0.69602334", "0.69602334", "0.69602334", "0.69602334", "0.69602334", "0.6956978", "0.6893244", "0.6817163", "0.6798899", "0.6763085", "0.6737236", "0.67066085", "0.67063206", "0.6677959", "0.6656652", "0.6650302", "0.6650302", "0.6650302", "0.6650302", "0.66454536", "0.66397196", "0.66157866", "0.6608052", "0.6595438", "0.65815175", "0.6567989", "0.65664196", "0.65442306", "0.6527076", "0.651796", "0.65130967", "0.64878744", "0.6479978", "0.6473933", "0.64683807", "0.6462236", "0.6455409", "0.645479", "0.64181376", "0.6382526", "0.6377296", "0.6373025", "0.6363469", "0.635759", "0.63569665", "0.6345585", "0.63451546", "0.6320587", "0.6311438", "0.63064384", "0.6300261", "0.62953866", "0.628488", "0.62824607", "0.6281564", "0.62660784", "0.6263719", "0.6263057", "0.6260547", "0.6224438", "0.6217898" ]
0.7172614
21
Implement the reaction to a message.
def handle(self, message: Message) -> None:
        self.handled_message = message
        envelope = Envelope(
            to=message.counterparty,
            sender=self.context.agent_address,
            protocol_id=TwoPartyNegotiationMessage.protocol_id,
            message=self.encoded_message_2_in_bytes,
        )
        self.context.outbox.put(envelope)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def custom_mqtt_reaction(self, topic, message):\n raise NotImplementedError(\"Must override method custom_mqtt_reaction\")", "async def process(self, chan_id: str, msg_id: str, emoji: str, member: discord.Member, add: bool):\n logger.debug(f\"Processing reaction: [ add: {add}, msg_id: {msg_id}, emoji: {emoji}, member: {member} ]\")\n\n if isinstance(chan_id, int):\n chan_id = str(chan_id)\n \n if not isinstance(chan_id, str):\n raise TypeError(f\"Arg 1: Expected a channel id (str)! (got {type(chan_id)} instead)\")\n \n if isinstance(msg_id, int):\n msg_id = str(msg_id)\n \n if not isinstance(msg_id, str):\n raise TypeError(f\"Arg 2: Expected a message id (str)! (got {type(msg_id)} instead)\")\n\n\n if not chan_id in self._messages:\n logger.debug(\"No message is listened to in this channel.\")\n return\n\n if not msg_id in self._messages[chan_id]:\n logger.debug(\"The message was not listened to.\")\n return\n \n if not emoji in self._messages[chan_id][msg_id][\"reactions\"]:\n logger.debug(\"The emoji wasn't listened to.\")\n return\n \n logger.debug(\"The reaction is listened to! Calling callbacks!\")\n\n if add:\n callbacks = self._messages[chan_id][msg_id][\"reactions\"][emoji][\"add_callbacks\"]\n else:\n callbacks = self._messages[chan_id][msg_id][\"reactions\"][emoji][\"rm_callbacks\"]\n \n for callback in callbacks:\n await callback(msg_id, emoji, member)", "async def handle(self, message: discord.Message):\n raise NotImplementedError()", "def onMessage(self, message):\n raise NotImplementedError", "async def on_raw_reaction_add(self, payload):\n\n\t\tguild = self.bot.get_guild(payload.guild_id)\n\t\tif guild is not None:\n\t\t\tchannel = guild.get_channel(payload.channel_id)\n\t\t\tmessage = await channel.fetch_message(payload.message_id)\n\t\t\tuser = guild.get_member(payload.user_id)\n\n\t\t\t# Update cached leaderboards\n\t\t\tif not payload.member.bot:\n\t\t\t\tif payload.message_id in self.cachedMessages:\n\t\t\t\t\tif payload.emoji.name == \"➡️\":\n\t\t\t\t\t\tawait self.update_leaderboard_message(message, 1)\n\t\t\t\t\t\tawait message.remove_reaction(\"➡️\", user)\n\t\t\t\t\telif payload.emoji.name == \"⬅️\":\n\t\t\t\t\t\tawait self.update_leaderboard_message(message, -1)\n\t\t\t\t\t\tawait message.remove_reaction(\"⬅️\", user)\n\n\t\t\t# Update reaction leaderboards\n\t\t\tif not payload.member.bot:\n\t\t\t\treactionLeaderboard = self.leaderboards[str(payload.guild_id)][\"reactionLeaderboard\"]\n\n\t\t\t\tif payload.emoji.id is not None:\n\t\t\t\t\tfor guildEmoji in guild.emojis:\n\t\t\t\t\t\tif payload.emoji.id == guildEmoji.id:\n\t\t\t\t\t\t\tif (\"<:\" + str(payload.emoji.name) + \":\" + str(payload.emoji.id) + \">\") not in reactionLeaderboard:\n\t\t\t\t\t\t\t\treactionLeaderboard[\"<:\" + str(payload.emoji.name) + \":\" + str(payload.emoji.id) + \">\"] = 1\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\treactionLeaderboard[\"<:\" + str(payload.emoji.name) + \":\" + str(payload.emoji.id) + \">\"] += 1\n\n\n\n\t\t\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tif payload.emoji.name not in reactionLeaderboard:\n\t\t\t\t\t\treactionLeaderboard[str(payload.emoji.name)] = 1\n\t\t\t\t\telse:\n\t\t\t\t\t\treactionLeaderboard[str(payload.emoji.name)] += 1\n\n\t\t\t\tif str(payload.emoji.id) in self.leaderboards[str(payload.guild_id)][\"emojiLeaderboard\"]:\n\t\t\t\t\tself.leaderboards[str(payload.guild_id)][\"emojiLeaderboard\"][str(payload.emoji.id)] += 1", "def addReaction(self, *args):\n return _libsbml.Model_addReaction(self, *args)", "def react(self, request, *args, **kwargs):\n 
comment = self.get_object()\n serializer = ReactionCommentModelSerializer(\n data=request.data, context={'user': request.user, 'comment': comment})\n \n try:\n serializer.is_valid(raise_exception=True)\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n except AssertionError:\n return Response(\n {'message': \"The comment's reaction has been delete.\"}, status=status.HTTP_200_OK)", "async def reaction_on_reschedule_message(self, ctx, emoji, reschedule_message):\n if ctx.author.id != reschedule_message.opponent_user_id:\n return\n try:\n tournament = self.get_tournament(ctx.guild.id)\n tournament.current_bracket_id = reschedule_message.bracket_id\n if not tournament.current_bracket:\n raise tosurnament.UnknownError(\"Bracket not found\")\n if emoji.name == \"👍\":\n await self.agree_to_reschedule(ctx, reschedule_message, tournament)\n else:\n self.bot.session.delete(reschedule_message)\n ally_to_mention = None\n if reschedule_message.ally_team_role_id:\n ally_to_mention = tosurnament.get_role(ctx.guild.roles, reschedule_message.ally_team_role_id)\n if not ally_to_mention:\n ally_to_mention = ctx.guild.get_member(reschedule_message.ally_user_id)\n if ally_to_mention:\n await self.send_reply(ctx, \"refused\", ally_to_mention.mention, reschedule_message.match_id)\n else:\n raise tosurnament.OpponentNotFound(ctx.author.mention)\n except Exception as e:\n await self.on_cog_command_error(ctx, e)", "def messageReceived(self, message):\n raise NotImplementedError(self)", "async def route_message(self, msg):\n raise NotImplementedError", "def receive_message(self, message):\r\n return", "def _handle_message(self, msg):\n self.event('message', msg)", "async def on_raw_reaction_add(self, payload: discord.RawReactionActionEvent):\n # Get the message id of the message that the user reacted to.\n message_id = payload.message_id\n\n # Get the message id of the message we want the user to react to.\n actual_message_id = constants.MessageIDs.RULES_MSGID\n\n # Compare that id's match, and if true continue to give the role.\n if message_id == actual_message_id:\n guild_id = payload.guild_id\n guild = self.bot.get_guild(guild_id)\n role = get(payload.member.guild.roles, name='Not Verified')\n\n if role is not None:\n member = get(guild.members, id=payload.user_id)\n if member is not None:\n await payload.member.add_roles(role)\n print(f\"Added role to {member}\")\n else:\n print(\"User not found . . .\")\n else:\n print(\"Role not found . . 
.\")", "def message_react(channel, message, reaction):\n\n if not settings.SLACK_TOKEN:\n return {'ok': False, 'error': 'config_error'}\n\n client = WebClient(token=settings.SLACK_TOKEN)\n\n try:\n response = client.reactions_add(channel=channel, timestamp=message, name=reaction)\n assert response['ok'] is True\n return response\n except SlackApiError as e:\n assert e.response['ok'] is False\n return e.response", "def receive_message(self, message):", "def received(self, message):\n raise NotImplementedError()", "def handle_message(self, message):", "def process(self, msg):\n raise NotImplemented", "def handle_message(self, msg):\n pass", "async def on_raw_reaction_add(self, payload: discord.RawReactionActionEvent):\n \n guild = self.bot.get_guild(payload.guild_id)\n user = discord.utils.get(guild.members, id=payload.user_id)\n # Skipping bot reactions\n if user.bot:\n return\n\n # Search the event by message id\n message_id = payload.message_id\n channel_id = payload.channel_id\n guild_id = payload.guild_id\n event = connector.getEventByMessage(guild_id, message_id, channel_id)\n if event is None:\n return\n\n role = discord.utils.get(payload.member.guild.roles, id=event.role_id)\n\n if (payload.event_type == \"REACTION_ADD\") and (event.emoji == str(payload.emoji)):\n # Adding role to user\n await payload.member.add_roles(role)\n elif (payload.event_type == \"REACTION_REMOVE\") and (event.emoji == str(payload.emoji)) and (role in payload.member.roles):\n # Remove role from user\n await payload.member.remove_roles(role)", "def on_receive(self, msg):\n raise NotImplementedError", "def msg(self, message, **kwargs):\n self.crafter.msg(message, {\"type\": \"crafting\"})", "def handle(self, message):", "def receive_message(self, message):\r\n self.state.receive_message(message)\r\n return", "async def process(self, message):\n return await self.dispatcher.dispatch(message)", "async def react_with_action(\n pre_command, message: Message, is_private: bool, guild_id: int, author_id: int\n):\n # Change nickname\n if (\n is_whitelisted(\"nickname_auto_change\", guild_id)\n and message.author.permissions_in(message.channel).change_nickname\n ):\n try:\n if len(message.content.split(\" \")) > 2:\n if (\n message.content.split(\" \")[0] == \"i\",\n message.content.split(\" \")[1],\n ) == (\"i\", \"am\"):\n new_name = message.content.partition(\" \")[2].partition(\" \")[2]\n await change_nickname_with(pre_command, message, new_name)\n return {ACTION: True}\n if (\n message.content.split(\" \")[0] == \"i\"\n and message.content.split(\" \")[1] == \"am\"\n ):\n new_name = message.content.partition(\" \")[2].partition(\" \")[2]\n if len(new_name) <= 32:\n await change_nickname_with(pre_command, message, new_name)\n return {ACTION: True}\n if len(message.content.split(\" \")) > 1:\n if message.content.split(\" \")[0] in [\"i'm\", \"im\"]:\n new_name = message.content.partition(\" \")[2]\n if len(new_name) <= 32:\n await change_nickname_with(pre_command, message, new_name)\n return {ACTION: True}\n except Forbidden:\n pass\n\n # Add reaction\n if (\n message.author.id\n in [constants.KAPPAid, constants.RAZid, constants.POLYid, constants.NYAid]\n and is_whitelisted(\"uumuu_reaction\", message.guild.id)\n and message.content.lower() in [\"owo\", \"uwu\", \"umu\"]\n ):\n if await pre_command(\n message=message,\n channel=message.channel,\n command=\"uumuu_reaction\",\n delete_message=False,\n is_typing=False,\n ):\n await message.add_reaction(\":uumuu:715594968328175687\")\n return {ACTION: True}\n return {}", 
"def on_action(self, message):\n with self.handler.wrapee as wrapee:\n log.debug(\"Calling {method} on {name}\", method=message['action'], name=self.name)\n try:\n func = getattr(wrapee, message['action'])\n except AttributeError as ex:\n log.warn(\"Trying to call a method {method} that does not exsist!\",\n method=ex.args[0])\n return\n res, msg = func(*message['args'])\n if not res:\n log.warn(\"Error while calling {method}: {msg}\", msg=msg,\n method=message['action'])\n else:\n log.debug(\"Called method succesfully\")\n for protocol in self.service.protocols:\n protocol.send_packet()\n if msg != '':\n protocol.send_news(msg)", "def _add_message(self, chan_id: str, msg_id: str):\n if not msg_id in self._messages[chan_id]:\n self._messages[chan_id][msg_id] = {\n \"reactions\": {}\n }\n else:\n raise ValueError(\"ReactionListener tried to create space for an already listened message!\")", "def _intent(self) -> MessageIntent:\r\n pass", "def run(self):\n alogger.info(\"Recieved message from %s, Message: (%d) %s\" % (self.client.getaddress(), self.action_type, self.message))\n \n #Try to call th function associated with this message type.\n #format = \"handle_<type>\" (eg: handle_100)\n fn = globals().get(\"handle_\" + str(self.action_type))\n if fn and callable(fn):\n fn(self.message, self.address, self.client)\n else:\n alogger.info(\"Received unknown message from %d, type: %d\" % (self.client.getaddress(), self.action_type))", "def receive(self, message):", "async def on_raw_reaction_add(self, payload):\n emoji = str(payload.emoji)\n member = payload.member\n\n if member.bot:\n return\n\n channel = await self.bot.fetch_channel(payload.channel_id)\n message = await channel.fetch_message(payload.message_id)\n\n if emoji != settings.get_ticket_create_emoji():\n return\n \n if len(message.embeds) == 0 or message.embeds[0].title != settings.get_ticket_panel_embed().title:\n return\n \n await message.remove_reaction(emoji, member)\n await self.create_ticket(member,message.guild)", "async def edit(self, ctx: commands.Context, message_id: int):\n\n # Standard wait_for check function for message inputs, makes sure the command user's messages in command channel are considered\n def message_check(m: discord.Message):\n return m.author == ctx.author and m.channel == ctx.channel\n\n # Standard reaction check that ensures no duplicate reacrole entry, just name the relevant message 'm' before adding this one to check kwarg in wait_for\n def reaction_check_nd(_r: discord.Reaction, _u):\n return _u == ctx.author and _r.message == m and str(_r.emoji) not in self._cache[ctx.guild.id][PM.id]\n\n if message_id in self._cache[ctx.guild.id]:\n\n # Not actually channel id int but I decided to name it that way anyway\n chanid = await self.bot.pool.fetchrow(\"SELECT channelid FROM selfrole_lookup WHERE messageid = $1\", message_id)\n chan: discord.TextChannel = ctx.guild.get_channel(chanid['channelid'])\n\n # Currently need message content for title, might start saving title in db to avoid this api call idk\n try:\n PM: discord.Message = await chan.fetch_message(message_id)\n except discord.NotFound:\n await ctx.send(\"It would seem that the message for the role menu you're trying to edit has been deleted, please try creating a new one\")\n return\n\n buttons = [\"\\U0001f1e6\", \"\\U0001f1e7\", \"\\U0001f1e8\", \"\\U0001f1e9\"]\n\n e1 = discord.Embed(title=\"What aspect of the menu do you wish to change?\",\n description=\"\\U0001f1e6 - Add a role\\n\\n\"\n \"\\U0001f1e7 - Remove existing role\\n\\n\"\n 
\"\\U0001f1e8 - Edit the reaction of a role\\n\\n\"\n \"\\U0001f1e9 - Change the title\",\n colour=discord.Colour.blue())\n # Send the initial menu\n menu = await ctx.send(embed=e1)\n\n for button in buttons:\n await menu.add_reaction(button)\n\n # We need the first reaction where the emoji is one of the buttons\n def button_check(_r, _u):\n return _u == ctx.author and _r.message == menu and str(_r.emoji) in buttons\n # Get the option the user chose\n try:\n r, u = await self.bot.wait_for('reaction_add', check=button_check, timeout=20)\n except asyncio.TimeoutError:\n await ctx.send(\"Timed out\")\n return\n\n # If user wanted to add a new role to the menu\n if str(r.emoji) == buttons[0]:\n await menu.clear_reactions()\n await menu.edit(content=\"What role do you wish to be added? Enter its mention, id, or name\", embed=None)\n\n # Get the role object for the new role to be added\n try:\n m = await self.bot.wait_for('message', check=message_check, timeout=30)\n newrole = await self.rc.convert(ctx, m.content)\n\n if newrole.id in self._cache[ctx.guild.id][PM.id].values():\n await ctx.send(\"Error: role already exists in the menu, perhaps you meant to edit it?\")\n return\n except asyncio.TimeoutError:\n await ctx.send(\"Timed out\")\n return\n except commands.BadArgument:\n await ctx.send(\"Role not found, please try again\")\n return\n\n m = await ctx.send(f\"React on this message with the reaction that will correspond to the role `{newrole}`\")\n\n # Get the reaction/emoji that will correspond to the new role and yank everything into db\n try:\n r, u = await self.bot.wait_for('reaction_add', check=reaction_check_nd, timeout=30)\n self._cache[ctx.guild.id][PM.id][str(r.emoji)] = newrole.id\n\n query = \"\"\"\n INSERT INTO selfrole (messageid, emoji, roleid)\n VALUES ($1, $2, $3)\n \"\"\"\n\n await self.bot.pool.execute(query, PM.id, str(r.emoji), newrole.id)\n\n # Standard way of getting the embed description of the role menu\n newmenudesc = \"\\n\\n\".join([f\"{k} - {ctx.guild.get_role(v)}\" for k, v in self._cache[ctx.guild.id][PM.id].items()])\n\n newembed = discord.Embed(title=PM.embeds[0].title,\n description=newmenudesc,\n colour=discord.Colour.blue())\n\n await PM.edit(embed=newembed)\n await PM.add_reaction(r.emoji)\n await ctx.send(\"Menu edit completed successfully, you may now delete the messages other than the menu itself\")\n\n except asyncio.TimeoutError:\n await ctx.send(\"Timed out\")\n\n elif str(r.emoji) == buttons[1]:\n # Gotta yank the buttons to make everything squeaky clean\n await menu.clear_reactions()\n await menu.edit(content=\"Enter the role you wish to remove from the menu, can be mention, id or name\",\n embed=None)\n\n try:\n # Get role from user\n m = await self.bot.wait_for('message', check=message_check, timeout=20)\n role = await self.rc.convert(ctx, m.content)\n\n # If user trying to edit reaction to role that wasn't even in the menu to begin with\n if role.id not in self._cache[ctx.guild.id][PM.id].values():\n raise commands.BadArgument(\"Role not in cache\")\n\n # Get the key to delete using the old fashioned way, and subsequently delete it\n targetkey = \"\"\n for key, value in self._cache[ctx.guild.id][PM.id].items():\n if value == role.id:\n targetkey = key\n break\n self._cache[ctx.guild.id][PM.id].pop(targetkey)\n\n # After everything is done and dusted, make database entry and edit the menu\n query = \"\"\"\n DELETE FROM selfrole WHERE messageid = $1 AND roleid = $2\n \"\"\"\n await self.bot.pool.execute(query, PM.id, role.id)\n newmenudesc = 
\"\\n\\n\".join(\n [f\"{k} - {ctx.guild.get_role(v)}\" for k, v in self._cache[ctx.guild.id][PM.id].items()])\n\n newembed = discord.Embed(title=PM.embeds[0].title,\n description=newmenudesc,\n colour=discord.Colour.blue())\n await PM.edit(embed=newembed)\n await PM.clear_reaction(targetkey)\n await ctx.send(\n \"Menu edit completed successfully, you may now delete the messages other than the menu itself\")\n except asyncio.TimeoutError:\n await ctx.send(\"Timed out\")\n return\n except commands.BadArgument:\n await ctx.send(\"I don't think that role exists in that menu, run the command again\")\n return\n\n elif str(r.emoji) == buttons[2]:\n # Same drill, remove buttons to make it look clean\n await menu.clear_reactions()\n await menu.edit(embed=None, content=\"Enter the role for which you wish to change the reaction.\")\n\n try:\n m = await self.bot.wait_for('message', check=message_check, timeout=20)\n role = await self.rc.convert(ctx, m.content)\n\n if role.id not in self._cache[ctx.guild.id][PM.id].values():\n raise commands.BadArgument(\"Role not in cache\")\n\n except asyncio.TimeoutError:\n await ctx.send(\"Timed out\")\n return\n except commands.BadArgument:\n await ctx.send(\"Couldn't find the role you wished to edit in the menu\")\n return\n\n # Get the reaction/emoji that will correspond to the new role and yank everything into db\n m = await ctx.send(f\"React on this message with the new reaction that will correspond to the role {role}\")\n try:\n r, u = await self.bot.wait_for('reaction_add', check=reaction_check_nd, timeout=30)\n\n # Can only delete entry if have the key so....\n TargetKey = \"\" # Set default value so IDE stops screaming\n for k, v in self._cache[ctx.guild.id][PM.id].items():\n if v == role.id:\n TargetKey = k\n\n # Make new entry and delete the old one\n self._cache[ctx.guild.id][PM.id][str(r.emoji)] = role.id\n self._cache[ctx.guild.id][PM.id].pop(TargetKey)\n\n # After everything is done and dusted, at last update the database entry\n await self.bot.pool.execute(\"UPDATE selfrole SET emoji = $1 WHERE roleid = $2 AND messageid = $3\", str(r.emoji), role.id, PM.id)\n\n # Hehehehehehe\n newmenudesc = \"\\n\\n\".join(\n [f\"{k} - {ctx.guild.get_role(v)}\" for k, v in self._cache[ctx.guild.id][PM.id].items()])\n\n newembed = discord.Embed(title=PM.embeds[0].title,\n description=newmenudesc,\n colour=discord.Colour.blue())\n\n await PM.edit(embed=newembed)\n await PM.clear_reaction(TargetKey)\n await PM.add_reaction(str(r.emoji))\n await ctx.send(\n \"Menu edit completed successfully, you may now delete the messages other than the menu itself\")\n except asyncio.TimeoutError:\n await ctx.send(\"Timed out\")\n return\n\n elif str(r.emoji) == buttons[3]:\n # This one speaks for itself I think.\n await menu.clear_reactions()\n await menu.edit(embed=None, content=\"Enter the new title you want the menu to have\")\n try:\n m = await self.bot.wait_for('message', check=message_check, timeout=30)\n e = discord.Embed(title=f\"Role menu: {m.content}\",\n description=PM.embeds[0].description,\n colour=PM.embeds[0].colour)\n await PM.edit(embed=e)\n except asyncio.TimeoutError:\n await ctx.send(\"Timed out\")\n return\n\n else:\n await ctx.send(\"Menu not found in this server, double check if the id was entered correctly\")", "def handle(self, message):\n print(\"You received a message:\")\n print(message)\n # Overwrite this function to do something with the message!", "def consume_user_message(self, message):\n pass", "async def update_reactions(self, message, data):\n\n 
emojis = []\n\n for role in data.roles.all():\n if role.emoji.startswith(\":\") and role.emoji.endswith(\":\"):\n em = discord.utils.get(message.guild.emojis, name=role.emoji[1:-1])\n emojis.append(em)\n else:\n emojis.append(role.emoji)\n\n for emoji in emojis:\n await message.add_reaction(emoji)\n\n for reaction in message.reactions:\n if reaction.emoji not in emojis:\n await message.clear_reaction(reaction.emoji)", "def createReaction(self):\n return _libsbml.Model_createReaction(self)", "def Message(self, *args, **kwargs):\n pass", "def reply(self, message):\n self.logger.info(\"message came as {}\".format(message))\n message = message.lower()\n if message in [\"start over\", \"get started\", \"hello\", \"hi\", \"say hello\"]:\n self.params = \"\"\n self.readyseteatparams = \"\"\n # self.api.send_text_facebook(\n # self.user_id,\n # 'What type of recipe would you like to make? You can type \"start over\" at any time'\n # )\n # return self.api.send_facebook(self.user_id, self.config.QUESTION_MAIN)\n self.send_welcome_messages()\n return self.api.send_facebook(self.user_id, self.config.QUICK_REPLY_MAIN)\n if message in [\"more\", \"show more\"] and self.data:\n self.index += 5\n m_data = self.config.DEFAULT_TEMPLATE.copy()\n m_data[\"message\"][\"attachment\"][\"payload\"][\"elements\"] = self.data[self.index:self.index + 3]\n return self.api.send_facebook(self.user_id, m_data)\n if message == \"ask-tomorrow-payload\":\n self.usersModule.makeNotificationDaily(self.user_id)\n return self.api.send_text_facebook(self.user_id, \"This notification has been set up.\")\n if message == \"ask-week-payload\":\n self.usersModule.makeNotificationWeekly(self.user_id)\n return self.api.send_text_facebook(self.user_id, \"This notification has been set up.\")\n if message == \"activate notifications\":\n self.usersModule.makeNotificationDaily(self.user_id)\n return self.api.send_text_facebook(self.user_id, \"Notification has been activated.\")\n if message in [\"do-nothing\", \"payload_unsubscribe\"]:\n if message == \"payload_unsubscribe\":\n self.usersModule.deactivateNotification(self.user_id)\n return self.api.send_text_facebook(\n self.user_id,\n 'Notification has been deactivated. 
You can type \"start over\" anytime.')\n else:\n return self.api.send_text_facebook(\n self.user_id,\n 'You can type \"start over\" when you are looking for new recipes.')\n\n try:\n title, choice = message.split(\"_\")\n except:\n title = None\n choice = message\n\n if title == \"category\":\n self.params = \"\"\n self._type = choice\n if choice == \"dinner\":\n self.params += \"&category=89\"\n self.readyseteatparams += \"&category=89\"\n # self.api.send_text_facebook(self.user_id, \"Select a main ingredient:\")\n # return self.api.send_facebook(self.user_id, self.config.DINNER_INGREDIENTS)\n return self.api.send_facebook(self.user_id, self.config.DINNER_GUICK_REPLY)\n elif choice == \"dessert\":\n self.params += \"&category=88\"\n self.readyseteatparams += \"&category=88\"\n # self.api.send_text_facebook(self.user_id, \"What kind of dessert would you like to make?\")\n # return self.api.send_facebook(self.user_id, self.config.DESSERTS)\n return self.api.send_facebook(self.user_id, self.config.DESSERTS_QUICK_REPLY)\n elif choice == \"breakfast\":\n self.params += \"&category=87\"\n self.readyseteatparams += \"&category=87\"\n # self.api.send_text_facebook(self.user_id, \"What kind of breakfast do you want?\")\n # return self.api.send_facebook(self.user_id, self.config.BREAKFAST_QUESTION)\n return self.api.send_facebook(self.user_id, self.config.BREAKFAST_QUICK_REPLY)\n elif choice == \"appetizer\":\n self.params += \"&category=85\"\n self.readyseteatparams += \"&category=85\"\n # self.api.send_text_facebook(self.user_id, \"What kind of appetizer or snack sounds good?\")\n # return self.api.send_facebook(self.user_id, self.config.APPETIZER_QUESTION)\n return self.api.send_facebook(self.user_id, self.config.APPETIZER_QUICK_REPLY)\n elif choice == \"side dish\":\n self.params += \"&category=95\"\n self.readyseteatparams += \"&category=95\"\n # self.api.send_text_facebook(self.user_id, \"Select a main ingredient\")\n # return self.api.send_facebook(self.user_id, self.config.SIDE_DISH_QUESTION)\n return self.api.send_facebook(self.user_id, self.config.SIDE_DISH_QUICK_REPLY)\n else:\n return self.api.send_text_facebook(self.user_id,\n \"I don't know answer that belongs to {} yet\".format(message))\n\n if title == \"main-ingredient\":\n self.mainIngredient = choice\n if choice == \"chicken\":\n self.params += \"&mainingredient=76\"\n self.readyseteatparams += \"&mainingredient=76\"\n elif choice == \"beef\":\n self.params += \"&mainingredient=70\"\n self.readyseteatparams += \"&mainingredient=70\"\n elif choice == \"pork\":\n self.params += \"&mainingredient=249\"\n self.readyseteatparams += \"&mainingredient=249\"\n elif choice == \"seafood\":\n self.params += \"&mainingredient=73\"\n self.readyseteatparams += \"&mainingredient=73\"\n elif choice == \"pasta\":\n self.params += \"&mainingredient=272\"\n self.readyseteatparams += \"&mainingredient=272\"\n elif choice == \"vegetarian\":\n self.params += \"&lifestyle=299\"\n self.readyseteatparams += \"&lifestyle=299\"\n return self.api.send_facebook(self.user_id, self.config.TIME_QUICK_REPLY)\n if title == \"bre-time\":\n self.breakfastTime = choice\n if choice == \"15\":\n self.params += \"&totaltime=15\"\n self.readyseteatparams += \"&totaltime=15\"\n elif choice == \"30\":\n self.params += \"&totaltime=30\"\n self.readyseteatparams += \"&totaltime=30\"\n elif choice == \"45\":\n pass\n recipes = self.api.getRecipes(self.params)\n if not recipes:\n return self.send_no_results()\n viewMoreUrl = 
self.api.prepareViewMoreUrl(self.readyseteatparams)\n elems = self.api.prepareRecipes(recipes, viewMoreUrl)\n self.data = elems\n m_data = self.config.DEFAULT_TEMPLATE.copy()\n m_data[\"message\"][\"attachment\"][\"payload\"][\"elements\"] = self.data[self.index:self.index + 3]\n r = self.api.send_facebook(self.user_id, m_data)\n self.logger.warning(r)\n return\n if title == \"time\":\n self.time = choice\n self.params += \"&totaltime={}\".format(choice)\n self.readyseteatparams += \"&totaltime={}\".format(choice)\n # self.api.send_text_facebook(self.user_id, \"What sounds Good?\")\n # return self.api.send_facebook(self.user_id, self.config.REGION_DINNER_QUESTION)\n return self.api.send_facebook(self.user_id, self.config.REGION_QUICK_REPLY)\n\n if title == \"region\":\n self.region = choice\n if choice == \"asian\":\n self.params += \"&cuisine=44\"\n self.readyseteatparams += \"&cuisine=44\"\n elif choice == \"italian\":\n self.params += \"&cuisine=46\"\n self.readyseteatparams += \"&cuisine=46\"\n elif choice == \"mediterranean\":\n self.params += \"&cuisine=367\"\n self.readyseteatparams += \"&cuisine=367\"\n elif choice == \"mexican\":\n self.params += \"&cuisine=45\"\n self.readyseteatparams += \"&cuisine=45\"\n elif choice == \"american\":\n self.params += \"&suppresstraits=44,35,355,46,367,45,356,261\"\n\n recipes = self.api.getRecipes(self.params)\n if not recipes:\n return self.send_no_results()\n viewMoreUrl = self.api.prepareViewMoreUrl(self.readyseteatparams)\n elems = self.api.prepareRecipes(recipes, viewMoreUrl)\n self.data = elems\n m_data = self.config.DEFAULT_TEMPLATE.copy()\n m_data[\"message\"][\"attachment\"][\"payload\"][\"elements\"] = self.data[self.index:self.index + 3]\n r = self.api.send_facebook(self.user_id, m_data)\n self.logger.warning(r)\n return\n\n if title == \"dessert\":\n self.dessert = choice\n if choice == \"cookies\":\n self.params += \"&trait=48,10,20,110&suppresstraits=22,24&keywords=cookies\"\n self.readyseteatparams += \"&trait=48,10,20,110&keywords=cookies\"\n elif choice == \"cakes\":\n self.params += \"&suppresstraits=24&keywords=cake\"\n self.readyseteatparams += \"&keywords=cake\"\n elif choice == \"pies\":\n self.params = \"sortby=season,rating&order=desc,desc&negativeingredientkeyword=pieces&keywords=pie&suppresstraits=24&category=88\"\n self.readyseteatparams = \"&negativeingredientkeyword=pieces&keywords=pie&category=88\"\n elif choice == \"healthier\":\n self.params += \"&goodforyou=257&goodforyou=258&goodforyou=260\"\n self.readyseteatparams += \"&goodforyou=257&goodforyou=258&goodforyou=260\"\n elif choice == \"seasonal\":\n self.params = \"sortby=season,newest,rating,publisheddate&order=desc,desc,desc,desc&category=88&season=330\"\n self.readyseteatparams = \"&category=88&season=330\"\n elif choice == \"quick\":\n self.params = \"&totaltime=30\"\n self.readyseteatparams = \"&totaltime=30\"\n\n recipes = self.api.getRecipes(self.params)\n if not recipes:\n return self.send_no_results()\n viewMoreUrl = self.api.prepareViewMoreUrl(self.readyseteatparams)\n elems = self.api.prepareRecipes(recipes, viewMoreUrl)\n self.data = elems\n m_data = self.config.DEFAULT_TEMPLATE.copy()\n m_data[\"message\"][\"attachment\"][\"payload\"][\"elements\"] = self.data[self.index:self.index + 3]\n r = self.api.send_facebook(self.user_id, m_data)\n self.logger.warning(r)\n return\n\n if title == \"breakfast\":\n self.breakfastIngredient = choice\n if choice == \"eggs\":\n self.params += \"&mainingredient=72\"\n self.readyseteatparams += 
\"&mainingredient=72\"\n self.params += \"&trait=9\"\n self.readyseteatparams += \"&trait=9\"\n elif choice == \"casserole\":\n self.params += \"&keywords=casserole\"\n self.readyseteatparams += \"&keywords=casserole\"\n elif choice == \"healthier\":\n self.params += \"&goodforyou=260&goodforyou=258\"\n self.readyseteatparams += \"&goodforyou=260&goodforyou=258\"\n elif choice == \"sweet\":\n self.params += \"&trait=22\"\n self.readyseteatparams += \"&trait=22\"\n # will add something sweet\n pass\n return self.api.send_facebook(self.user_id, self.config.BREAKFAST_TIME_QUICK_REPLY)\n\n if title == \"appetizer\":\n self.appetizerIng = choice\n if choice == \"cheesy\" or choice == \"meaty\":\n if choice == \"cheesy\":\n self.params += \"&keywords=cheese\"\n self.readyseteatparams += \"&keywords=cheese\"\n elif choice == \"meaty\":\n self.params += \"&mainingredient=70&mainingredient=76&mainingredient=249\"\n self.readyseteatparams += \"&mainingredient=70&mainingredient=76&mainingredient=249\"\n recipes = self.api.getRecipes(self.params)\n if not recipes:\n return self.send_no_results()\n viewMoreUrl = self.api.prepareViewMoreUrl(self.readyseteatparams)\n elems = self.api.prepareRecipes(recipes, viewMoreUrl)\n self.data = elems\n m_data = self.config.DEFAULT_TEMPLATE.copy()\n m_data[\"message\"][\"attachment\"][\"payload\"][\"elements\"] = self.data[self.index:self.index + 3]\n r = self.api.send_facebook(self.user_id, m_data)\n self.logger.warning(r)\n return\n elif choice == \"veggies\" or choice == \"healthier\":\n if choice == \"veggies\":\n self.params += \"&mainingredient=77&mainingredient=310\"\n self.readyseteatparams += \"&mainingredient=77&mainingredient=310\"\n elif choice == \"healthier\":\n self.params += \"&goodforyou=260\"\n self.readyseteatparams += \"&goodforyou=260\"\n return self.api.send_facebook(self.user_id, self.config.HOT_OR_COLD_QUICK_REPLY)\n\n if title == \"hot-cold\":\n self.appetizerType = choice\n if choice == \"hot\":\n self.params += \"&suppresstraits=252\"\n elif choice == \"cold\":\n self.params += \"&cookingmethod=252\"\n self.readyseteatparams += \"&cookingmethod=252\"\n\n recipes = self.api.getRecipes(self.params)\n if not recipes:\n return self.send_no_results()\n viewMoreUrl = self.api.prepareViewMoreUrl(self.readyseteatparams)\n elems = self.api.prepareRecipes(recipes, viewMoreUrl)\n self.data = elems\n m_data = self.config.DEFAULT_TEMPLATE.copy()\n m_data[\"message\"][\"attachment\"][\"payload\"][\"elements\"] = self.data[self.index:self.index + 3]\n r = self.api.send_facebook(self.user_id, m_data)\n self.logger.warning(r)\n return\n\n if title == \"side-dish\":\n self.sideDish = choice\n if choice == \"potato\":\n self.params += \"&mainingredient=298\"\n self.readyseteatparams += \"&mainingredient=298\"\n elif choice == \"vegetable\":\n self.params += \"&mainingredient=77\"\n self.readyseteatparams += \"&mainingredient=77\"\n elif choice == \"rice\":\n self.params += \"&mainingredient=272\"\n self.readyseteatparams += \"&mainingredient=272\"\n elif choice == \"pasta\":\n self.params += \"&mainingredient=75\"\n self.readyseteatparams += \"&mainingredient=75\"\n elif choice == \"salad\":\n self.params = \"sortby=season,newest,rating,publisheddate&order=desc,desc,desc,desc&category=95&mainingredient=77\"\n self.readyseteatparams = \"&category=95&mainingredient=77&trait=92\"\n elif choice == \"beans\":\n self.params += \"&mainingredient=310\"\n self.readyseteatparams += \"&mainingredient=310\"\n\n recipes = self.api.getRecipes(self.params)\n if not 
recipes:\n return self.send_no_results()\n viewMoreUrl = self.api.prepareViewMoreUrl(self.readyseteatparams)\n elems = self.api.prepareRecipes(recipes, viewMoreUrl)\n self.data = elems\n m_data = self.config.DEFAULT_TEMPLATE.copy()\n m_data[\"message\"][\"attachment\"][\"payload\"][\"elements\"] = self.data[self.index:self.index + 3]\n r = self.api.send_facebook(self.user_id, m_data)\n self.logger.warning(r)\n return\n isParamInMessage = self.fetch_parameters(message)\n if isParamInMessage:\n recipes = self.api.getRecipes(self.params)\n if not recipes:\n return self.send_no_results()\n viewMoreUrl = self.api.prepareViewMoreUrl(self.readyseteatparams)\n elems = self.api.prepareRecipes(recipes, viewMoreUrl)\n self.data = elems\n m_data = self.config.DEFAULT_TEMPLATE.copy()\n m_data[\"message\"][\"attachment\"][\"payload\"][\"elements\"] = self.data[self.index:self.index + 3]\n r = self.api.send_facebook(self.user_id, m_data)\n self.logger.warning(r)\n return\n return self.api.send_text_facebook(self.user_id, \"You can write ‘start over’ to go to the first step\")", "async def on_raw_reaction_add(self, payload: discord.RawReactionActionEvent):\n\n # Skipping bot reactions\n if payload.member.bot:\n return\n\n # Search the event by message id\n message_id = payload.message_id\n channel_id = payload.channel_id\n guild_id = payload.guild_id\n event_type = connector.getEventTypeByMessage(guild_id, message_id, channel_id)\n if event_type is None:\n return\n\n role = discord.utils.get(payload.member.guild.roles, id=event_type.role_id)\n\n if (payload.event_type == \"REACTION_ADD\") and (event_type.emoji == str(payload.emoji)):\n # Adding role to user\n await payload.member.add_roles(role)", "async def redo(self, ctx: commands.Context):\n ref = ctx.message.reference\n if not ref:\n return\n try:\n message = await ctx.channel.fetch_message(ref.message_id)\n except NotFound:\n return await ctx.reply(\"Couldn't find that message\")\n if message.author != ctx.author:\n return\n await self.bot.process_commands(message)", "def addReactionGlyph(self, *args):\n return _libsbml.Layout_addReactionGlyph(self, *args)", "def cast(self, message):\n self._mailbox.append(message)\n # if this is the only message, the coro could be waiting\n if len(self._mailbox) == 1:\n self._event.send()", "async def message_edit_button(self, payload: discord.RawReactionActionEvent) -> None:\n\n self.bits = flip_action_bits(LoggingActions.MESSAGE_EDIT, self.bits)\n await self.update_embed()", "async def reactions(self, ctx):\n\n\t\tawait self.message_leaderboard(ctx, \"reactions\")", "def setReaction(self, *args):\n return _libsbml.FluxBound_setReaction(self, *args)", "def OnChanActionMessage(self, msg):\n self.handle_inbound_irc_msg(\"OnChanActionMessage\", msg)\n return znc.CONTINUE", "def RIPReaction(sc, event):\n sc.api_call('reactions.add', as_user='true', channel=event['channel'],\n timestamp=event['ts'], name='rip')", "def message_callback(self, message):\n pass", "async def handle_role_reaction_press(interaction: disnake.MessageInteraction):\n if interaction.message not in await ReactionRoleMessage.get_all():\n return\n\n role_id = int(interaction.component.custom_id)\n member: disnake.Member = interaction.author\n user = await User.get(member.id)\n role = member.get_role(role_id)\n if role:\n await member.remove_roles(role, reason=\"Reaction Role Message\")\n await send_message(user=user, key=\"role_removed\", inter=interaction, ephemeral=True)\n else:\n role = interaction.guild.get_role(role_id)\n if role:\n try:\n await 
member.add_roles(role, reason=\"Reaction Role Message\")\n await send_message(user=user, key=\"role_added\", inter=interaction, ephemeral=True)\n except disnake.errors.Forbidden as e:\n await send_message(user=user, key=\"no_permissions\", inter=interaction, ephemeral=True)\n else:\n await send_message(user=user, key=\"role_not_found\", inter=interaction, ephemeral=True)", "def process_message(self, message):\n self.post_to_redis(message)\n return", "def received_message(self, m):\n self.receiver.handle_message(m)", "def _process_msg(cls, msg):\n raise NotImplementedError", "def receive(self, msg):\n pass", "async def on_raw_reaction_remove(self, payload):\n\n\t\tguild = self.bot.get_guild(payload.guild_id)\n\t\tif guild is not None:\n\t\t\t# Update reaction leaderboards\n\t\t\treactionLeaderboard = self.leaderboards[str(payload.guild_id)][\"reactionLeaderboard\"]\n\n\t\t\tif payload.emoji.id is not None:\n\t\t\t\tfor guildEmoji in guild.emojis:\n\t\t\t\t\tif payload.emoji.id == guildEmoji.id:\n\t\t\t\t\t\treactionLeaderboard[\"<:\" + str(payload.emoji.name) + \":\" + str(payload.emoji.id) + \">\"] -= 1\n\t\t\t\t\t\tbreak\n\n\t\t\telse:\n\t\t\t\treactionLeaderboard[str(payload.emoji.name)] -= 1\n\n\t\t\tif str(payload.emoji.id) in self.leaderboards[str(payload.guild_id)][\"emojiLeaderboard\"]:\n\t\t\t\tself.leaderboards[str(payload.guild_id)][\"emojiLeaderboard\"][str(payload.emoji.id)] -= 1", "def _message(self, msg):\n\n self.log('Message received:', msg['body'], pretty=True)\n\n if msg['type'] in ('chat', 'normal'):\n body = str(msg['body'])\n if body.startswith('/'):\n cmd, arg_string = body.split(' ', maxsplit=1)\n cmd = cmd.lstrip('/')\n\n if arg_string:\n args = arg_string.split(' ')\n else:\n args = None\n\n self.log('IRC remote command received:', cmd, args)\n return\n else:\n if True:\n msg.reply(\"Sorry, I did not understand that:\\n%s\" % body).send()", "def handle(self, message: discord.Message, intent: Intent) -> Optional[str]:\n pass", "def msg(self, chan, msg):\n self._msg(chan, msg)", "def handleMessage(msg):", "def dispatch_message(self, message):\n\n self.log.debug(\"Incoming message %r\", message)\n if message.code.is_request():\n # Responses don't get deduplication because they \"are idempotent or\n # can be handled in an idempotent fashion\" (RFC 7252 Section 4.5).\n # This means that a separate response may get a RST when it is\n # arrives at the aiocoap client twice. 
Note that this does not\n # impede the operation of observations: Their token is still active\n # so they are ACK'd, and deduplication based on observation numbers\n # filters out the rest.\n #\n # This saves memory, and allows stateful transports to be shut down\n # expeditiously unless kept alive by something else (otherwise,\n # they'd linger for EXCHANGE_LIFETIME with no good reason).\n if self._deduplicate_message(message) is True:\n return\n\n if message.mtype in (ACK, RST):\n self._remove_exchange(message)\n\n if message.code is EMPTY and message.mtype is CON:\n self._process_ping(message)\n elif message.code is EMPTY and message.mtype in (ACK, RST):\n pass # empty ack has already been handled above\n elif message.code.is_request() and message.mtype in (CON, NON):\n # the request handler will have to deal with sending ACK itself, as\n # it might be timeout-related\n self._process_request(message)\n elif message.code.is_response() and message.mtype in (CON, NON, ACK):\n success = self._process_response(message)\n if success:\n if message.mtype is CON:\n self._send_empty_ack(message.remote, message.mid, reason=\"acknowledging incoming response\")\n else:\n # A peer mustn't send a CON to multicast, but if a malicious\n # peer does, we better not answer\n if message.mtype == CON and not message.remote.is_multicast_locally:\n self.log.info(\"Response not recognized - sending RST.\")\n rst = Message(mtype=RST, mid=message.mid, code=EMPTY, payload='')\n rst.remote = message.remote.as_response_address()\n self._send_initially(rst)\n else:\n self.log.info(\"Ignoring unknown response (which is not a unicast CON)\")\n else:\n self.log.warning(\"Received a message with code %s and type %s (those don't fit) from %s, ignoring it.\", message.code, message.mtype, message.remote)", "def handle_message(self, message):\n\n\t\tself.log.debug(\"%s handle_message %s\", self.name, message)\n\n\t\tif message[\"Type\"] == \"command\":\n\t\t\ttry:\n\t\t\t\tcommand_callable = \"command_%s\" % message[\"Message\"][\"command\"]\n\t\t\t\tif hasattr(self, command_callable) and callable(getattr(self, command_callable)):\n\t\t\t\t\tcall = getattr(self, command_callable)\n\t\t\t\t\tcall(message[\"Message\"][\"arguments\"])\n\t\t\texcept Exception as e:\n\t\t\t\tself.log.error(\"%s invalid command %s %s\", self.name, message, e)", "def addReaction(self, *args, **kwargs):\n if isinstance(args[0], Reaction):\n reaction = args[0]\n else:\n reaction = self._sim.reaction(*args, **kwargs)\n\n self._sim.assignReaction(reaction, self)\n return self", "def message_routed(self, message):\n \n # Send it through the transport\n self.send_message(message = message)", "async def _on_raw_reaction(\n self,\n payload: RawReactionActionEvent,\n reaction_type: EnumReactionType,\n ) -> None:\n if self.__is_self(payload.user_id):\n print(\"reaction added by the bot itself\")\n return\n\n guild = self._client.get_guild(payload.guild_id)\n\n if reaction_type == EnumReactionType.ADD:\n await self.__roles.add_role(\n guild, payload.message_id,\n payload.emoji, payload.user_id,\n\n )\n elif reaction_type == EnumReactionType.REMOVE:\n await self.__roles.remove_role(\n guild, payload.message_id,\n payload.emoji, payload.user_id,\n )\n else:\n raise InvalidReactionType", "def handle_message(self, message):\n print \"[WARNING] No message handling implemented!\"", "def dispatch(self, message):\n data = ujson.loads(message)\n command = data.get(\"command\", \"no command field!\")\n if command in self._command_hash_views:\n 
self._command_hash_views[command](self, data)\n else:\n # handler.send(\"404 Error\")\n logger.warning(\"[Local] System don't understand command[%s]\" % command)", "async def send_react(self, reactions, *args, **kwargs):\n message = await self.send(*args, **kwargs)\n if isinstance(reactions, str): # Handle two-character emojis\n reactions = (reactions,)\n for reaction in reactions:\n await self.add_reaction(message, reaction)\n return message", "async def redaction_state(message: Message, state: FSMContext):\n await state.finish()\n await message.bot.send_message(\n chat_id=message.bot.config.REDACTION_CHAT,\n **MessageFromUser(message.from_user, message.text).as_dict()\n )\n await message.answer(**MessageForwarded('редакции').as_dict())\n await message.answer(**Start(message.from_user.first_name).as_dict())", "async def addreact(self, ctx, word, emoji):\n guild = ctx.message.guild\n message = ctx.message\n emoji = https://i.imgur.com/CWeQ620.jpg", "def reply(cls, user, context, message, reply_message):\r\n pass", "def processMessage(self, *args, **kwargs):\r\n pass", "async def on_raw_reaction_add(self, payload):\n\n # Don't accept DMs\n if not payload.guild_id:\n return\n\n # Ignore Bot\n if payload.user_id == self.bot.user.id:\n return\n\n if payload.emoji.name not in {'\\U00002705', '\\U0000274C', '\\U0001FA91'}: # Green Check, X, Chair\n return\n\n user = await self.bot.fetch_user(payload.user_id)\n if user.bot:\n return\n\n # U+2705 (:white_check_mark: ), U+2611(:ballot_box_with_check:) ,U+1FA91(:chair:),\n # U+1F1FD(:regional_indicator_x:), U+1F1E7(:regional_indicator_b:), U+274C(:x:)\n\n # Is this ID attached to a raid message? (Also technically checks if this is the right channel)\n message_id = payload.message_id\n s = search_format('messageID', 'equals', str(message_id))\n s = \"\".join(str(s).split())\n r = requests.get(self.bot.raidAPI + '?constraints=[' + s + ']')\n raidData = r.json()['response']['results']\n\n if raidData:\n raid_id = raidData[0]['_id']\n else:\n print(\"User liked a post that isn't a raid\" + payload.member.name + '#' + str(payload.member.discriminator))\n return # Returns if messageID isn't attached to a Raid in DB\n\n # UserName Checks\n discord_name = payload.member.name\n discord_suffix = payload.member.discriminator\n discord_full = quote(discord_name + ' #' + discord_suffix)\n\n s = search_format('DiscordID', 'equals', discord_full)\n s = \"\".join(str(s).split())\n r = requests.get(self.bot.discordAPI + '?constraints=[' + s + ']')\n userData = r.json()['response']['results']\n\n if userData:\n RR_id = userData[0]['UserID']\n else:\n # DMs User to update RR account\n dmchannel = await payload.member.create_dm()\n print(\"This user liked a post and was told he wasn't signed up:\" + discord_name + '%20%23' + str(\n discord_suffix) + ', Full:' + discord_full)\n await dmchannel.send(\n \"Error! 
Please Link Your Discord Account to ReadyRaider Here: https://www.readyraider.com/profile2\")\n\n # Removes Wrong Reaction\n channel = self.bot.get_channel(payload.channel_id)\n msg = await channel.fetch_message(message_id)\n user = await self.bot.fetch_user(payload.user_id)\n await msg.remove_reaction(payload.emoji.name, user)\n return\n\n if payload.emoji.name == '\\U00002705': # GREEN CHECK\n signAPI = self.bot.signAPI\n\n elif payload.emoji.name == '\\U0000274C': # 'X'\n signAPI = self.bot.declineAPI\n\n elif payload.emoji.name == '\\U0001FA91': # CHAIR\n signAPI = self.bot.benchAPI\n else:\n signAPI = self.bot.declineAPI\n\n headers = {\"Authorization\": \"Bearer \" + self.bot.api_key}\n body = {\"rid\": str(raid_id), \"raider\": str(RR_id)}\n requests.post(signAPI, data=body, headers=headers)\n\n s = search_format('messageID', 'equals', str(message_id))\n s = \"\".join(str(s).split())\n r = requests.get(self.bot.raidAPI + '?constraints=[' + s + ']')\n raidData = r.json()['response']['results']\n await self.raidUpdate(raidData[0], payload.channel_id, payload.guild_id)", "async def on_raw_reaction_remove(self, payload: discord.RawReactionActionEvent):\n\n guild = self.bot.get_guild(payload.guild_id)\n user = guild.get_member(payload.user_id)\n # Skipping bot reactions\n if user.bot:\n return\n\n # Search the event by message id\n message_id = payload.message_id\n channel_id = payload.channel_id\n guild_id = payload.guild_id\n event = connector.getEventByMessage(guild_id, message_id, channel_id)\n if event is None:\n return\n\n emoji_id = str(payload.emoji).split(':')[2][:-1]\n role = discord.utils.get(guild.roles, id=event.role_id)\n if (payload.event_type == \"REACTION_REMOVE\") and (event.emoji.split(':')[2][:-1] == emoji_id) and (role in user.roles):\n # Remove role from user\n await user.remove_roles(role)", "def receive_message(self, context, message):\r\n pass", "def handle_msg(self, state_id, msg):\n pass", "async def on_raw_reaction_remove(self, payload: discord.RawReactionActionEvent):\n\n guild = self.bot.get_guild(payload.guild_id)\n user = guild.get_member(payload.user_id)\n # Skipping bot reactions\n if user.bot:\n return\n\n # Search the event by message id\n message_id = payload.message_id\n channel_id = payload.channel_id\n guild_id = payload.guild_id\n event_type = connector.getEventTypeByMessage(guild_id, message_id, channel_id)\n if event_type is None:\n return\n\n emoji_id = str(payload.emoji).split(':')[2][:-1]\n role = discord.utils.get(guild.roles, id=event_type.role_id)\n if (payload.event_type == \"REACTION_REMOVE\") and (event_type.emoji.split(':')[2][:-1] == emoji_id) and (role in user.roles):\n # Remove role from user\n await user.remove_roles(role)", "def handle(self, message: Message) -> None:\n self.handled_message = message", "async def on_raw_reaction_add(self, payload):\n\n # exclude all reactions which are not the original message\n if str(payload.message_id) != self.message_id:\n return\n\n # exclude the bot\n if payload.user_id == self.bot.user.id:\n return\n\n else:\n # get the model data for the role assigner object\n data = await self.get_objects(\n model=RoleAssigner, filter={\"bot__name\": str(self.bot_name)}\n )\n\n # role assigner object\n data = data[0]\n\n guild = self.get_guild(guild_id=payload.guild_id)\n\n user = self.get_user(guild=guild, user_id=payload.user_id)\n\n for db_role in data.roles.all():\n\n if db_role.emoji.startswith(\":\") and db_role.emoji.endswith(\":\"):\n\n ce = db_role.emoji[1:-1]\n\n else:\n ce = db_role.emoji\n\n 
if str(payload.emoji.name) == str(ce):\n\n role = self.get_role(guild, int(db_role.uid))\n\n if user not in role.members:\n\n await user.add_roles(role)\n\n print(\"Added \" + str(user) + \" to role: \" + str(role) + \"!\")\n\n else:\n print(\n \"User \" + str(user) + \" already in role: \" + str(role) + \"!\"\n )\n\n pass", "def dispatch_message(self, addr, message_dict, kind):\n try:\n yield from self.dispatcher.dispatch_message(addr, message_dict, kind)\n except Exception as e:\n self.logger.error(\n \"Failed to dispatch mochad message {}: {}\".format(\n message_dict, e))", "def process(self, message: Message, **kwargs: Any) -> None:", "def handle_message(self, data, channel):\n pass", "def handle_message(self, msg):\n self.messages.append({\n 'type': msg.category,\n 'module': msg.module,\n 'obj': msg.obj,\n 'line': msg.line,\n 'column': msg.column,\n 'path': msg.path,\n 'symbol': msg.symbol,\n 'message': msg.msg,\n 'message-id': msg.msg_id,\n })", "async def ri(self, ctx: commands.Context, *, msg: str):\n\n def mapper(s: str):\n if s.startswith('<'):\n return s\n return '\\u200b'.join(self.characters.get(c.lower(), c) for c in s)\n\n strings = re.split(r\"(<\\S*>)\", msg)\n\n new_msg = ''.join(map(mapper, strings))\n\n await ctx.message.edit(content=new_msg)", "def receiveMessage(self, user, message):\n pass", "def process(self, message: Message, **kwargs: Any) -> None:\n pass", "def message(self, msg):\n if (AZMessage.is_agilezen_xmpp_message(msg)):\n try:\n az_message = AZMessage(msg)\n except (MessageCreationException, api.APIException) as ex:\n print ex\n return None\n for handler in self.handlers:\n handler.handle(az_message)", "def reply(cls, user, context, message, reply_message):\n pass", "def callback_message(self, message):\n pass", "def callback_message(self, message):\n pass", "def handle_msg(msg):\n if comm._msg_callback:\n comm._msg_callback(msg)", "def acknowledgement(self, message: Message[ValueType]):", "def handle_message(event):\n intention = parse_intention(event.message.text)\n if intention == config.QUERY_INTENTION:\n handle_query_weather_message(event)\n elif intention == config.SUBSCRIBE_INTENTION:\n handle_subscribe_message(event)\n else:\n handle_unknown_message(event)", "def message(self, msg):\n self._message = msg", "def send(self, msg):\n return self._channel_action(msg, 1)", "def notify(cls, self, message):\n pass", "async def on_reaction_add(reaction, user):\n if reaction.message.content.startswith('http'):\n curator = re.sub(r'\\d|\\W|(TravelFeed)','',str(user),re.IGNORECASE|re.DOTALL)\n if not user.id in discordcuratorlist and not user.id == botid:\n \"\"\"Checks if user who added reaction is a curator\"\"\"\n await loop.create_task(send_discord(\"Curator unauthorised: \"+curator, logchannel))\n return\n else:\n author, permlink = resolve_authorperm(reaction.message.content)\n post = Comment(construct_authorperm(author, permlink))\n if reaction.emoji == '🌍':\n await bot.add_reaction(reaction.message, \"⏳\")\n actionqueue.put(Post_Action(post, \"tf100\", curator, reaction.message))\n elif reaction.emoji == '🌐': \n await bot.add_reaction(reaction.message, \"⏳\") \n actionqueue.put(Post_Action(post, \"tf50\", curator, reaction.message))\n elif reaction.emoji == '👥':\n await bot.add_reaction(reaction.message, \"⏳\")\n actionqueue.put(Post_Action(post, \"coop100\", None, reaction.message))\n elif reaction.emoji == '👋':\n await bot.add_reaction(reaction.message, \"⏳\")\n actionqueue.put(Post_Action(post, \"ad10\", curator, reaction.message))\n elif 
reaction.emoji == '📏':\n await bot.add_reaction(reaction.message, \"⏳\")\n actionqueue.put(Post_Action(post, \"short0\", None, reaction.message))\n elif reaction.emoji == '🇬🇧':\n await bot.add_reaction(reaction.message, \"⏳\")\n actionqueue.put(Post_Action(post, \"lang0\", None, reaction.message))\n elif reaction.emoji == '📝':\n await bot.add_reaction(reaction.message, \"⏳\")\n actionqueue.put(Post_Action(post, \"copyright0\", None, reaction.message))", "async def run(self, message: discord.Message) -> None:\n await message.edit(content=self.current(), view=self)", "def handle_message(self, message):\n print(f\"Got message {message}\")\n if message >> 7 == 1:\n # String\n self.receive_char_array(message)\n elif message >> 3 == 0b00000:\n # Servo position\n self.receive_servo_position(message)\n elif message == 0b00001000:\n # All servo positions\n self.receive_all_servo_positions()\n elif message == 0b00001001:\n # All servo limits\n self.receive_all_servo_limits()\n elif message == 0b00001010:\n # Raw force reading\n self.receive_raw_force()\n print(f\"Handled message {message}\")", "def handle_message(self, message):\n\n\t\tself.console.handle_message(message)", "def message_unreact(channel, message, reaction):\n\n if not settings.SLACK_TOKEN:\n return {'ok': False, 'error': 'config_error'}\n\n client = WebClient(token=settings.SLACK_TOKEN)\n\n try:\n response = client.reactions_remove(channel=channel, timestamp=message, name=reaction)\n assert response['ok'] is True\n return response\n except SlackApiError as e:\n assert e.response['ok'] is False\n return e.response", "def comsume_msg(self, msg_type):" ]
[ "0.6869177", "0.6487918", "0.63162875", "0.6300903", "0.6280189", "0.6270176", "0.6200644", "0.61323017", "0.613172", "0.6130621", "0.6107559", "0.6087531", "0.6074806", "0.60717124", "0.6063858", "0.6055714", "0.60538733", "0.6034891", "0.6023585", "0.60027444", "0.5996093", "0.5994024", "0.5983939", "0.5955966", "0.59559005", "0.59528965", "0.5919636", "0.59173197", "0.59153545", "0.5893467", "0.5853518", "0.5847134", "0.58343136", "0.5822588", "0.5812419", "0.58028793", "0.5793747", "0.57673234", "0.5759072", "0.57562727", "0.57558304", "0.5752882", "0.57432514", "0.57429415", "0.57425314", "0.5738211", "0.5734135", "0.5724122", "0.57147735", "0.5711033", "0.5708248", "0.5693282", "0.56899136", "0.5687087", "0.568562", "0.5674516", "0.5672874", "0.56721526", "0.5657641", "0.564565", "0.56398714", "0.5639739", "0.5629927", "0.5625185", "0.56209594", "0.5620075", "0.5619464", "0.56136143", "0.5612525", "0.56087995", "0.560568", "0.56039405", "0.56023073", "0.5599863", "0.55955523", "0.5590654", "0.55897665", "0.5567404", "0.5561185", "0.55556107", "0.554391", "0.55382216", "0.5532761", "0.5531185", "0.55205005", "0.5515751", "0.5512881", "0.5505076", "0.5505076", "0.5496806", "0.5480437", "0.5468919", "0.54595953", "0.54583406", "0.5452015", "0.5451227", "0.54502505", "0.54473746", "0.54353356", "0.54326147", "0.543206" ]
0.0
-1
Implement the handler teardown.
def teardown(self) -> None:
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_teardown(self):\n with pytest.raises(NotImplementedError):\n self.handler.teardown()", "def _teardown(self):\n # No-op base implementation", "def teardown(self, event):\n pass", "def cleanup(self) -> None:\n self.handler.cleanup()\n super().cleanup()", "def teardown(self,**kwargs):\n pass", "def teardown(self) -> None:\n pass", "def teardown(self) -> None:\n pass", "def teardown(self):\n raise NotImplementedError", "def teardown(self):\n raise NotImplementedError", "def teardown(self):\n pass", "def teardown(self):\n pass", "def teardown(self):\n pass", "def teardown(self):\n pass # pylint: disable=unnecessary-pass", "def test_teardown(self):\n assert self.http_handler.teardown() is None\n self.assert_quantity_in_outbox(0)", "def teardown(self):", "def teardown(self):", "def teardown(self):", "def teardown(self, rc):\n pass", "def teardown_class(self):\n pass", "def teardown_method(self):", "def teardown(self):\n self.tcex.log.trace('teardown')", "def teardown(self, exception):", "def teardown_method(self):\n if self.ae:\n self.ae.shutdown()", "def teardown_method(self):\n if self.ae:\n self.ae.shutdown()", "def teardown_method(self):\n if self.ae:\n self.ae.shutdown()", "def teardown_method(self):\n if self.ae:\n self.ae.shutdown()", "def teardown_method(self):\n if self.ae:\n self.ae.shutdown()", "def teardown_method(self):\n if self.ae:\n self.ae.shutdown()", "def teardown(self):\n # self.in_kwargs, self.ref_time,\n del self.warn_msgs, self.war\n return", "def teardown_provider(self):\n pass", "def teardown(self):\n try:\n self.loop.run_until_complete(self.webhook_connection.disconnect())\n except Exception:\n print_exc()\n raise", "def teardown(self):\n if self.ae:\n self.ae.shutdown()", "def teardown(self):\n\n self.dummy.set_current()\n self.endpoints.lock()", "def tearDown(self):\n super(TestCase, self).tearDown()\n self._context.check_done()", "def tearDown(self):\n\n self._tear_down()", "def __exit__(self, *args):\n if self.teardown:\n super().__exit__(*args)", "def tearDown(self):\n del self.output\n del self.input_stream\n del self.error_listener", "def teardown_method(self, method) -> None:", "def cleanup(self):\r\n logging.info(\"entered the cleanup\")", "def teardown(self):\n del self.testInst, self.dname\n\n return", "def teardown(self):\n\n self._periodic_refresh_subs.stop()\n self._interaction_subscriber.dispose()\n\n yield None", "def teardown(self, log, info):\n raise NotImplementedError", "def teardown_method(self, test_method):\n self.wo_obj = None\n self.config_data = None", "def teardown_method(self):\n world.clear_paths()\n print(\"\\nEnd of tests in: %s\\n-------------------\\n\" % __name__)\n self.bigml = {}", "def teardown(self, closer):\n def actual_closer(exception):\n value = self._value\n if value is not None:\n closer(value)\n APP.teardown_appcontext(actual_closer)", "def teardown(self):\n self._loop.stop()\n self._loop.close()\n super().teardown()", "def cleanup(self, *args, **kwargs):", "def teardown():\n\n self.zorp_mock.stop()", "def cleanup (self):\n pass", "def tearDown(self):\n self.segment = None", "def cleanup(self):\r\n pass", "def __del__(self):\n AppHelper.stopEventLoop()", "def __del__(self):\n AppHelper.stopEventLoop()", "def cleanup(self):", "def cleanup(self):", "def cleanup(self):", "def cleanup(self):\r\n pass", "def cleanup(self):\r\n pass", "def tearDown(self):\n pass #because we dont have anything to tearDown.", "def cleanup(self):\r\n print(\"Cleanup not implemented\")", "def teardown_method(self, method):\n pass", "def 
teardown_method(self, method):\n pass", "def tearDown(self):\n pass\n # teardown called after each test\n # e.g. maybe write test results to some text file", "def teardown(self):\n storage.close()", "def teardown(self):\n storage.close()", "def teardown(self):\n storage.close()", "def teardown(self):\n storage.close()", "def on_cleanup(self):\n raise NotImplementedError", "def tearDown(self):\n self.teardown_beets()", "def teardown_method(self):\n self.hass.stop()", "def teardown_method(self):\n self.hass.stop()", "def teardown_method(self):\n self.hass.stop()", "def tearDown(self):\n self.api_context.pop()\n self.api_test_client = None", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def _tear_down():\n repl._tearDown = self.tearDown", "def teardown(self):\n\n del self.testInst, self.test_bins, self.test_label, self.test_data\n del self.out_keys, self.out_data\n\n return", "def cleanup(self):\n\n pass", "def teardown_class(self):\n self._tester = None\n self._sut = None", "def tearDown(self):\n\n BaseTest.tearDown(self)", "def tearDown(self):\n print('Calling \\'tearDown\\'')", "def dm_teardown(self):\n try:\n dispatcher.disconnect(\n self.dequeue_next_page_requests,\n signal=signals.spider_idle\n )\n except DispatcherKeyError:\n pass", "def cleanup(self):\n raise NotImplementedError", "def tearDownClass(cls):\n cls.context.close()", "def tearDown(self):\n test_env_teardown()", "def tearDown(self):\n self.tmp.cleanup()", "def tearDown(self):\n self.logger.info(\"tearDown begin\")\n self.logger.info(\"tearDown end\\n\")", "def teardown(self):\n del self.testInst, self.dname, self.test_val_length\n\n return", "def teardown(self):\n del self.testInst, self.dname, self.test_val_length\n\n return" ]
[ "0.8065864", "0.7863261", "0.78516686", "0.7751276", "0.7695875", "0.76800025", "0.76800025", "0.76762605", "0.76762605", "0.76578486", "0.76578486", "0.76578486", "0.7586377", "0.75539047", "0.7453597", "0.7453597", "0.7453597", "0.7220783", "0.7112508", "0.70983917", "0.7087754", "0.7085874", "0.7001528", "0.7001528", "0.7001528", "0.7001528", "0.7001528", "0.7001528", "0.69789845", "0.6975809", "0.6935623", "0.69172424", "0.6860113", "0.6859062", "0.6837898", "0.68220747", "0.68152905", "0.68058115", "0.679552", "0.6787844", "0.6747079", "0.67388815", "0.6701475", "0.6698241", "0.66952205", "0.66891843", "0.6683073", "0.6676516", "0.6662325", "0.6656234", "0.6649907", "0.6636182", "0.6636182", "0.6635551", "0.6635551", "0.6635551", "0.6631788", "0.6631788", "0.660411", "0.6604081", "0.660382", "0.660382", "0.6597178", "0.65948063", "0.65948063", "0.65948063", "0.65948063", "0.6593608", "0.6587096", "0.6581607", "0.6581607", "0.6581607", "0.65734476", "0.6572769", "0.6572769", "0.6572769", "0.6572769", "0.6572769", "0.6572769", "0.6572769", "0.6572769", "0.6572769", "0.6572769", "0.6572769", "0.6569569", "0.6565425", "0.6564269", "0.65548694", "0.65425795", "0.65370864", "0.6536476", "0.6530922", "0.652666", "0.65252733", "0.65201366", "0.6513735", "0.64996207", "0.64996207" ]
0.7647432
14
Does the program read in records, placing data into correct fields of record objects?
def test_CovidCase_creation(self): new_Covid = self.create_CovidCase() self.assertTrue(isinstance(new_Covid, CovidCase)) self.assertEqual(new_Covid.country_id, "TE")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _parseRecords(self):\n # dict of parse methods for most common records that will be stored in structured arrays\n FLAG2METHOD = {'PS' : self.parseHighPassRecord,\n 'PC' : self.parseLowPassRecord,\n 'VD' : self.parseDigitalSValRecord}\n # dict of (record type, listname to store it in) tuples\n FLAG2REC = {'L' : (LayoutRecord, 'layoutrecords'),\n 'MS' : (SurfMessageRecord, 'messagerecords'),\n 'MU' : (UserMessageRecord, 'messagerecords'),\n 'PE' : (EpochRecord, 'epochrecords'),\n 'D' : (DisplayRecord, 'displayrecords'),\n 'VA' : (AnalogSValRecord, 'analogsvalrecords')}\n f = self.f\n while True:\n # returns an empty string when EOF is reached\n flag = f.read(2).rstrip(NULL).decode() # TODO: should this strip NULL?\n if flag == '':\n break\n # put file pointer back to start of flag\n f.seek(-2, 1) # TODO: unnecessary - doesn't this slow down parsing quite a bit?\n if flag in FLAG2METHOD: # these are the most common\n FLAG2METHOD[flag](f) # call the method\n elif flag in FLAG2REC:\n rectype, reclistname = FLAG2REC[flag]\n rec = rectype()\n rec.parse(f)\n #wx.Yield() # allow wx GUI event processing during parsing\n self._appendRecord(rec, reclistname)\n else:\n raise ValueError('Unexpected flag %r at offset %d' % (flag, f.tell()))\n #self.percentParsed = f.tell() / self.filesize * 100", "def read_data(self) -> List[BaseRecord]:\n pass", "def loadData(self,ins):\n #--Read subrecords\n bytesRead = 0\n objectId = None\n while bytesRead < self.size:\n (name,size) = ins.unpackSubHeader(self.name)\n #print name,size\n bytesRead += 8+size\n subData = ins.read(size, self.name+'.'+name)\n #--Id?\n if name == 'NAME':\n self.id = cstrip(subData)\n #--Flags\n elif name == 'DATA':\n flags = struct.unpack('i',subData)[0]\n if self.name == 'LEVC':\n self.calcFromAllLevels = (flags & 1) == 1\n else:\n self.calcForEachItem = (flags & 1) == 1\n self.calcFromAllLevels = (flags & 2) == 2\n #--Chance None\n elif name == 'NNAM':\n self.chanceNone = struct.unpack('B',subData)[0]\n #--Count\n elif name == 'INDX':\n self.count = struct.unpack('i',subData)[0]\n #--Creature/Item Id?\n elif name == 'CNAM' or name == 'INAM':\n objectId = cstrip(subData)\n #--PC Level\n elif name == 'INTV':\n pcLevel = struct.unpack('h',subData)[0]\n self.entries.append((pcLevel,objectId))\n objectId = None\n #--Deleted?\n elif name == 'DELE': \n self.isDeleted = True\n #--Else\n else: raise Tes3UnknownSubRecord(self.inName,name,self.name)\n #--No id?\n if not self.id:\n raise Tes3Error(self.inName,_('No id for %s record.') % (self.name,))\n #--Bad count?\n if self.count != len(self.entries):\n self.count = len(self.entries)\n self.setChanged()", "def parse_records(self):\n for record in sp.parse(gzip.open(\n \"./human_uniprot_04_07_20.gz\", 'rt')):\n # print(record.taxonomy_id)\n # if record.organism != \"Homo sapiens\":\n # continue\n # print(record.features[0])\n # for comment in record.comments:\n # if comment.startswith(\"SUBCELLULAR LOCATION\"):\n # print(comment)\n self.extract_features_to_dict(record)\n self.extract_localization(record)", "def _parse_records(self, customization=None):\n def _add_parsed_record(record, records):\n \"\"\"\n Atomic function to parse a record\n and append the result in records\n \"\"\"\n if record != \"\":\n logger.debug('The record is not empty. 
Let\\'s parse it.')\n parsed = self._parse_record(record, customization=customization)\n if parsed:\n logger.debug('Store the result of the parsed record')\n records.append(parsed)\n else:\n logger.debug('Nothing returned from the parsed record!')\n else:\n logger.debug('The record is empty')\n\n records = []\n record = \"\"\n # read each line, bundle them up until they form an object, then send for parsing\n for linenumber, line in enumerate(self.bibtex_file_obj):\n logger.debug('Inspect line %s', linenumber)\n if line.strip().startswith('@'):\n # Remove leading whitespaces\n line = line.lstrip()\n logger.debug('Line starts with @')\n # Parse previous record\n _add_parsed_record(record, records)\n # Start new record\n logger.debug('The record is set to empty')\n record = \"\"\n # Keep adding lines to the record\n record += line\n\n # catch any remaining record and send it for parsing\n _add_parsed_record(record, records)\n logger.debug('Set the list of entries')\n self.bib_database.entries = records", "def parse_record(self, record):\n raise NotImplementedError()", "def test_getknowndata(self):\n result = recordparser.getfields(self.rawdata, self.fieldmap,\n self.sourcekeys)\n self.assertEqual(self.knownvalues, result)", "def loadData(self,ins):\n self.isDeleted = False\n #--Read subrecords\n bytesRead = 0\n while bytesRead < self.size:\n (name,size) = ins.unpackSubHeader('BOOK')\n srData = ins.read(size,'BOOK.'+name)\n bytesRead += 8+size\n if name == 'NAME': self.id = cstrip(srData)\n elif name == 'MODL': self.model = cstrip(srData)\n elif name == 'FNAM': self.title = cstrip(srData)\n elif name == 'BKDT':\n (self.weight,self.value,self.isScroll,self.teaches,self.enchantPoints\n ) = struct.unpack('f4i',srData)\n elif name == 'SCRI': self.script = cstrip(srData)\n elif name == 'ITEX': self.icon = cstrip(srData)\n elif name == 'TEXT': self.text = cstrip(srData)\n elif name == 'ENAM': self.enchant = cstrip(srData)\n #--Deleted?\n elif name == 'DELE': self.isDeleted = True\n #--Bad record?\n else: \n raise Tes3Error(self.inName,_('Extraneous subrecord (%s) in %s record.') \n % (name,self.name))", "def loadData(self,ins):\n #--Read subrecords\n bytesRead = 0\n curTest = None\n while bytesRead < self.size:\n (name,size) = ins.unpackSubHeader('INFO')\n srData = ins.read(size,'INFO.'+name)\n bytesRead += 8+size\n #--Ids\n if name == 'INAM': self.id = cstrip(srData)\n elif name == 'PNAM': self.prevId = cstrip(srData)\n elif name == 'NNAM': self.nextId = cstrip(srData)\n #--Text/Script\n elif name == 'NAME': self.text = srData\n elif name == 'BNAM': self.script = srData\n elif name == 'SNAM': self.speak = srData\n #--Quest flags\n elif name == 'QSTN': self.qflag = 1\n elif name == 'QSTF': self.qflag = 2\n elif name == 'QSTR': self.qflag = 3\n #--String/Value Tests\n elif name == 'DATA': \n (self.type, self.spDisp, self.spRank, self.spSex, self.pcRank, self.unk02\n ) = struct.unpack('2i4B',srData)\n elif name == 'ONAM': self.spId = cstrip(srData)\n elif name == 'RNAM': self.spRace = cstrip(srData)\n elif name == 'CNAM': self.spClass = cstrip(srData)\n elif name == 'FNAM': self.spFaction = cstrip(srData)\n elif name == 'ANAM': self.cell = cstrip(srData)\n elif name == 'DNAM': self.pcFaction = cstrip(srData)\n #--Function/Value Tests\n elif name == 'SCVR': \n (index,type,func,oper) = struct.unpack('BB2sB',srData[:5])\n text = srData[5:]\n curTest = Info_Test(type,func,oper,text)\n self.tests[index-48] = curTest\n elif name == 'INTV':\n (curTest.value,) = struct.unpack('i',srData)\n elif name == 
'FLTV':\n (curTest.value,) = struct.unpack('f',srData)\n #--Deleted?\n elif name == 'DELE': self.isDeleted = True\n #--Bad record?\n else: raise Tes3UnknownSubRecord(self.inName,name,self.name)", "def read_record(file_, num_evo_entries):\n dict_ = {}\n\n while True:\n next_line = file_.readline()\n case = switch(next_line)\n if case('[ID]' + '\\n'):\n id_ = file_.readline()[:-1]\n dict_.update({'id': id_})\n elif case('[PRIMARY]' + '\\n'):\n primary = letter_to_num(file_.readline()[:-1], _aa_dict)\n dict_.update({'primary': primary})\n elif case('[EVOLUTIONARY]' + '\\n'):\n evolutionary = []\n for residue in range(num_evo_entries):\n evolutionary.append([float(step) for step in file_.readline().split()])\n dict_.update({'evolutionary': evolutionary})\n elif case('[SECONDARY]' + '\\n'):\n secondary = letter_to_num(file_.readline()[:-1], _dssp_dict)\n dict_.update({'secondary': secondary})\n elif case('[TERTIARY]' + '\\n'):\n tertiary = []\n for axis in range(NUM_DIMENSIONS): \n tertiary.append([float(coord) for coord in file_.readline().split()])\n dict_.update({'tertiary': tertiary})\n elif case('[MASK]' + '\\n'):\n mask = letter_to_num(file_.readline()[:-1], _mask_dict)\n dict_.update({'mask': mask})\n elif case('\\n'):\n return dict_\n elif case(''):\n return None", "def dump_record(record):\n rec = E.record()\n\n leader = record.get('leader')\n if leader:\n rec.append(E.leader(leader))\n\n if isinstance(record, GroupableOrderedDict):\n items = record.iteritems(with_order=False, repeated=True)\n else:\n items = iteritems(record)\n\n for df, subfields in items:\n # Control fields\n if len(df) == 3:\n if isinstance(subfields, string_types):\n controlfield = E.controlfield(subfields)\n controlfield.attrib['tag'] = df[0:3]\n rec.append(controlfield)\n elif isinstance(subfields, (list, tuple, set)):\n for subfield in subfields:\n controlfield = E.controlfield(subfield)\n controlfield.attrib['tag'] = df[0:3]\n rec.append(controlfield)\n else:\n # Skip leader.\n if df == 'leader':\n continue\n\n if not isinstance(subfields, (list, tuple, set)):\n subfields = (subfields,)\n\n df = df.replace('_', ' ')\n for subfield in subfields:\n if not isinstance(subfield, (list, tuple, set)):\n subfield = [subfield]\n\n for s in subfield:\n datafield = E.datafield()\n datafield.attrib['tag'] = df[0:3]\n datafield.attrib['ind1'] = df[3]\n datafield.attrib['ind2'] = df[4]\n\n if isinstance(s, GroupableOrderedDict):\n items = s.iteritems(with_order=False, repeated=True)\n elif isinstance(s, dict):\n items = iteritems(s)\n else:\n datafield.append(E.subfield(s))\n\n items = tuple()\n\n for code, value in items:\n if not isinstance(value, string_types):\n for v in value:\n datafield.append(E.subfield(v, code=code))\n else:\n datafield.append(E.subfield(value, code=code))\n\n rec.append(datafield)\n return rec", "def parse_records(self, handle, do_features=...): # -> Generator[SeqRecord, None, None]:\n ...", "def _pre_process_record(self, data):\n result = []\n symbolic_split = \",\"\n if isinstance(data, dict):\n if self.measure is None:\n logging.error(\"Missing the name of keys pointing to values\")\n raise UnSADException.data_format_exception()\n if self.timestamp is not None:\n if self.timestamp in data:\n try:\n result.append(float(data[self.timestamp]))\n [result.append(data[measure])\n for measure in self.measure]\n except RuntimeError:\n logging.error(\"Invalid input data type, should be a numerical type\")\n logging.error(\"Input data should contain all the fields \"\n \"that are specified when 
initialize the detector: \" + str(self.measure))\n raise UnSADException.data_type_exception()\n else:\n logging.error(\"Input data should contain a timestamp field:\" + str(self.timestamp))\n raise UnSADException.data_format_exception()\n else:\n try:\n [result.append(data[measure]) for measure in self.measure]\n except RuntimeError:\n logging.error(\"Input data should contain all the fields \"\n \"that are specified when initialize the detector: \" + str(self.measure))\n raise UnSADException.data_format_exception()\n elif isinstance(data, Iterable) and not isinstance(data, str):\n if self.timestamp is not None:\n if len(data) == len(self.measure) + 1:\n try:\n result = list(data)\n result[0] = float(result[0])\n except RuntimeError as e:\n logging.error(\"Invalid input data type, timestamp should be a numerical type\")\n raise UnSADException.data_type_exception()\n else:\n logging.error(\"The number of input parameters:\" + str(\n len(data)) + \" does not match with this detectors:\" + str(len(self.measure) + 1))\n raise UnSADException.input_number_exception()\n else:\n if self.measure is None or len(data) == len(self.measure):\n result = data\n else:\n logging.error(\"The number of input parameters:\" + str(\n len(data)) + \" does not match with this detectors:\" + str(len(self.measure)))\n raise UnSADException.input_number_exception()\n else:\n if (self.measure is None or len(self.measure) == 1) and self.timestamp is None:\n if self.symbolic:\n return str(data)\n else:\n try:\n return float(data)\n except RuntimeError as e:\n logging.error(\"Invalid input data type, should be a numerical type\")\n raise UnSADException.data_type_exception()\n else:\n logging.error(\"This detector is not initialized properly\")\n raise UnSADException.not_proper_initialize_exception()\n\n if not self.symbolic:\n try:\n processed_result = [float(result[i])\n for i in range(len(result))]\n except RuntimeError as e:\n logging.error(\"Invalid input data type, should be a numerical type\")\n raise UnSADException.data_type_exception()\n\n return processed_result[0] if len(processed_result) == 1 else processed_result\n\n else:\n if self.timestamp is not None:\n return [result[0], symbolic_split.join([str(s) for s in result[1:]])]\n else:\n return symbolic_split.join([str(s) for s in result])", "def test2(self):\n field1 = self.get_first_record_for_name(\"I5\")\n lons, lats = self.get_longitudes_and_latitudes_for_the_last_read_rec()", "def process_data(self):\n num_records = len(self.records_data)\n for i in range(len(self.keys)):\n student_key = self.keys[i]\n if (i < num_records):\n self._load_student_record(student_key,\n self.records_data[i])", "def triage_record(self, record):\n # Filter out any garbage commands/responses\n # Record any changes to the reader state\n # Pass OBD2 records on for formatting\n \n # We need to figure out whether this record is :\n # - line noise / garbage \"?\"\n # - the result of an \"AT\" command \n # - the result of an OBD2 command \n \n # skip over garbage \n if record == [] \\\n or record[0] == [] \\\n or record[0][0] == '' \\\n or record[0][0] == '?' :\n #print \"Garbage record. Skipping.\"\n return []\n\n # handle ELM327 errors\n # \"?\" - unrecognized command\n # \"NO DATA\" - reader timed out waiting for response from vehicle\n # \"BUFFER FULL\" - need to read data from reader faster, ie. increase baud rate on serial connection\n # many more...\n if len(record) > 1 :\n if record[1][0] == '?' \\\n or record[1][0] == 'NO':\n #print \"Garbage record. 
Skipping.\"\n return []\n\n # record the changes made by AT commands\n cmd = str.upper(record[0][0])\n if cmd[0:2] == 'AT':\n self.interpret_at_cmd(record)\n return []\n \n # remove \"SEARCHING...\" from response\n # example:\n # >0100\n # SEARCHING...\n # 41 00 BE 3E A8 11 \n if len(record) > 1 :\n if record[1][0] == 'SEARCHING...':\n record.pop(1)\n\n # BUFFER FULL - ugh, need to speed up the serial connection\n rl = len(record)\n rec = 0\n while rec < rl:\n if record[rec][0] == 'BUFFER' and record[rec][1] == 'FULL':\n record.pop(rec)\n print \" ERROR - BUFFER FULL - Increase speed of serial connection\"\n #return []\n rec += 1\n # \"BUS BUSY\", \"CAN ERROR\", ???\n\n # if we get a 7F, that means there was an error\n # 10 - general reject\n # 11 - service not supported\n # 12 - subfunction not supported OR invalid format\n # 21 - busy repeat\n # 22 - conditions or sequence not correct \n # 78 - response pending\n if record[1][0] == '7F':\n mode = record[1][1]\n err = record[1][2]\n if err == 10:\n print \"General Error -- Mode:\", mode\n elif err == 11:\n print \"Service Not Supported Error -- Mode:\", mode\n elif err == 12:\n print \"Subfunction Not Supported or Invalid Format Error -- Mode:\", mode\n elif err == 21:\n print \"BUSY, Repeat -- Mode:\", mode\n elif err == 22:\n print \"Conditions or Sequence Not Correct -- Mode:\", mode\n elif err == 78:\n print \"Unknown Error -- Mode:\", mode, \" -- Error code:\", err\n return []\n\n\n # format an OBD 2 command for further processing at a higher layer\n try:\n obd2_record = self.format_obd2_record(record)\n except self.ErrorIncompleteRecord:\n print \"Garbage record. Skipping.\"\n return []\n \n return obd2_record", "def loadData(self,ins):\n #--Read subrecords\n bytesRead = 0\n while bytesRead < self.size:\n (name,size) = ins.unpackSubHeader('GLOB')\n srData = ins.read(size,'GLOB.'+name)\n bytesRead += 8+size\n if name == 'NAME': self.id = cstrip(srData)\n elif name == 'FNAM': self.type = srData\n elif name == 'FLTV': self.value = struct.unpack('f',srData)\n #--Deleted?\n elif name == 'DELE': self.isDeleted = True\n #--Bad record?\n else: raise Tes3UnknownSubRecord(self.inName,name,self.name)", "def parse_record(self, in_rec):\n \n geo_util = geo.Geo()\n \n self.metadata = {}\n for k, v in in_rec.items():\n if k == 'metadata2': continue\n elif k == 'geometry':\n self.metadata['geometry'] = v\n coords = v['coordinates']\n self.metadata['wkt'] = geo_util.convert_imageGeom(\\\n coords, 'wkt')\n elif k == 'metadata':\n for m in v:\n key = to_camelCase(m[0])\n self.metadata[key] = m[1]\n else:\n self.metadata[k] = v", "def parse_record(self, in_rec):\n \n self.metadata = {}\n for k, v in in_rec.items():\n if k == 'parameters':\n for m, mv in v.items():\n self.metadata[m] = mv\n else:\n self.metadata[k] = v\n \n if self.image is not None:\n self.metadata['imageUrl'] = self.image.get_metadata(\\\n 'thisRecordUrl')\n self.metadata['imageMetadata'] = self.image.get_metadata(\\\n 'metadataUrl')\n self.metadata['imageStartDate'] = self.image.get_date()\n \n if 'dateRapiOrdered' not in self.metadata.keys():\n self.metadata['dateRapiOrdered'] = self.image.get_metadata(\\\n 'dateRapiOrdered')\n self.metadata['orderSubmitted'] = self.image.get_metadata(\\\n 'orderSubmitted')", "def test_process_B_record():\n\n # split up per extension for easy reading\n i_record = 'I08' + '3638FXA' + '3941ENL' + '4246TAS' + '4751GSP' + '5254TRT' + '5559VAT' + '6063OAT' + '6467ACZ'\n fix_record_extensions = LowLevelReader.decode_I_record(i_record)\n\n # split up per 
10 to enable easy counting\n b_record = 'B093232520' + '2767N00554' + '786EA00128' '0019600600' '1145771529' + '3177005930' + '2770090'\n\n decoded_b_record = LowLevelReader.decode_B_record(b_record)\n processed_b_record = LowLevelReader.process_B_record(decoded_b_record, fix_record_extensions)\n\n # split per extension: 006 001 14577 15293 177 00593 0277 0090\n expected_values = [\n ('FXA', (36, 38), 6),\n ('ENL', (39, 41), 1),\n ('TAS', (42, 46), 14577),\n ('GSP', (47, 51), 15293),\n ('TRT', (52, 54), 177),\n ('VAT', (55, 59), 593),\n ('OAT', (60, 63), 277),\n ('ACZ', (64, 67), 90),\n ]\n\n for extension, bytes, expected_value in expected_values:\n assert {'bytes': bytes, 'extension_type': extension} in fix_record_extensions\n assert extension in processed_b_record\n assert expected_value == processed_b_record[extension]", "def test_fields_to_dict(self):\r\n test_data = \\\r\n \"\"\"0\tR27DLI_4812\tR27DLI_600\tR27DLI_727\tU1PLI_403\tU1PLI_8969\tU1PLI_9080\tU1PLI_9526\tW3Cecum_6642\tW3Cecum_8992\r\n1\tU1PLI_7889\r\n2\tW3Cecum_4858\r\n3\tR27DLI_3243\tR27DLI_4562\tR27DLI_6828\tR27DLI_9097\tU1PLI_2780\tU1PLI_67\tU9PSI_10475\tU9PSI_4341\tW3Cecum_5191\"\"\".splitlines() # output from cd-hit\r\n obs = fields_to_dict(test_data)\r\n exp = {\r\n '0': ['R27DLI_4812', 'R27DLI_600', 'R27DLI_727', 'U1PLI_403',\r\n 'U1PLI_8969', 'U1PLI_9080', 'U1PLI_9526', 'W3Cecum_6642', 'W3Cecum_8992'],\r\n '1': ['U1PLI_7889'],\r\n '2': ['W3Cecum_4858'],\r\n '3': ['R27DLI_3243', 'R27DLI_4562', 'R27DLI_6828', 'R27DLI_9097', 'U1PLI_2780', 'U1PLI_67', 'U9PSI_10475', 'U9PSI_4341', 'W3Cecum_5191']}\r\n self.assertEqual(obs, exp)", "def get_record_meta(record_list):\n acc_code = record_list[0]\n organism = record_list[1]\n EC_code = record_list[2].replace(\"__\", \" \")\n species = record_list[3].replace(\"__\", \" \")\n note = record_list[4]\n return acc_code, organism, EC_code, species, note", "def _record_reader(stream):\n while True:\n header = stream.read(4)\n if len(header) < 4:\n return\n size, rec_type = struct.unpack(\">HH\", header)\n data_type = rec_type & 0x00FF\n rec_type = rec_type // 256\n data = None\n if size > 4:\n if data_type == 0x01:\n data = numpy.array(\n struct.unpack(\n \">{0}H\".format((size - 4) // 2), stream.read(size - 4)\n ),\n dtype=\"uint\",\n )\n elif data_type == 0x02:\n data = numpy.array(\n struct.unpack(\n \">{0}h\".format((size - 4) // 2), stream.read(size - 4)\n ),\n dtype=\"int\",\n )\n elif data_type == 0x03:\n data = numpy.array(\n struct.unpack(\n \">{0}l\".format((size - 4) // 4), stream.read(size - 4)\n ),\n dtype=\"int\",\n )\n elif data_type == 0x05:\n data = numpy.array(\n [\n _eight_byte_real_to_float(stream.read(8))\n for _ in range((size - 4) // 8)\n ]\n )\n else:\n data = stream.read(size - 4)\n if str is not bytes:\n if data[-1] == 0:\n data = data[:-1].decode(\"ascii\")\n else:\n data = data.decode(\"ascii\")\n elif data[-1] == \"\\0\":\n data = data[:-1]\n yield [rec_type, data]", "def test_transform_record(self):\n response = {\"frequency\": 0.009112876, \"info\": {\"accessType\": \"PUBLIC\"},\n \"referenceBases\": \"CT\", \"alternateBases\": \"AT\",\n \"start\": 10, \"end\": 12,\n \"variantCount\": 3, \"variantType\": \"MNP\"}\n record = Record(\"PUBLIC\", 0.009112875989879, referenceBases=\"CT\", alternateBases=\"AT\", start=10, end=12, variantCount=3, variantType=\"MNP\")\n result = transform_record(record)\n self.assertEqual(result, response)", "def _parse_row(self, record):\n original_record = record\n reverse_record = record[::-1]\n # Records contain null 
bitmaps for columns. The number of bitmaps is the number of columns / 8 rounded up\n null_table_len = (self.table_header.column_count + 7) // 8\n if null_table_len and null_table_len < len(original_record):\n null_table = record[-null_table_len:]\n # Turn bitmap to a list of True False values\n null_table = [((null_table[i // 8]) & (1 << (i % 8))) != 0 for i in range(len(null_table) * 8)]\n else:\n logging.error(f\"Failed to parse null table column count {self.table_header.column_count}\")\n return\n if self.version > 3:\n field_count = struct.unpack_from(\"h\", record)[0]\n record = record[2:]\n else:\n field_count = struct.unpack_from(\"b\", record)[0]\n record = record[1:]\n\n relative_records_column_map = {}\n # Iterate columns\n for i, column in self.columns.items():\n # Fixed length columns are handled before variable length. If this is a variable length column add it to\n # mapping and continue\n if not column.column_flags.fixed_length:\n relative_records_column_map[i] = column\n continue\n\n self._parse_fixed_length_data(record, column, null_table)\n if relative_records_column_map:\n relative_records_column_map = dict(sorted(relative_records_column_map.items()))\n metadata = self._parse_dynamic_length_records_metadata(reverse_record, original_record,\n null_table_len)\n if not metadata:\n return\n self._parse_dynamic_length_data(original_record, metadata, relative_records_column_map)", "def process(self, record):\n is_data = True\n if self.file_path.split('.')[-1] == 'csv':\n if self.header_skip:\n logging.info('Skipping header data... {}'.format(record))\n self.header_skip = False\n is_data = False\n return [(record, None, None, is_data)]\n record_attributes = list(csv.reader([record]))[0]\n if len(record_attributes) != len(self.schema[FIELDS_KEY]):\n if len(record_attributes) > 1 or not record_attributes[0].strip().isdigit():\n IS_VALID_FILE = False\n is_data = None\n return [(record, None, None, is_data)]\n for record_attribute, attribute_schema in zip(\n record_attributes, self.schema[FIELDS_KEY]):\n is_valid_datatype_check = self.__datatype_check(record_attribute, attribute_schema)\n is_valid_null_check = self.__null_check(record_attribute, attribute_schema)\n return [(record, is_valid_datatype_check, is_valid_null_check, is_data)]", "def parse_record(pub, global_year=None):\n\n wosid = parse_id(pub)\n\n authors = prune_branch(pub, names_path, name_path, parse_name)\n contributors = prune_branch(\n pub, contributors_path, contributor_path, parse_contributor\n )\n\n pubinfo_flag, pubinfo = parse_properties(pub, pubinfo_path)\n\n # pubdate = parse_date(pub, global_year)\n # pubtype = parse_pubtype(pub)\n # # add vol, issue, has_abstract information\n # vol_issue_has_abs = parse_vol_issue_has_abs(pub, pubinfo_path)\n\n date_flag, pubdate, pubinfo_rest = extract_date(pubinfo, global_year)\n\n idents = prune_branch(pub, identifiers_path, identifier_path, parse_identifier)\n\n success = all([wosid[0], pubinfo_flag, date_flag, authors[0], idents[0]])\n if success:\n addresses = prune_branch(pub, add_path, add_spec_path, parse_address)\n\n references = prune_branch(\n pub, references_path, reference_path, parse_reference, filter_false=True\n )\n\n doctypes = prune_branch(\n pub, doctypes_path, doctype_path, parse_doctype, filter_false=True\n )\n\n languages = prune_branch(\n pub, languages_path, language_path, parse_language, filter_false=True\n )\n language_dict = process_languages(languages)\n\n titles = prune_branch(\n pub, titles_path, title_path, parse_title, filter_false=True\n 
)\n titles_dict = process_titles(titles)\n\n keywords = prune_branch(\n pub, keywords_path, keyword_path, parse_generic, filter_false=True\n )\n\n kws_plus = prune_branch(\n pub, keywordsplus_path, keyword_path, parse_generic, filter_false=True\n )\n\n headings = prune_branch(\n pub, headings_path, heading_path, parse_generic, filter_false=True\n )\n\n subheadings = prune_branch(\n pub, subheadings_path, subheading_path, parse_generic, filter_false=True\n )\n\n subjects = prune_branch(\n pub, subjects_path, subject_path, parse_generic, filter_false=True\n )\n\n abstracts = prune_branch(\n pub, abstracts_path, abstract_path, parse_abstract, filter_false=True\n )\n\n grant_agencies = prune_branch(\n pub, grants_path, grant_path, parse_grant, filter_false=True\n )\n\n publishers = prune_branch(\n pub, publishers_path, publisher_path, parse_publisher, filter_false=True\n )\n\n conferences = prune_branch(\n pub, conferences_path, conference_path, parse_conference, filter_false=True\n )\n\n editions = prune_branch(pub, ewuid_path, edition_path, parse_edition)\n\n fund_text = parse_fundtext(pub)\n\n page_dict = parse_page(pub, page_path)\n\n idents_flat = [item for sublist in idents[1] for item in sublist]\n\n prop_dict = {x: y for x, y in idents_flat}\n\n # prop_dict.update(pubtype[1])\n # prop_dict.update({'vol': vol_issue_has_abs[1].get('vol', None)})\n # prop_dict.update({'issue': vol_issue_has_abs[1].get('issue', None)})\n # prop_dict.update({'has_abstract': vol_issue_has_abs[1].get('has_abstract', None)})\n prop_dict.update(pubinfo_rest)\n\n prop_dict.update(language_dict)\n prop_dict.update(titles_dict)\n prop_dict.update({\"doctype\": doctypes[1]})\n prop_dict.update({\"keywords\": keywords[1]})\n prop_dict.update({\"keywords_plus\": kws_plus[1]})\n prop_dict.update({\"headings\": headings[1]})\n prop_dict.update({\"subheadings\": subheadings[1]})\n prop_dict.update({\"subjects\": list(set(subjects[1]))})\n prop_dict.update({\"abstracts\": abstracts[1]})\n prop_dict.update({\"grant_agencies\": grant_agencies[1]})\n prop_dict.update({\"fund_text\": fund_text[1]})\n prop_dict.update({\"conferences\": conferences[1]})\n prop_dict.update({\"page_info\": page_dict[1]})\n prop_dict.update({\"editions\": editions[1]})\n\n prop_dict = {k: v for k, v in prop_dict.items() if v}\n\n record_dict = {\n \"id\": wosid[1],\n \"date\": pubdate,\n \"addresses\": addresses[1],\n \"authors\": authors[1],\n \"contributors\": contributors[1],\n \"references\": references[1],\n \"publishers\": publishers[1],\n \"properties\": prop_dict,\n }\n\n else:\n record_dict = etree_to_dict(pub)\n record_dict.update({\"id\": wosid[1]})\n record_dict = {k: v for k, v in record_dict.items() if v}\n return success, record_dict", "def _read_record(self, stream):\n header = stream.read(4)\n if len(header) < 4:\n return None\n size, rec_type = struct.unpack('>HH', header)\n data_type = (rec_type & 0x00ff)\n rec_type = rec_type // 256\n data = None\n if size > 4:\n if data_type == 0x01:\n data = numpy.array(\n struct.unpack('>{0}H'.format((size - 4) // 2),\n stream.read(size - 4)),\n dtype='uint')\n elif data_type == 0x02:\n data = numpy.array(\n struct.unpack('>{0}h'.format((size - 4) // 2),\n stream.read(size - 4)),\n dtype='int')\n elif data_type == 0x03:\n data = numpy.array(\n struct.unpack('>{0}l'.format((size - 4) // 4),\n stream.read(size - 4)),\n dtype='int')\n elif data_type == 0x05:\n data = numpy.array([\n _eight_byte_real_to_float(stream.read(8))\n for _ in range((size - 4) // 8)\n ])\n else:\n data = 
stream.read(size - 4)\n if str is not bytes:\n if data[-1] == 0:\n data = data[:-1].decode('ascii')\n else:\n data = data.decode('ascii')\n elif data[-1] == '\\0':\n data = data[:-1]\n return [rec_type, data]", "def unpackRecords(self,unpackTypes):\n for record in self.records:\n if record.name in unpackTypes:\n record.load(unpack=True)", "def _postprocess_record(record, hide=_CONFIDENTIAL_FIELDS):\n record = hide_confidential_fields(record, hide)\n record = unserialize_fields(record, hide)\n\n convert_float_timestamp2str(record)\n\n return record", "def test_read_participants_data():\n bids_root = _TempDir()\n bids_path = _bids_path.copy().update(root=bids_root, datatype='meg')\n raw = _read_raw_fif(raw_fname, verbose=False)\n\n # if subject info was set, we don't roundtrip birthday\n # due to possible anonymization in mne-bids\n subject_info = {\n 'hand': 1,\n 'sex': 2,\n }\n raw.info['subject_info'] = subject_info\n write_raw_bids(raw, bids_path, overwrite=True, verbose=False)\n raw = read_raw_bids(bids_path=bids_path)\n print(raw.info['subject_info'])\n assert raw.info['subject_info']['hand'] == 1\n assert raw.info['subject_info']['sex'] == 2\n assert raw.info['subject_info'].get('birthday', None) is None\n\n # if modifying participants tsv, then read_raw_bids reflects that\n participants_tsv_fpath = op.join(bids_root, 'participants.tsv')\n participants_tsv = _from_tsv(participants_tsv_fpath)\n participants_tsv['hand'][0] = 'n/a'\n _to_tsv(participants_tsv, participants_tsv_fpath)\n raw = read_raw_bids(bids_path=bids_path)\n assert raw.info['subject_info']['hand'] == 0\n assert raw.info['subject_info']['sex'] == 2\n assert raw.info['subject_info'].get('birthday', None) is None\n\n # make sure things are read even if the entries don't make sense\n participants_tsv = _from_tsv(participants_tsv_fpath)\n participants_tsv['hand'][0] = 'righty'\n participants_tsv['sex'][0] = 'malesy'\n _to_tsv(participants_tsv, participants_tsv_fpath)\n with pytest.warns(RuntimeWarning, match='Unable to map'):\n raw = read_raw_bids(bids_path=bids_path)\n assert raw.info['subject_info']['hand'] is None\n assert raw.info['subject_info']['sex'] is None\n\n # make sure to read in if no participants file\n raw = _read_raw_fif(raw_fname, verbose=False)\n write_raw_bids(raw, bids_path, overwrite=True, verbose=False)\n os.remove(participants_tsv_fpath)\n with pytest.warns(RuntimeWarning, match='Participants file not found'):\n raw = read_raw_bids(bids_path=bids_path)\n assert raw.info['subject_info'] is None", "def read_record(self, file_, num_evo_entries,get_data,mode='dist',return_seq=True):\n # This method and the Switch Class were taken from the original ProteinNet repo.\n # arg is a open file, num_evo_entries is 20 by default\n #\n # Strip the dict and insert lists for each of the types of entries\n desc_dict = utils.load_obj('desc_dict.pkl')\n desc_dict_rev = {int(self.aa_dict[k]):v for (k,v) in desc_dict.items()}\n\n # this will be stripped soon.\n if get_data == 'all':\n get_data = ['id','primary','evolutionary','secondary','tertiary','mask']\n aa_dict = {'A': '0', 'C': '1', 'D': '2', 'E': '3', 'F': '4', 'G': '5', 'H': '6', 'I': '7', 'K': '8', 'L': '9', 'M': '10', 'N': '11', 'P': '12', 'Q': '13', 'R': '14', 'S': '15', 'T': '16', 'V': '17', 'W': '18', 'Y': '19'}\n _mask_dict = {'-': '0', '+': '1'}\n while True:\n next_line = file_.readline()\n for case in switch(next_line):\n if case('[ID]' + '\\n'):\n if 'id' in get_data:\n id_ = file_.readline()[:-1]\n \n elif case('[PRIMARY]' + '\\n'):\n if 'primary' in 
get_data: \n prim = file_.readline()[:-1]\n primary = self.letter_to_num(prim, self.aa_dict)\n \n \n elif case('[EVOLUTIONARY]' + '\\n'):\n if 'evo' in get_data:\n evolutionary = []\n for residue in range(num_evo_entries): evolutionary.append(np.asarray([float(step) for step in file_.readline().split()]))\n evolutionary = np.array(evolutionary)\n evolutionary = evolutionary.T # this will turn evo into an array of shape (-1, 20) Fuck yeah\n \n elif case('[TERTIARY]' + '\\n'):\n if 'tert' in get_data:\n tertiary = []\n for axis in range(3): tertiary.append([float(coord) for coord in file_.readline().split()])\n \n elif case('[MASK]' + '\\n'):\n if 'mask' in get_data:\n mask = file_.readline()[:-1]\n mask = self.letter_to_num(mask, _mask_dict)\n \n # ends reading a Single record\n elif case('\\n'):\n # perform preprocessing\n if 0 in mask:\n return -1\n if len(primary) > self.seq_cutoff:\n return -1\n prop = utils.make_prop_array(primary,desc_dict_rev)\n x = np.concatenate([tc(primary,num_classes=20),evolutionary,prop],axis=1)\n tertiary = dhc.get_backbone_coords(np.array(tertiary))\n if mode == 'dih':\n y = dhc.fix_array(dhc.get_phi_psi(tertiary))\n y = y.astype('float32')\n elif mode =='dist':\n y = self.pairwise_distance(tertiary)\n elif mode == 'zmat':\n tertiary = tertiary.reshape((1,-1,3,3))\n tertiary = tertiary[:,:,1,:].reshape((1,-1,1,3)).astype('float32')/100\n dist = dhc.calc_dist_vector(tertiary).numpy().reshape((1,-1,1))\n ang = np.radians(dhc.calc_angle_vector(tertiary).numpy().reshape((1,-1,1)))\n dih = dhc.calc_dihedral_vector(tertiary).numpy()\n y = np.concatenate([dist,ang,dih],axis=-1)\n elif mode == 'tert':\n #tertiary = tertiary.reshape((1,-1,3,3))\n tertiary = tertiary.reshape((-1,3,3))\n #tertiary = tertiary[:,:,1,:].reshape((1,-1,1,3)).astype('float32')/100\n #y = self.pairwise_distance(tertiary)\n return [x.astype('float32',copy=False), tertiary.astype('float32',copy=False), np.asarray(id_), primary]\n if return_seq:\n return [x.astype('float32',copy=False), y.astype('float32',copy=False), np.asarray(id_), tertiary.astype('float32',copy=False), primary]\n else:# if anything changes, i will be replacing this with a more pythonic way soon enough\n # if I really need tertiary structure at anytime, i just code it her\n return [x.astype('float32',copy=False), y.astype('float32',copy=False), np.asarray(id_)]\n \n elif case(''):\n return None", "def _raw_record_reader(stream):\n while True:\n header = stream.read(4)\n if len(header) < 4:\n return\n size, rec_type = struct.unpack(\">HH\", header)\n rec_type = rec_type // 256\n yield (rec_type, header + stream.read(size - 4))", "def _next_record(self, next_line):\n record = self.loader.parse_record_stream(self.reader,\n next_line,\n self.known_format)\n\n self.member_info = None\n\n # Track known format for faster parsing of other records\n self.known_format = record.format\n\n return record", "def extract(self) -> DatasetRecords:\n # get mapping to associate value to field in each record\n mapping = self.extract_mapping()\n fields = self.mapping\n records = DatasetRecords()\n l = len(self.data)\n # read every line\n for index in range(0, l):\n record = Record(mapping)\n # read every column of the record\n for field in fields:\n #store values (with the associated field)\n value = self.data.at[index, field]\n record.add(field=field, value=value)\n records.append(record=record)\n return records", "def test_get_record(self):\n pass", "def readfields(self, dbname, line1, nlines, startdate): \n\n conn=sqlite3.connect(dbname)\n 
c=conn.cursor()\n self.obsHistID=np.zeros(nlines)\n self.fieldMJD=np.zeros(nlines)\n self.fieldRA=np.zeros(nlines)\n self.fieldDec=np.zeros(nlines)\n self.rotSkyPos=np.zeros(nlines)\n self.filter=np.zeros(nlines, dtype=str)\n self.fiveSigmaDepth=np.zeros(nlines)\n self.seeingFwhmEff=np.zeros(nlines)\n\n count=0\n# exec_str='SELECT obsHistID,expMJD,fieldRA,fieldDec,rotSkyPos FROM Summary order by expMJD limit %d,%d' %(line1-1,nlines)\n exec_str='SELECT observationId,observationStartMJD,FieldRA,FieldDec,rotSkyPos,filter,fiveSigmaDepth,seeingFwhmEff FROM SummaryAllProps order by observationStartMJD limit %d,%d' %(line1-1,nlines)\n for row in c.execute(exec_str):\n self.obsHistID[count] = row[0]\n self.fieldMJD[count] = row[1]\n self.fieldRA[count] = np.deg2rad(row[2])\n self.fieldDec[count] = np.deg2rad(row[3])\n self.rotSkyPos[count] = np.deg2rad(row[4])\n self.filter[count] = row[5]\n self.fiveSigmaDepth[count] = row[6]\n self.seeingFwhmEff[count] = row[7]\n count +=1\n\n # startdate is 0 if not provided by user. In this case use the default MJDs.\n if (startdate > 1):\n self.fieldMJD=self.fieldMJD+(int(startdate)-int(self.fieldMJD[0]))", "def test_data_returns_values_in_source_record():\n # Given\n sources = []\n source = Source(\"I001\", \"Test\", \"Person\", \"Pub\", \"Abbr\")\n sources.append(source)\n\n model = SourcesModel(sources)\n\n # When\n pointer = model.data(model.index(0, SourcesModelColumns.POINTER))\n title = model.data(model.index(0, SourcesModelColumns.TITLE))\n author = model.data(model.index(0, SourcesModelColumns.AUTHOR))\n publisher = model.data(model.index(0, SourcesModelColumns.PUBLISHER))\n abbreviation = model.data(model.index(0, SourcesModelColumns.ABBREVIATION))\n autocomplete = model.data(model.index(0, SourcesModelColumns.AUTOCOMPLETE))\n # Then\n assert model.columnCount() == 6\n assert pointer == source.pointer\n assert title == source.title\n assert author == source.author\n assert publisher == source.publisher\n assert abbreviation == source.abbreviation\n assert autocomplete != \"\"", "def FormatRecord(self, record, categories):\n catsmodified=False\n res={'id': 0} # zero means create new record\n for i in record.get(\"serials\", []):\n if i['sourcetype']=='egroupware':\n res['id']=i['id']\n break\n res['n_given'],res['n_middle'],res['n_family']=nameparser.getparts(record.get(\"names\", [{}])[0])\n for nf in 'n_given', 'n_middle', 'n_family':\n if res[nf] is None:\n res[nf]=\"\" # set None fields to blank string\n res['fn']=nameparser.formatsimplename(record.get(\"names\", [{}])[0])\n for t,prefix in (\"business\", \"adr_one\"), (\"home\", \"adr_two\"):\n a={}\n adr=record.get(\"addresses\", [])\n for i in adr:\n if i['type']==t:\n for p2,k in (\"_street\", \"street\"), (\"_locality\", \"city\"), (\"_region\", \"state\"), \\\n (\"_postalcode\", \"postalcode\"), (\"_countryname\", \"country\"):\n res[prefix+p2]=i.get(k, \"\")\n if t==\"business\":\n res['org_name']=i.get(\"company\",\"\")\n break\n if \"emails\" in record:\n for t,k in (\"business\", \"email\"), (\"home\", \"email_home\"):\n for i in record[\"emails\"]:\n if i.get(\"type\",None)==t:\n res[k]=i.get(\"email\")\n res[k+\"_type\"]=\"INTERNET\"\n break\n cats={}\n for cat in record.get(\"categories\", []):\n c=cat['category']\n v=categories.get(c, None)\n if v is None:\n catsmodified=True\n for i in xrange(0,-999999,-1):\n if `i` not in cats:\n break\n else:\n i=`v`\n cats[i]=str(c)\n res['cat_id']=cats\n for t,k in (\"home\", \"tel_home\"), (\"cell\", \"tel_cell\"), ('fax','tel_fax'), \\\n 
('pager', 'tel_pager'), ('office', 'tel_work'):\n if \"numbers\" in record:\n v=\"\"\n for i in record['numbers']:\n if i['type']==t:\n v=i['number']\n break\n res[k]=phonenumber.format(v)\n if \"memos\" in record:\n memos=record.get(\"memos\", [])\n memos+=[{}]\n res['note']=memos[0].get(\"memo\",\"\")\n if \"urls\" in record:\n urls=record.get(\"urls\", [])\n u=\"\"\n for url in urls:\n if url.get(\"type\", None)==\"business\":\n u=url[\"url\"]\n break\n if len(u)==0:\n urls+=[{'url':\"\"}]\n u=urls[0][\"url\"]\n res['url']=u\n return catsmodified,res", "def test_get_interesting_mapping_fields(self):\r\n # all columns are completely unique\r\n d = parse_mapping_file(self.mapping_f1)\r\n actual = get_interesting_mapping_fields(d[0], d[1])\r\n expected = []\r\n self.assertEqual(actual, expected)\r\n\r\n # all columns are completely identical\r\n d = parse_mapping_file(self.mapping_f2)\r\n actual = get_interesting_mapping_fields(d[0], d[1])\r\n expected = []\r\n self.assertEqual(actual, expected)\r\n\r\n # some columns retained\r\n d = parse_mapping_file(self.mapping_f3)\r\n actual = get_interesting_mapping_fields(d[0], d[1])\r\n expected = ['Something', 'days_since_epoch']\r\n self.assertEqual(actual, expected)", "def _read_data(self):", "def refresh_record_from_data(record, exclude_file_nos = []):\n record.label = record.data.get(\"Defendant\", {}).get(\"Name\", \"\")\n record.file_no = record.data.get(\"General\", {}).get(\"File No\", \"\")\n record.county = record.data.get(\"General\", {}).get(\"County\", \"\")\n record.dob = record.data.get(\"Defendant\", {}).get(\n \"Date of Birth/Estimated Age\", None\n )\n record.sex = record.data.get(\"Defendant\", {}).get(\"Sex\", constants.NOT_AVAILABLE)\n record.race = record.data.get(\"Defendant\", {}).get(\"Race\", \"\")\n record.case_status = record.data.get(\"Case Information\", {}).get(\"Case Status\", \"\")\n record.offense_date = make_datetime_aware(\n record.data.get(\"Case Information\", {}).get(\"Offense Date\", None)\n )\n record.arrest_date = record.data.get(\"Case Information\", {}).get(\n \"Arrest Date\", dt_obj_to_date(record.offense_date)\n )\n record.jurisdiction = get_jurisdiction(record)\n record.has_additional_offenses = \"Additional offenses exist\" in record.data.get(\"_meta\", {}).get(\"source\", {})\n\n if exclude_file_nos and record.file_no in exclude_file_nos:\n logger.warning(f\"Not saving ciprs record {record.file_no} (most likely because it's a duplicate).\")\n return\n\n logger.info(f\"Saving ciprs record {record.file_no}\")\n record.save()\n refresh_offenses(record)", "def parse(records):\n # Collect all people.\n people = {}\n for record in records:\n if record.rec_type == \"INDI\":\n assert record.rec_id not in people\n people[record.rec_id] = Person(record)\n # Link parents and children.\n for record in records:\n if record.rec_type == \"FAM\":\n # For this \"family unit\" collect all parents and all children.\n parents = []\n children = []\n for sub_rec in record.sub_recs:\n if sub_rec.rec_type in (\"HUSB\", \"WIFE\"):\n parents.append(sub_rec.data)\n elif sub_rec.rec_type == \"CHIL\":\n children.append(sub_rec.data)\n # Ignore MARR, DATE, PLAC, ...\n # Add parent/child relationships.\n for child_id in children:\n child = people[child_id]\n for parent_id in parents:\n parent = people[parent_id]\n child.parents.append(parent)\n parent.children.append(child)\n return people", "def records(self):\r\n raise NotImplementedError()", "def cleanData(records):\n def cleanRecord(record):\n # the monalisa ur logger 
reports jobs with alice.cern.ch vo name\n # the logger should be fixed now, but we keep it anyway\n if record[VO_NAME] == 'alice.cern.ch':\n record[VO_NAME] = 'alice'\n # remove information that is useless anyway\n if record[VO_ISSUER] and record[VO_ISSUER].startswith('file:///'):\n record[VO_ISSUER] = None\n if record[VO_NAME] and (record[VO_NAME].startswith('file:///') or record[VO_NAME].startswith('/')):\n record[VO_NAME] = None\n if record[USERSN].startswith(ALIEN_USER_PREFIX):\n record[VO_NAME] = 'alice'\n record[USERSN] = '/aliprod'\n if record[USERSN] =='aliprod':\n record[USERSN] = '/aliprod'\n\n # some users have atlas.cern.ch vo, which is not directly wrong but not right either\n # these are mostly non-andrej users so we put them in user if they have no role\n # these are actually non-voms users, but which comes with a reverse vo mapping\n # this is all heuristics\n if record[VO_NAME] == 'atlas.cern.ch' and record[VO_ISSUER].startswith('vomss://voms.cern.ch:8443/voms/atlas'):\n record[VO_NAME] = 'atlas'\n if record[USERSN] == '/C=SI/O=SiGNET/O=IJS/OU=F9/CN=Andrej Filipcic':\n record[VO_GROUP] = 'atlas'\n record[VO_ROLE] = 'production'\n\n # hack for missing vo info on Andrej - turn them into production jobs which is probably 99% right\n if record[USERSN] == '/C=SI/O=SiGNET/O=IJS/OU=F9/CN=Andrej Filipcic' and \\\n record[VO_ISSUER] == None and \\\n record[VO_NAME] == None:\n record[VO_ISSUER] = '/DC=ch/DC=cern/OU=computers/CN=voms.cern.ch'\n record[VO_NAME] = 'atlas'\n record[VO_GROUP] = 'atlas'\n record[VO_ROLE] = 'production'\n\n return record\n\n return [ cleanRecord(r) for r in records ]", "def parse(record):\n\n #Extract individual parts of the FASTA record\n\n identifier = record.id #The sequence's Id\n sequence = record.seq #The sequence itself\n sequence = sequence.upper() #Turns all the nucleotides to upper case\n\n return identifier, sequence", "def finish_constructing(self, more_data):\n # set self.version and self.instance\n super(PptContainerRecord, self).finish_constructing(more_data)\n self.records = None\n if not self.data:\n return\n\n # logging.debug('parsing contents of container record {0}'\n # .format(self))\n\n # create a stream from self.data and parse it like any other\n data_stream = io.BytesIO(self.data)\n record_stream = PptStream(data_stream, self.size,\n 'PptContainerRecordSubstream',\n record_base.STGTY_SUBSTREAM)\n self.records = list(record_stream.iter_records())\n # logging.debug('done parsing contents of container record {0}'\n # .format(self))", "def put_record(self, record):\r\n row = [record.get(field) for field in self.fields.names()]\r\n\r\n self.put(row)", "def read(self):\n self.record_d = {}\n if self.__read_file():\n self.__print_report()", "def parse_record(raw_record, is_training, dtype):\n\n templar_buffer, search_buffer, templar_bbox, search_bbox = parse_example_proto(raw_record)\n templar_img, search_img, score, score_weight, tight_temp_bbox, tight_search_bbox = preprocess_pair(\n templar_buffer=templar_buffer, search_buffer=search_buffer, templar_bbox=templar_bbox,\n search_bbox=search_bbox, num_channels=_NUM_CHANNELS, is_training=is_training)\n\n templar_img = tf.cast(templar_img, dtype)\n search_img = tf.cast(search_img, dtype)\n score = tf.cast(score, tf.int32)\n score_weight = tf.cast(score_weight, dtype)\n tight_temp_bbox = tf.cast(tight_temp_bbox, tf.int32)\n #tight_search_bbox = tf.cast(tight_search_bbox, tf.int32)\n\n dict = {'templar': templar_img, 'search': search_img, 'score': score, 'score_weight': score_weight,\n 
'tight_temp_bbox': tight_temp_bbox, 'tight_search_bbox': tight_search_bbox}\n\n return dict", "def parse_lines(lines):\n for line in lines:\n yield Record(line)", "def read(self):\n if not self._objectid:\n raise Record.RecordIncomplete()\n\n if not self._table :\n #prepare meta-data if not available\n if not self.setupRecord():\n raise Record.RecordIncomplete()\n try:\n extra = map (lambda x: '{1} {0}'.format(x), self._extra_sql_columns.items() )\n row = CFG.CX.getrow ( CFG.DB.SCHEMA + \".\" + self._table.name, \n self._objectid, extra)\n except pg.DatabaseError, e: \n raise Record.RecordNotFound(self._objectid, e)\n \n self.feedDataRow(row)", "def process_record(self, record):\n raise NotImplementedError('Process record needs to be customized')", "def prepare_record_data_for_DB_insert(record_data: Dict) -> Dict:\n if record_data[\"artist\"] is None or record_data[\"title\"] is None:\n raise AssertionError(\"Artist and / or Title cannot be None.\")\n\n artist_list = [art.strip() for art in record_data[\"artist\"].split(\";\")]\n artist_country_list = [\n co.strip() for co in record_data[\"artist_country\"].split(\";\")\n ]\n label_list = [lab.strip() for lab in record_data[\"label\"].split(\";\")]\n\n if len(artist_list) != len(artist_country_list):\n raise AssertionError(\n \"Need the same number of artists and artist countries.\"\n )\n\n record_data[\"artist\"] = artist_list\n record_data[\"artist_country\"] = artist_country_list\n record_data[\"label\"] = label_list\n return record_data", "def Transform(self, record):\n pass", "def _verifyParsing(self):\n for attrname, attr in self.__dict__.items():\n if attrname.endswith('records') and iterable(attr):\n ts = get_record_timestamps(attr)\n if not issorted(ts):\n print('Sorting %s' % attrname)\n if type(attr) == list:\n attr = list(np.asarray(attr)[ts.argsort()])\n else:\n attr = attr[ts.argsort()]\n ts = get_record_timestamps(attr)\n assert issorted(ts)\n self.__dict__[attrname] = attr # update", "def load(self, record: api.Record) -> api.DataRecord:\n\n def _index_dict_to_arr(d, keys):\n if isinstance(keys, list):\n return np.array([d[k] for k in keys])\n else:\n return np.array(d[keys])\n\n X = {k: _index_dict_to_arr(record, v) for k, v in self.inputs.items()}\n if self.mode == api.RecordMode.TRAIN or self.mode == api.RecordMode.VALIDATION:\n y = {k: _index_dict_to_arr(record, v) for k, v in self.outputs.items()}\n if self.sample_weights is not None:\n w = {k: np.array(record[v]) for k, v in self.sample_weights.items()}\n return (X, y, w)\n else:\n return (X, y)\n else:\n return (X,)", "def test_Dataheap_Recorded_002_02(self):\n\n \"\"\"\n $ python - --nodblog --loglevel debug --verbose all --logfile /tmp/my_logfile\n Python 2.7.15+ (default, Oct 7 2019, 17:39:04)\n [GCC 7.4.0] on linux2\n Type \"help\", \"copyright\", \"credits\" or \"license\" for more information.\n >>> from MythTV import MythDB, Video, Recorded\n >>> d = MythDB()\n _initlogger call\n _parseinput call\n >>> rec =d.searchRecorded(title = 'Die letzte Metro')\n >>> r = next(rec)\n >>> r\n b'<Recorded 'Die letzte Metro','2014-10-16 22:16:00+02:00' at 0x7f96bde242a0>'\n >>> r.cast\n []\n >>> r.cast._refdat\n [11301L, datetime(2014, 10, 16, 20, 18, 21)]\n >>> r.cast._datfields\n [u'name', u'role']\n\n >>> r.cast.add('Catherine Deneuve', 'actor')\n >>> r.cast\n [[(u'name', 'Catherine Deneuve'), (u'role', 'actor')]]\n >>> r.cast.add(u\"Gérard Depardieu\", 'actor')\n >>> r.cast.add(u\"Andréa Ferréol\", 'actor')\n >>> r.cast.add(u\"François Truffaut\", 'director')\n >>> 
r.cast\n [[(u'name', 'Catherine Deneuve'), (u'role', 'actor')], [(u'name', u'G\\xe9rard Depardieu'), (u'role', 'actor')], [(u'name', u'Andr\\xe9a Ferr\\xe9ol'), (u'role', 'actor')], [(u'name', u'Fran\\xe7ois Truffaut'), (u'role', 'director')]]\n >>> r.update()\n\n >>> print(r.cast[1]['name'])\n Gérard Depardieu\n\n\n >>> r.cast.add(u\"Jean Poiret\", 'actor')\n >>> r.cast.add(u\"Jean-Louis Richard\", 'actor')\n >>> r.cast\n [[(u'name', 'Catherine Deneuve'), (u'role', 'actor')], [(u'name', u'G\\xe9rard Depardieu'), (u'role', 'actor')], [(u'name', u'Andr\\xe9a Ferr\\xe9ol'), (u'role', 'actor')], [(u'name', u'Fran\\xe7ois Truffaut'), (u'role', 'director')], [(u'name', u'Jean Poiret'), (u'role', 'actor')], [(u'name', u'Jean-Louis Richard'), (u'role', 'actor')]]\n >>> r.update()\n\n\n >>> r1 = Recorded((r.chanid, r.starttime), db =d)\n >>> r1\n b'<Recorded 'Die letzte Metro','2014-10-16 22:16:00+02:00' at 0x7f96bde2d868>'\n >>> r1.cast\n [[(u'name', u'Catherine Deneuve'), (u'role', u'actor')], [(u'name', u'G\\xe9rard Depardieu'), (u'role', u'actor')], [(u'name', u'Andr\\xe9a Ferr\\xe9ol'), (u'role', u'actor')], [(u'name', u'Fran\\xe7ois Truffaut'), (u'role', u'director')], [(u'name', u'Jean Poiret'), (u'role', u'actor')], [(u'name', u'Jean-Louis Richard'), (u'role', u'actor')]]\n >>> r1.cast.delete(u'Jean-Louis Richard', u'actor')\n >>> r1.cast\n [[(u'name', u'Catherine Deneuve'), (u'role', u'actor')], [(u'name', u'G\\xe9rard Depardieu'), (u'role', u'actor')], [(u'name', u'Andr\\xe9a Ferr\\xe9ol'), (u'role', u'actor')], [(u'name', u'Fran\\xe7ois Truffaut'), (u'role', u'director')], [(u'name', u'Jean Poiret'), (u'role', u'actor')]]\n >>> r1.update()\n >>> r1.cast\n [[(u'name', u'Catherine Deneuve'), (u'role', u'actor')], [(u'name', u'G\\xe9rard Depardieu'), (u'role', u'actor')], [(u'name', u'Andr\\xe9a Ferr\\xe9ol'), (u'role', u'actor')], [(u'name', u'Fran\\xe7ois Truffaut'), (u'role', u'director')], [(u'name', u'Jean Poiret'), (u'role', u'actor')]]\n\n\n Attention: Recorded.cast.delete() deletes the entries in the 'people' table as well !!\n\n >>> r1.cast.delete(u\"Jean Poiret\", 'actor')\n >>> r1.update()\n >>> r1.cast.delete(u\"François Truffaut\", 'director')\n >>> r1.update()\n >>> r1.cast\n [[(u'name', u'Catherine Deneuve'), (u'role', u'actor')], [(u'name', u'G\\xe9rard Depardieu'), (u'role', u'actor')], [(u'name', u'Andr\\xe9a Ferr\\xe9ol'), (u'role', u'actor')]]\n\n\n \"\"\"\n\n class People(DBData):\n \"\"\"\n People(data=None, db=None) --> People object to\n database table 'people', data is a `name` string.\n\n - get information about the table:\n $ mysql -h <master-backend-ip> -u mythtv -p<password-from-config.xml> mythconverg\n\n MariaDB [mythconverg]> describe people;\n +-------------+-----------------------+------+-----+---------+----------------+\n | Field | Type | Null | Key | Default | Extra |\n +-------------+-----------------------+------+-----+---------+----------------+\n | person | mediumint(8) unsigned | NO | PRI | NULL | auto_increment |\n | name | varchar(128) | NO | UNI | | |\n +-------------+-----------------------+------+-----+---------+----------------+\n 2 rows in set (0.00 sec)\n\n \"\"\"\n _table = 'people'\n _key = ['name']\n\n ### end class Person\n\n\n # a recording with french accents in the cast\n\n title = self.testenv['RECFRTITLE'] # \"Le Dernier Métro\"\n chanid = self.testenv['RECFRCHANID']\n starttimemyth = self.testenv['RECFRSTARTTIMEMYTH']\n\n print(title)\n\n castlist = [ (u'Catherine Deneuve', u'actor' )\n , (u\"Gérard Depardieu\", u'actor' )\n , 
(u\"Andréa Ferréol\", u'actor' )\n , (u\"François Truffaut\", u'director')\n ]\n\n # get a recording, search for the title\n recs = self.mydb.searchRecorded(title = title)\n rec = next(recs)\n self.assertEqual(rec.chanid, int(chanid))\n\n # backup the cast of this recording\n org_cast = rec.cast\n\n# ## backup the people table\n# #org_people = People(db=self.mydb)\n\n# # check if entries in castlist does not occur in cast\n# for name,role in castlist:\n# print(name)\n# print(role)\n\n# # if in rec.cast:\n# # rec.cast.delete(*c) # need to dereference the tuple\n# sys.exit(1)\n# rec.update()\n# # remember length\n# cast_length = len(rec.cast)\n# # check if the members of the cast are listed\n# # in the 'people' table\n# cast_found = False\n# for c in castlist:\n# try:\n# cname = People(c[0])\n# cast_found = True\n# except:\n# pass\n# # cast should no be listed in the people table\n# self.assertFalse(cast_found)\n\n # add castlist to cast\n for c in castlist:\n rec.cast.add(*c) # need to dereference the tuple\n print(rec.cast)\n #sys.exit(1)\n rec.update()\n\n # check again if the members of the cast are listed\n # in the 'people' table\n cast_found = False\n for c in castlist:\n try:\n cname = People(c[0])\n cast_found = True\n except:\n pass\n # now cast should be listed in the people table\n self.assertTrue(cast_found)\n\n # get the len of the rec.casts\n c1_length = len(rec.cast)\n # delete on entry\n rec.cast.delete(*castlist[2])\n rec.update()\n self.assertEqual(c1_length -1, len(rec.cast))\n\n # delete all entries\n rec.cast.clean() # this does a commit as well\n self.assertEqual(len(rec.cast), 0)\n\n # add the previously saved cast back\n # to a new instance of that recording\n recn = Recorded((rec.chanid, rec.starttime), db = self.mydb)\n for cs in org_cast:\n recn.cast.add(cs)\n recn.update\n\n self.assertEqual(len(recn.cast), len(org_cast))", "def init_bc_record_fields(self, bc):\n self.init_record_fields(BC, bc.bc_record_field_names())", "def _break_down(self, buf, type_override=None, id_override=None):\n assert (id_override is not None and type_override is not None) or\\\n (id_override is None and type_override is None),\\\n 'Field ID and type must be both specified in headerless mode'\n\n while 1:\n field = {}\n if type_override is not None:\n f_type = type_override\n f_id = id_override\n else:\n # if no more data, stop and return\n try:\n f_type, f_id = self._decode_header(buf)\n except EOFError:\n break\n\n self.logger.debug(\n \"_break_down():field #%d pbtype #%d\", f_id, f_type\n )\n try:\n if f_type == 0: # vint\n field['data'] = self._decode_vint(buf)\n elif f_type == 1: # 64-bit\n field['data'] = self._read_fixed(buf, 8)\n elif f_type == 2: # str\n field['data'] = self._decode_str(buf)\n elif f_type == 5: # 32-bit\n field['data'] = self._read_fixed(buf, 4)\n else:\n self.logger.warning(\n \"_break_down():Ignore unknown type #%d\", f_type\n )\n continue\n except EndOfMessage as e:\n if type_override is None or e.partial:\n raise CodecError('Unexpected end of message while decoding field {0}'.format(f_id))\n else:\n break\n field['id'] = f_id\n field['wire_type'] = f_type\n yield field", "def __init__(self, data_id, course_fields, speed_fields, heading_fields,\n wind_dir_fields, wind_speed_fields,\n update_on_fields=None,\n zero_line_reference=0,\n convert_wind_factor=1,\n convert_speed_factor=1,\n output_nmea=False):\n super().__init__(input_format=formats.Python_Record,\n output_format=formats.Text)\n self.data_id = data_id\n self.course_fields = 
course_fields.split(',')\n self.speed_fields = speed_fields.split(',')\n self.heading_fields = heading_fields.split(',')\n self.wind_dir_fields = wind_dir_fields.split(',')\n self.wind_speed_fields = wind_speed_fields.split(',')\n\n if update_on_fields:\n self.update_on_fields = update_on_fields.split(',')\n else:\n self.update_on_fields = self.wind_dir_fields\n self.zero_line_reference = zero_line_reference\n\n self.convert_wind_factor = convert_wind_factor\n self.convert_speed_factor = convert_speed_factor\n self.output_nmea = output_nmea\n \n self.course_val = None\n self.speed_val = None\n self.heading_val = None\n self.wind_dir_val = None\n self.wind_speed_val = None\n\n self.last_timestamp = 0", "def parseIntoDB(self, filehandle, cursor, alignTab, sequenceTab=None,\n update=None):\n c = filehandle.tell()\n filehandle.seek(0, 2)\n filesize = filehandle.tell()\n filehandle.seek(c)\n l = filehandle.readline()\n rc = 0\n count = 0\n if l.split()[0] != '##maf':\n return\n else:\n self.setpar(l.split()[1:])\n\n l=filehandle.readline()\n while l:\n la = l.split()\n## print la\n if(len(la)==0 or la[0]=='#'):\n## print \"skipping\"\n 1\n elif(la[0]=='a'):\n## print \"reading alignment\"\n count+=1\n self.readalign(la[1:], filehandle)\n self._dump(alignTab, sequenceTab)\n if(update and not count % 1000):\n cursor.execute(update % (int(filehandle.tell() * 100.\n / filesize)))\n else:\n## print \"end of records\"\n return\n l=filehandle.readline()", "def test_checkfields():\n data = StringIO(\"\"\"chrX\\t10\\t100\\n\"\"\")\n a = bedparser.bedfile(data)\n a = list(a)\n i = a[0]\n assert i.chr=='chrX'\n assert i.start==10\n assert i.stop==100\n assert len(a) == 1", "def is_record(buf, offset):\n\n if len(buf) < offset + 8:\n return False\n\n magic, size = struct.unpack_from(\"<II\", buf, offset)\n if magic != 0x00002a2a:\n return False\n\n if not (0x30 <= size <= 0x10000):\n return False\n\n if len(buf) < offset + size:\n return False\n\n size2 = struct.unpack_from(\"<I\", buf, offset + size - 4)[0]\n if size != size2:\n return False\n\n return True", "def FILE_RTRV_record(self):\n eor = 0\n raw_record = []\n #record = []\n # record is a list of non-empty strings, \n # each string is a line of info from the reader\n word = ''\n linebuf = []\n while len(raw_record) < 1 and self.eof == 0 and eor == 0 :\n # we need to have something to reply.. 
\n while 1:\n # read 1 char at a time \n # until we get to the '>' prompt\n #\n c = self.tf.read(1)\n #\n #print c,\n #\n if len(c) != 1:\n self.eof = 1\n if self.debug > 2 :\n print \"FILE Raw Record: \",\n pprint.pprint(raw_record)\n return raw_record\n elif c == '>':\n eor = 1\n if self.debug > 2 :\n print \"FILE2 Raw Record: \",\n pprint.pprint(raw_record)\n return raw_record\n # \\r = CR , \\n = LF \n # (serial device uses CR + optionally LF, unix text only uses LF)\n # - new array entry but only if there is something to add \n elif c == '\\r' or c == '\\n':\n if word != '':\n linebuf.append(word)\n word = ''\n if linebuf != []:\n raw_record.append(linebuf)\n linebuf = []\n # split line into words\n elif c == ' ':\n if word != '':\n linebuf.append(word)\n word = ''\n # all other chars\n else : \n word = word + c\n \n time.sleep(0.001)", "def load_data(self):\n super(MudderyObjectCreater, self).load_data()\n \n data = self.get_data_record()\n if not data:\n return\n \n # set common object's info\n self.obj_list = {}\n\n for obj in data.obj_list.split(\",\"):\n obj_key = \"\"\n number = 0\n arg = obj.split(\":\", 1)\n if len(arg) == 1:\n obj_key = arg[0]\n number = 1\n elif len(arg) >= 2:\n obj_key = arg[0]\n number = int(arg[1])\n\n self.obj_list[obj_key] = number", "def __init__(self):\n self.data = []\n self.record = {}", "def readRecordFromFile():\n\twith open(gbl.sourceFile, newline='') as csvfile:\n\t\trowReader = csv.reader(csvfile, delimiter=gbl.csvDiscriminator, quotechar=gbl.csvQuotechar)\n\t\tfor row in rowReader:\n\t\t\tROWData.append(row)", "def test_misc_csv_read_inmemory():\n r = csv_reader([\"fieldname_a,fieldname_b\",\n \"mo,bo\",\n \"go,zo\",\n \"fo,po\"])\n fields = r.hdr()\n data = str(fields)\n while True:\n row = r.row()\n if not row: break\n data += '\\n' + str(row)\n assert(data==\"\"\"\n['fieldname_a', 'fieldname_b']\n['mo', 'bo']\n['go', 'zo']\n['fo', 'po']\n \"\"\".strip())", "def _init_fields(self):\n if self._fields is None:\n M.mset('U', \"^\") # DBS Calls Require this\n f = self._fields = {}\n attrs = self.fieldnames = {}\n fieldid = \"0\"\n while 1:\n # Subscript 0 is field description, .1 is the title, 3 is help\n fieldid, info, title, fieldhelp = M.ddwalk(self._fileid, fieldid)\n #fieldid, info, title, fieldhelp = M.mexec(\n # \"\"\"set s0=$order(^DD(s2,s0)) Q:s0'=+s0 s s1=$G(^DD(s2,s0,0)),s3=$G(^DD(s2,s0,.1)),s4=$G(^DD(s2,s0,3))\"\"\",\n # M.INOUT(str(fieldid)), M.INOUT(\"\"), str(self._fileid), M.INOUT(\"\"), M.INOUT(\"\"))\n if fieldid == \"\" or fieldid[0] not in \"0123456789.\":\n break\n\n info = info.split(\"^\", 4) \n label = self._clean_label(info[0])\n try:\n ftype = info[1]\n except:\n ftype = None\n if ftype:\n finst = None\n for klass in FIELD_TYPES:\n if klass.isa(ftype):\n finst = f[fieldid] = klass(fieldid, label, info)\n finst.fileid = self.fileid\n finst.ownerdd = self\n attrs[label] = fieldid\n break\n if finst is None:\n print finst, \"FIELD [%s], spec [%s] was not identified\" % (label, ftype)\n continue\n finst.title = title\n finst.fieldhelp = fieldhelp\n else:\n assert finst, \"FIELD [%s] %s has no fieldspec\" % (label, info)\n\n return self._fields", "def process_record(raw_record, umi, unique_align_buffer):\n # something like ths :>\n\n #record = raw_record.strip().split()\n #flag = int(record[1])\n #start_position = int(record[3])\n #position = int(record[3])\n #cigar_string = record[5]\n\n #is_positive = True if ((flag & 16) == 16) else False\n #matches = re.findall(r'(\\d+)([A-Z]{1})', cigar_string)\n\n #if is_positive:\n # 
if matches[0][1] == 'S':\n # position -= int(matches[0][0])\n \n #else:\n # for match in matches:\n # if match[1] not in ('I','X','=') :\n # position += int(match[0])\n\n #unique_key = f\"{position}_{umi}_{is_positive}\"\n #if unique_key not in unique_align_buffer:\n # unique_align_buffer[unique_key] = [raw_record]\n \n #return None\n pass", "def _add_parsed_record(record, records):\n if record != \"\":\n logger.debug('The record is not empty. Let\\'s parse it.')\n parsed = self._parse_record(record, customization=customization)\n if parsed:\n logger.debug('Store the result of the parsed record')\n records.append(parsed)\n else:\n logger.debug('Nothing returned from the parsed record!')\n else:\n logger.debug('The record is empty')", "def read_record_head(self):\n ver_inst, rec_type, rec_size = unpack('<HHL', self.stream.read(8))\n instance, version = divmod(ver_inst, 2**4)\n return rec_type, rec_size, (instance, version)", "def sample_record(self, chunkdata):\n version = chunkdata[0:4]\n if version == V6_VERSION:\n record_size = self.v6_struct.size\n elif version == V5_VERSION:\n record_size = self.v5_struct.size\n elif version == V4_VERSION:\n record_size = self.v4_struct.size\n elif version == V3_VERSION:\n record_size = self.v3_struct.size\n else:\n return\n\n for i in range(0, len(chunkdata), record_size):\n if self.sample > 1:\n # Downsample, using only 1/Nth of the items.\n if random.randint(0, self.sample - 1) != 0:\n continue # Skip this record.\n\n record = chunkdata[i:i + record_size]\n # for earlier versions, append fake bytes to record to maintain size\n if version == V3_VERSION:\n # add 16 bytes of fake root_q, best_q, root_d, best_d to match V4 format\n record += 16 * b'\\x00'\n if version == V3_VERSION or version == V4_VERSION:\n # add 12 bytes of fake root_m, best_m, plies_left to match V5 format\n record += 12 * b'\\x00'\n # insert 4 bytes of classical input format tag to match v5 format\n record = record[:4] + CLASSICAL_INPUT + record[4:]\n if version == V3_VERSION or version == V4_VERSION or version == V5_VERSION:\n # add 48 byes of fake result_q, result_d etc\n record += 48 * b'\\x00'\n\n if version == V6_VERSION:\n # diff focus code, peek at best_q, orig_q and pol_kld from record (unpacks as tuple with one item)\n best_q = struct.unpack('f', record[8284:8288])[0]\n orig_q = struct.unpack('f', record[8328:8332])[0]\n pol_kld = struct.unpack('f', record[8348:8352])[0]\n\n # if orig_q is NaN or pol_kld is 0, accept, else accept based on diff focus\n if not np.isnan(orig_q) and pol_kld > 0:\n diff_q = abs(best_q - orig_q)\n q_weight = self.diff_focus_q_weight\n pol_scale = self.diff_focus_pol_scale\n total = (q_weight * diff_q + pol_kld) / (q_weight +\n pol_scale)\n thresh_p = self.diff_focus_min + self.diff_focus_slope * total\n if thresh_p < 1.0 and random.random() > thresh_p:\n continue\n\n yield record", "def read_recorded(self):\n reader=csv.reader(open(self._filename,\"rb\"),delimiter=',')\n self._headers = reader.next()\n x=list(reader)\n recorded_positions=np.array(x).astype('float')\n self.refine_pos(recorded_positions)\n self._joint_first = recorded_positions[0]\n self._joint_last = recorded_positions[-1]", "def load_records():\n\n with open('seed_data/records.csv', 'rb') as csvfile:\n data = csv.reader(csvfile)\n for row in data:\n record_id, user_id, common_name, date_time, latitude, longitude, notes, seen, num_birds = row\n\n record = Record(record_id=record_id, user_id=user_id, common_name=common_name,\n date_time=date_time, latitude=latitude, 
longitude=longitude, \n notes=notes, seen=seen, num_birds=num_birds)\n\n db.session.add(record)\n\n db.session.commit()", "def read_record(self, *dtypes, **kwargs):\n dtype = kwargs.pop('dtype', None)\n if kwargs:\n raise ValueError(\"Unknown keyword arguments {}\".format(tuple(kwargs.keys())))\n\n if dtype is not None:\n dtypes = dtypes + (dtype,)\n elif not dtypes:\n raise ValueError('Must specify at least one dtype')\n\n first_size = self._read_size(eof_ok=True)\n\n dtypes = tuple(np.dtype(dtype) for dtype in dtypes)\n block_size = sum(dtype.itemsize for dtype in dtypes)\n\n num_blocks, remainder = divmod(first_size, block_size)\n if remainder != 0:\n raise ValueError('Size obtained ({0}) is not a multiple of the '\n 'dtypes given ({1}).'.format(first_size, block_size))\n\n if len(dtypes) != 1 and first_size != block_size:\n # Fortran does not write mixed type array items in interleaved order,\n # and it's not possible to guess the sizes of the arrays that were written.\n # The user must specify the exact sizes of each of the arrays.\n raise ValueError('Size obtained ({0}) does not match with the expected '\n 'size ({1}) of multi-item record'.format(first_size, block_size))\n\n data = []\n for dtype in dtypes:\n r = np.fromfile(self._fp, dtype=dtype, count=num_blocks)\n if len(r) != num_blocks:\n raise FortranFormattingError(\n \"End of file in the middle of a record\")\n if dtype.shape != ():\n # Squeeze outmost block dimension for array items\n if num_blocks == 1:\n assert r.shape == (1,) + dtype.shape\n r = r[0]\n\n data.append(r)\n\n second_size = self._read_size()\n if first_size != second_size:\n raise IOError('Sizes do not agree in the header and footer for '\n 'this record - check header dtype')\n\n # Unpack result\n if len(dtypes) == 1:\n return data[0]\n else:\n return tuple(data)", "def test_patch_record(self):\n pass", "def load_from_db(self):\n records = self.read(include_header=True)\n fields = list(records[0])\n for index, rule in enumerate(records):\n if not index:\n continue # first record is a header\n trule = TrafficRule()\n\n for key, val in zip(fields, list(rule)):\n if val is not None and val != '':\n try:\n ktype = TrafficRule.SCHEMA.get(key, None)\n if ktype in self.TYPES_MAP:\n val = self.TYPES_MAP[ktype](val)\n except (TypeError, ValueError) as err:\n log.warning('Converting type for %s resulted an error %s',\n key, err)\n setattr(trule, key, val)\n\n ruleid = getattr(trule, 'ruleid', None)\n if not ruleid:\n log.error(\"Skipped Invalid rule with no ruleid : %s\",\n trule.__dict__)\n self._rules[ruleid] = trule", "def enumerate_records(self, table):\n for i in range(self.nrofrecords()):\n data = self.bank.readrec(i + 1)\n if data and data[0] == table.tableid:\n try:\n yield Record(i + 1, table.fields, data[1:])\n except EOFError:\n print(\"Record %d too short: -- %s\" % (i+1, ashex(data)), file=stderr)\n except Exception as e:\n print(\"Record %d broken: ERROR '%s' -- %s\" % (i+1, e, ashex(data)), file=stderr)", "def preprocess_record(record):\n automatic_fields = ['created_at', 'modified_at']\n record = serialize_fields(filter_out_dict_keys(record, automatic_fields))\n\n return record", "def _get_packed_record(self, i):\n\n raise NotImplementedError()", "def test_load_from_msg_type_check(self):\n for msg in self.cases.keys():\n\n cr = CloudRecord()\n cr.load_from_msg(msg)\n\n for key in cr._int_fields:\n value = cr._record_content[key]\n # Check the value we are going to be passing to MySQL\n # is an integer or None. 
MySQL 5.6.x rejects the value\n # otherwise, whereas 5.1.x interprets it as integer 0.\n valid_value = isinstance(value, int) or value is None\n # Use 'repr' to show quote marks if value is a string.\n self.assertTrue(valid_value, 'Integer %s with value: %s\\n%s' %\n (key, repr(value), msg))\n\n for key in cr._float_fields:\n value = cr._record_content[key]\n # Check the value we are going to be passing to MySQL\n # is a float or None. MySQL 5.6.x rejects the value\n # otherwise, whereas 5.1.x interprets it as 0.00.\n valid_value = isinstance(value, float) or value is None\n # Use 'repr' to show quote marks if value is a string.\n self.assertTrue(valid_value, 'Decimal %s with value: %s\\n%s' %\n (key, repr(value), msg))\n\n for key in cr._datetime_fields:\n value = cr._record_content[key]\n # Check the value we are going to be passing to MySQL\n # is a datetime or None. MySQL 5.6.x rejects the value\n # otherwise, whereas 5.1.x interprets it as a zero timestamp.\n valid_value = isinstance(value, datetime) or value is None\n # Use 'repr' to show quote marks if value is a string.\n self.assertTrue(valid_value, 'Datetime %s with value: %s\\n%s' %\n (key, repr(value), msg))", "def inflateRecord(self, record):\n # Dirty bit is turned off by default\n return [record.recordID, record.recordData, 0, record.partitionFacility, record.partitionUser]", "def _new_record():\n nonlocal key\n nonlocal value_list\n nonlocal record\n nonlocal origin\n nonlocal field_offset_map\n key = None\n value_list = None\n if source is not None:\n origin = Origin(source, None, None)\n field_offset_map = {}\n record = RFC822Record(data_cls(), origin, data_cls(), field_offset_map)", "def get_record_set(settings, dataset, dataset_types, rw, numjobs):\n if settings['rw'] == 'randrw':\n if len(settings['filter']) > 1 or not settings['filter']:\n print(\n \"Since we are processing randrw data, you must specify a filter for either read or write data, not both.\")\n exit(1)\n\n record_set = {'x_axis': dataset_types['iodepth'], 'x_axis_format': 'Queue Depth', 'y1_axis': None,\n 'y2_axis': None, 'numjobs': numjobs}\n\n iops_series_raw = []\n iops_stddev_series_raw = []\n lat_series_raw = []\n lat_stddev_series_raw = []\n\n for depth in dataset_types['iodepth']:\n for record in dataset:\n if (int(record['iodepth']) == int(depth)) and int(record['numjobs']) == int(numjobs[0]) and record[\n 'rw'] == rw and record['type'] in settings['filter']:\n iops_series_raw.append(record['iops'])\n lat_series_raw.append(record['lat'])\n iops_stddev_series_raw.append(record['iops_stddev'])\n lat_stddev_series_raw.append(record['lat_stddev'])\n #\n # Latency data must be scaled, IOPs will not be scaled.\n #\n latency_scale_factor = supporting.get_scale_factor(lat_series_raw)\n scaled_latency_data = supporting.scale_yaxis_latency(\n lat_series_raw, latency_scale_factor)\n #\n # Latency data must be rounded.\n #\n scaled_latency_data_rounded = supporting.round_metric_series(\n scaled_latency_data['data'])\n scaled_latency_data['data'] = scaled_latency_data_rounded\n #\n # Latency stddev must be scaled with same scale factor as the data\n #\n lat_stdev_scaled = supporting.scale_yaxis_latency(\n lat_stddev_series_raw, latency_scale_factor)\n\n lat_stdev_scaled_rounded = supporting.round_metric_series(\n lat_stdev_scaled['data'])\n\n #\n # Latency data is converted to percent.\n #\n lat_stddev_percent = supporting.raw_stddev_to_percent(\n scaled_latency_data['data'], lat_stdev_scaled_rounded)\n\n lat_stddev_percent = [int(x) for x in 
lat_stddev_percent]\n\n scaled_latency_data['stddev'] = supporting.round_metric_series(\n lat_stddev_percent)\n #\n # IOPS data is rounded\n iops_series_rounded = supporting.round_metric_series(iops_series_raw)\n #\n # IOPS stddev is converted to percent\n iops_stdev_rounded = supporting.round_metric_series(iops_stddev_series_raw)\n iops_stdev_rounded_percent = supporting.raw_stddev_to_percent(\n iops_series_rounded, iops_stdev_rounded)\n iops_stdev_rounded_percent = [int(x) for x in iops_stdev_rounded_percent]\n #\n #\n record_set['y1_axis'] = {'data': iops_series_rounded,\n 'format': \"IOPS\", 'stddev': iops_stdev_rounded_percent}\n record_set['y2_axis'] = scaled_latency_data\n\n return record_set", "def _parse_record(self, record, customization=None):\n d = {}\n\n if not record.startswith('@'):\n logger.debug('The record does not start with @. Return empty dict.')\n return {}\n\n # if a comment record, add to bib_database.comments\n if record.lower().startswith('@comment'):\n logger.debug('The record startswith @comment')\n logger.debug('Store comment in list of comments')\n\n self.bib_database.comments.append(re.search('\\{(.*)\\}', record, re.DOTALL).group(1))\n\n logger.debug('Return an empty dict')\n return {}\n\n # if a preamble record, add to bib_database.preambles\n if record.lower().startswith('@preamble'):\n logger.debug('The record startswith @preamble')\n logger.debug('Store preamble in list of preambles')\n\n self.bib_database.preambles.append(re.search('\\{(.*)\\}', record, re.DOTALL).group(1))\n\n logger.debug('Return an empty dict')\n return {}\n\n # prepare record\n record = '\\n'.join([i.strip() for i in record.split('\\n')])\n if '}\\n' in record:\n logger.debug('}\\\\n detected in the record. Clean up.')\n record = record.replace('\\r\\n', '\\n').replace('\\r', '\\n').rstrip('\\n')\n # treat the case for which the last line of the record\n # does not have a coma\n if record.endswith('}\\n}') or record.endswith('}}'):\n logger.debug('Missing coma in the last line of the record. 
Fix it.')\n record = re.sub('}(\\n|)}$', '},\\n}', record)\n\n # if a string record, put it in the replace_dict\n if record.lower().startswith('@string'):\n logger.debug('The record startswith @string')\n key, val = [i.strip().strip('{').strip('}').replace('\\n', ' ') for i in record.split('{', 1)[1].strip('\\n').strip(',').strip('}').split('=')]\n key = key.lower() # key is case insensitive\n val = self._string_subst_partial(val)\n if val.startswith('\"') or val.lower() not in self.bib_database.strings:\n self.bib_database.strings[key] = val.strip('\"')\n else:\n self.bib_database.strings[key] = self.bib_database.strings[val.lower()]\n logger.debug('Return a dict')\n return d\n\n # for each line in record\n logger.debug('Split the record of its lines and treat them')\n kvs = [i.strip() for i in record.split(',\\n')]\n inkey = \"\"\n inval = \"\"\n for kv in kvs:\n logger.debug('Inspect: %s', kv)\n # TODO: We may check that the keyword belongs to a known type\n if kv.startswith('@') and not inkey:\n # it is the start of the record - set the bibtype and citekey (id)\n logger.debug('Line starts with @ and the key is not stored yet.')\n bibtype, id = kv.split('{', 1)\n bibtype = self._add_key(bibtype)\n id = id.strip('}').strip(',')\n logger.debug('bibtype = %s', bibtype)\n logger.debug('id = %s', id)\n if self.ignore_nonstandard_types and bibtype not in ('article',\n 'book',\n 'booklet',\n 'conference',\n 'inbook',\n 'incollection',\n 'inproceedings',\n 'manual',\n 'mastersthesis',\n 'misc',\n 'phdthesis',\n 'proceedings',\n 'techreport',\n 'unpublished'):\n logger.warning('Entry type %s not standard. Not considered.', bibtype)\n break\n elif '=' in kv and not inkey:\n # it is a line with a key value pair on it\n logger.debug('Line contains a key-pair value and the key is not stored yet.')\n key, val = [i.strip() for i in kv.split('=', 1)]\n key = self._add_key(key)\n val = self._string_subst_partial(val)\n # if it looks like the value spans lines, store details for next loop\n if (val.count('{') != val.count('}')) or (val.startswith('\"') and not val.replace('}', '').endswith('\"')):\n logger.debug('The line is not ending the record.')\n inkey = key\n inval = val\n else:\n logger.debug('The line is the end of the record.')\n d[key] = self._add_val(val)\n elif inkey:\n logger.debug('Continues the previous line to complete the key pair value...')\n # if this line continues the value from a previous line, append\n inval += ', ' + kv\n # if it looks like this line finishes the value, store it and clear for next loop\n if (inval.startswith('{') and inval.endswith('}')) or (inval.startswith('\"') and inval.endswith('\"')):\n logger.debug('This line represents the end of the current key-pair value')\n d[inkey] = self._add_val(inval)\n inkey = \"\"\n inval = \"\"\n else:\n logger.debug('This line does NOT represent the end of the current key-pair value')\n\n logger.debug('All lines have been treated')\n if not d:\n logger.debug('The dict is empty, return it.')\n return d\n\n d['ENTRYTYPE'] = bibtype\n d['ID'] = id\n\n if customization is None:\n logger.debug('No customization to apply, return dict')\n return d\n else:\n # apply any customizations to the record object then return it\n logger.debug('Apply customizations and return dict')\n return customization(d)", "def handle_record(self, record):\n raise NotImplementedError", "def load(self):\n canSave = self.canSave\n #--Header\n inPath = os.path.join(self.fileInfo.dir,self.fileInfo.name)\n ins = Tes3Reader(self.fileInfo.name,file(inPath,'rb'))\n 
(name,size,delFlag,recFlag) = ins.unpack('4s3i',16,'REC_HEAD')\n self.tes3 = Tes3(name,size,delFlag,recFlag,ins,True)\n #--Raw data read\n while not ins.atEnd():\n #--Get record info and handle it\n (name,size,delFlag,recFlag) = ins.unpackRecHeader()\n #--LEVC?\n if name == 'LEVC':\n levc = Levc(name,size,delFlag,recFlag,ins,True)\n self.levcs[levc.id] = levc\n if canSave: self.records.append(levc)\n #print ' Added:',levc.id\n elif name == 'LEVI':\n levi = Levi(name,size,delFlag,recFlag,ins,True)\n self.levis[levi.id] = levi\n if canSave: self.records.append(levi)\n #print ' Added:',levi.id\n #--Other\n elif canSave:\n record = Record(name,size,delFlag,recFlag,ins)\n self.records.append(record)\n else:\n ins.seek(size,1,'Record')\n #--Done Reading\n ins.close()", "def read_fields(self, limit = 0, collapse = False):\r\n\r\n keys = []\r\n probes = {}\r\n\r\n def probe_record(record, parent = None):\r\n for key, value in record.items():\r\n full_key = parent + \".\" + key if parent else key\r\n\r\n if self.expand and type(value) == dict:\r\n probe_record(value, full_key)\r\n continue\r\n\r\n if not full_key in probes:\r\n probe = brewery.dq.FieldTypeProbe(full_key)\r\n probes[full_key] = probe\r\n keys.append(full_key)\r\n else:\r\n probe = probes[full_key]\r\n probe.probe(value)\r\n\r\n count = 0\r\n for record in self.records():\r\n if collapse:\r\n record = collapse_record(record)\r\n\r\n probe_record(record)\r\n if limit and count >= limit:\r\n break\r\n count += 1\r\n\r\n fields = []\r\n\r\n for key in keys:\r\n probe = probes[key]\r\n field = Field(probe.field)\r\n\r\n storage_type = probe.unique_storage_type\r\n if not storage_type:\r\n field.storage_type = \"unknown\"\r\n elif storage_type == \"unicode\":\r\n field.storage_type = \"string\"\r\n else:\r\n field.storage_type = \"unknown\"\r\n field.concrete_storage_type = storage_type\r\n\r\n # FIXME: Set analytical type\r\n\r\n fields.append(field)\r\n\r\n self.fields = list(fields)\r\n return self.fields", "def init_record_fields(self, run_record_key, record_fields):\n\n record_fields_grp = self.settings_grp[RECORD_FIELDS]\n\n # make a dataset for the sparse fields allowed. this requires\n # a 'special' datatype for variable length strings. 
This is\n # supported by HDF5 but not numpy.\n vlen_str_dt = h5py.special_dtype(vlen=str)\n\n # create the dataset with the strings of the fields which are records\n record_group_fields_ds = record_fields_grp.create_dataset(run_record_key,\n (len(record_fields),),\n dtype=vlen_str_dt,\n maxshape=(None,))\n\n # set the flags\n for i, record_field in enumerate(record_fields):\n record_group_fields_ds[i] = record_field", "def parse_file(fits_file, data_offset, records, record_length, field_position, field_size):\n bits = field_size * 8\n\n with open(fits_file, 'rb') as f:\n f.read(data_offset)\n for _ in range(0, records):\n record = f.read(record_length)\n value = record[field_position-1:field_position+field_size-1]\n print(multiparse(bits, value))", "def parse_passport_records(source: IO[str]) -> Iterable[PassportRecord]: # noqa: D401\n # The initial adverb is tricking flake8 into thinking the above line is\n # not imperative.\n current_record_contents: Dict[PassportField, str] = {}\n\n for line in source:\n components = line.split(\" \")\n components = [x.strip() for x in components]\n components = [x for x in components if x]\n\n if not components:\n # Process this record\n if current_record_contents:\n yield current_record_contents\n current_record_contents = {}\n continue\n\n for component in components:\n component = component.strip()\n if not component:\n continue\n field_name, field_value = component.split(\":\", 1)\n try:\n true_field = PassportField(field_name)\n except ValueError:\n # Accept invalid data\n continue\n current_record_contents[true_field] = field_value\n\n # Emit the last record\n if current_record_contents:\n yield current_record_contents", "def setUp(self):\n # $TODO - expand supported field types\n # NOTE: For the purposes of this test, don't expect to compare 'f'-type\n # floats! The precision conversion using struct.pack -> struct.unpack\n # is problematic! Use 'd' instead because Python floats are actually\n # C-doubles. 
Use '=d' if necessary.\n int1 = -10\n uint1 = 20\n string1 = \"This is a string\" # pack this as a 30-byte string\n char1 = \"J\"\n short1 = 0\n float1 = 5.23\n double1 = -256.3456789107\n ushort1 = 5\n string2 = \"This is another string\" # pack this as a 30-byte string\n long1 = 2147483647\n ulong1 = 3000000000\n\n # Use the fields above in this order.\n self.fieldmap = \"iI30schddH30slL\"\n self.sourcekeys = (\"int1\", \"uint1\", \"string1\", \"char1\", \"short1\",\n \"float1\", \"double1\", \"ushort1\", \"string2\", \"long1\", \"ulong1\")\n\n # Create the raw data that getfields will parse\n self.rawdata = struct.pack(self.fieldmap, int1, uint1, string1, char1,\n short1, float1, double1, ushort1, string2, long1, ulong1)\n\n # This is what getfields should return\n self.knownvalues = {\"char1\":char1, \"short1\":short1, \"ushort1\":ushort1,\n \"int1\":int1, \"uint1\":uint1, \"long1\":long1, \"ulong1\":ulong1,\n \"float1\":float1, \"double1\":double1, \"string1\":string1,\n \"string2\":string2}", "def abc_make_records(self, records, fields=None):\n fields = fields or self.abc_get_model_fields(records)\n result = []\n field_types = {}\n def field_type(name):\n \"\"\"Check the type of a field.\"\"\"\n if name not in field_types:\n field_types[name] = records.fields_get([name], attributes=['type'])[name]['type']\n return field_types.get(name)\n for record in records:\n rec ={'_name': record._name, 'id': record.id}\n for field in fields:\n child_fields = None\n # Relational field\n if type(field) == tuple:\n child_fields = field[1] or ['display_name']\n field = field[0]\n value = getattr(record, field)\n if child_fields:\n value = self.abc_make_records(value, child_fields)\n if field_type(field) == 'many2one':\n value = value and value[0] or None\n rec[field] = value\n result.append(rec)\n return result", "def test_get_next_record_flex():\n reader = csvReader.CsvReader()\n reader.column_count = 6\n reader.expected_headers = ['a', 'b', 'c', None, None, None]\n reader.flex_headers = [None, None, None, 'flex_d', 'flex_e', None]\n reader.csv_reader = csv.reader(['A,\"B\\n\",C,D,E,F'], dialect='excel', delimiter=',')\n return_dict, flex_fields = reader.get_next_record()\n assert return_dict == {'a': 'A', 'b': 'B\\n', 'c': 'C'}\n assert len(flex_fields) == 2\n assert flex_fields[0].header == 'flex_d'\n assert flex_fields[0].cell == 'D'\n assert flex_fields[1].header == 'flex_e'\n assert flex_fields[1].cell == 'E'", "def record_updater(records, observations):\n for record in records:\n try:\n record = ast.literal_eval(records[record])\n except Exception:\n record = record\n try:\n if type(records[record]) is dict:\n records[record] = Common.record_updater(\n records[record], observations\n )\n elif type(records[record]) is list:\n list_records = []\n for list_record in records[record]:\n for observation in observations:\n if observation != \"_id\":\n try:\n if re.search(observation, f\"{list_record}\"):\n if not re.search(\n observations[observation],\n f\"{records[record]}\",\n ):\n if not re.search(\"-->\", f\"{list_record}\"):\n list_records.append(\n f\"{list_record}\"\n + \" --> \"\n + observations[observation]\n )\n else:\n list_records.append(list_record)\n else:\n list_records.append(list_record)\n else:\n list_records.append(list_record)\n except Exception as ex:\n Common.logger.warning(\n f\"Exception happened in observation comparison {ex}\"\n )\n records[record] = list_records\n else:\n records = Common.data_comparison(observations, records, record)\n except Exception:\n records = 
Common.data_comparison(observations, records, record)\n return records", "def corrected_records(handle):\n\n seen = coll.defaultdict(set)\n for record in SeqIO.parse(handle, \"fasta\"):\n\n if not str(record.seq):\n continue\n\n # These are probably protein, so skip them\n if record.id.startswith(\"XM_\") or record.id.startswith(\"NM_\"):\n continue\n\n # Change given ids into a probably unique id\n given = record.id.replace(\",\", \"\")\n match = re.search(r\"gene RGD:(\\d+),\", record.description)\n if not match:\n raise ValueError(\"RGD fasta must state gene id: %s\", record.description)\n gene = match.group(1)\n\n match = re.search(\"locus: (.+)$\", record.description)\n if not match:\n raise ValueError(\"RGD fasta must have a locus\")\n location = match.group(1)\n\n record.id = \"{given}-{gene}-{location}\".format(\n given=given,\n gene=gene,\n location=location,\n )\n\n # Prevent writing duplicate entries\n if str(record.seq) in seen[record.id]:\n continue\n\n seen[record.id].add(str(record.seq))\n yield record", "async def line_to_obj(raw_line: bytearray, ref: Ref) -> Optional[ObjectRec]:\n # secondary_update = None\n if raw_line[0:1] == b\"0\":\n return None\n\n if raw_line[0:1] == b'-':\n rec = ref.obj_store[int(raw_line[1:], 16)]\n rec.alive = 0\n await mark_dead(rec.id)\n\n if 'Weapon' in rec.Type:\n impacted = await determine_contact(rec, type='impacted', ref=ref)\n if impacted:\n rec.impacted = impacted[0]\n rec.impacted_dist = impacted[1]\n sql = create_impact_stmt()\n vals = (ref.session_id, rec.parent, rec.impacted, rec.id,\n ref.time_offset, rec.impacted_dist)\n await DB.execute(sql, *vals)\n return rec\n\n comma = raw_line.find(b',')\n rec_id = int(raw_line[0:comma], 16)\n try:\n rec = ref.obj_store[rec_id]\n rec.update_last_seen(ref.time_offset)\n rec.updates += 1\n\n except KeyError:\n # Object not yet seen...create new record...\n rec = ObjectRec(id_=rec_id,\n session_id=ref.session_id,\n first_seen=ref.time_offset,\n last_seen=ref.time_offset)\n ref.obj_store[rec_id] = rec\n\n while True:\n last_comma = comma + 1\n comma = raw_line.find(b',', last_comma)\n if comma == -1:\n break\n\n chunk = raw_line[last_comma:comma]\n eq_loc = chunk.find(b\"=\")\n key = chunk[0:eq_loc]\n val = chunk[eq_loc + 1:]\n\n if key == b\"T\":\n i = 0\n pipe_pos_end = -1\n while i < COORD_KEY_LEN:\n pipe_pos_start = pipe_pos_end + 1\n pipe_pos_end = chunk[eq_loc + 1:].find(b'|', pipe_pos_start)\n if pipe_pos_start == -1:\n break\n\n coord = chunk[eq_loc + 1:][pipe_pos_start:pipe_pos_end]\n if coord != b'':\n c_key = COORD_KEYS[i]\n if c_key == \"lat\":\n rec.lat = float(coord) + ref.lat\n elif c_key == \"lon\":\n rec.lon = float(coord) + ref.lon\n else:\n rec.update_val(c_key, float(coord))\n i += 1\n else:\n rec.update_val(\n key.decode('UTF-8') if key != b'Group' else 'grp', val.decode('UTF-8'))\n\n rec.compute_velocity(ref.time_since_last)\n\n if rec.updates == 1 and rec.should_have_parent():\n parent_info = await determine_contact(rec, type='parent', ref=ref)\n if parent_info:\n rec.parent = parent_info[0]\n rec.parent_dist = parent_info[1]\n\n return rec", "def test_conversion(parallel, read_basic):\n text = \"\"\"\nA B C D E F G H\n1 a 3 4 5 6 7 8\n2. 
1 9 -.1e1 10.0 8.7 6 -5.3e4\n4 2 -12 .4 +.e1 - + six\n\"\"\"\n table = read_basic(text, parallel=parallel)\n assert_equal(table[\"A\"].dtype.kind, \"f\")\n assert table[\"B\"].dtype.kind in (\"S\", \"U\")\n assert_equal(table[\"C\"].dtype.kind, \"i\")\n assert_equal(table[\"D\"].dtype.kind, \"f\")\n assert table[\"E\"].dtype.kind in (\"S\", \"U\")\n assert table[\"F\"].dtype.kind in (\"S\", \"U\")\n assert table[\"G\"].dtype.kind in (\"S\", \"U\")\n assert table[\"H\"].dtype.kind in (\"S\", \"U\")", "def test_record(self):\n self.assertEqual(self.record.attrib['id'],\n 'nhc_demo_patient_0',\n 'Incorrect ID ')\n self.assertEqual(self.record.attrib['model'],\n 'nh.clinical.patient',\n 'Incorrect model')" ]
[ "0.62811124", "0.62365377", "0.61066103", "0.60930586", "0.6048932", "0.60215473", "0.6007136", "0.5951948", "0.5928344", "0.59142256", "0.5856558", "0.5852108", "0.5832339", "0.5824252", "0.5793622", "0.579146", "0.5781421", "0.57737976", "0.5772325", "0.571253", "0.5677232", "0.56720006", "0.5656448", "0.5632743", "0.5629757", "0.561031", "0.56057423", "0.56051624", "0.55686796", "0.5553228", "0.554701", "0.554477", "0.5540075", "0.5539929", "0.55397433", "0.5535224", "0.55080646", "0.5489489", "0.5484313", "0.5475186", "0.54688865", "0.5468158", "0.54676956", "0.5463662", "0.5448368", "0.5447638", "0.5445412", "0.54306644", "0.54095787", "0.54077697", "0.5403075", "0.5401028", "0.5382636", "0.53774774", "0.5374201", "0.5356817", "0.5355786", "0.53555655", "0.53554446", "0.53413", "0.53346074", "0.5334285", "0.532406", "0.5315777", "0.53119105", "0.53114957", "0.5304847", "0.530218", "0.53013766", "0.52965844", "0.5292414", "0.528987", "0.5271712", "0.52686715", "0.5259718", "0.525767", "0.52547646", "0.52475256", "0.5241685", "0.5237079", "0.52341384", "0.5232162", "0.5230223", "0.52290255", "0.52250224", "0.5223533", "0.52147377", "0.5214234", "0.52133054", "0.5209643", "0.5206138", "0.52048814", "0.5198701", "0.5191959", "0.5190451", "0.5179144", "0.51788217", "0.5175999", "0.51753294", "0.5172017", "0.5170214" ]
0.0
-1
Does the program add a new record into the sequential data structure?
def test_CovidCase_add(self): add_covid = self.create_CovidCase() add_covid.save() self.assertIn(add_covid, CovidCase.objects.all())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_record(self):\n if not self.record_exists(self.args.date):\n record = self.create_record()\n self.records.append(record)\n self.write_json_file(self.records_file, self.records)\n return True\n return False", "def test_append_updated_record_to_queue_same_data(small_app):\n pid = PersistentIdentifier.get(\"literature\", 11883)\n publication_id = str(pid.object_uuid)\n record = Record.get_record(publication_id)\n\n append_updated_record_to_queue(None, record, record, \"records-hep\", \"hep\")\n\n assert str(record.id) != \\\n DisambiguationRecord.query.order_by(desc(\"id\")).first().record_id", "def add_record(self, record):\n pass", "def insert(self, val):\n if val in self.record:\n return False\n \n self.record[val] = len(self.data)\n self.data.append(val)\n return True", "def update_record(self):\n new_record = self.create_record()\n for record in self.records:\n if self.date_str == record[\"date\"] and not record == new_record:\n record.update(new_record)\n self.write_json_file(self.records_file, self.records)\n return True\n return False", "def test_append_new_record_to_queue_method_not_hep_record(small_app):\n sample_author_record = _IdDict({\n '$schema': 'http://localhost:5000/schemas/records/authors.json',\n 'collections': [{'primary': 'HEPNAMES'}],\n 'control_number': '314159265',\n 'name': {'value': 'Glashow, S.L.'},\n 'positions': [{'institution': {'name': 'Copenhagen U.'}}],\n 'self': {'$ref': 'http://localhost:5000/api/authors/314159265'}})\n\n append_new_record_to_queue(sample_author_record)\n\n assert str(sample_author_record.id) != \\\n DisambiguationRecord.query.order_by(desc(\"id\")).first().record_id", "def add(self, rec):\n #print(\"ADD REC={}\".format(rec))\n if self._disable_insert:\n return\n if self._is_mem:\n key = '#'.join([rec['ts'], rec['user'], rec['narr']])\n if key in self._rkeys:\n self._add_duplicate()\n self._rkeys.add(key)\n cursor = self._sq.cursor()\n rec['name'] = rec['name'][19:] # strip 'biokbase.narrative.'\n values = [rec[c] for c in self.COLUMNS]\n ivalues = []\n for v in values:\n if isinstance(v, float):\n ivalues.append('{:f}'.format(v))\n else:\n ivalues.append('\"' + v + '\"')\n stmt = self._insert_stmt.format(values=','.join(ivalues))\n # add record\n try:\n cursor.execute(stmt)\n except sqlite3.IntegrityError:\n self._add_duplicate()\n cursor.close()", "def add(self, record):\n if record.name != 'consensus':\n self.members.append(record)", "def cli_add_record(record_data):\n new_record = None\n try:\n new_record = api.insert_record( record_data)\n except DuplicateRecord as error:\n debug(\"%(error)s\" % locals())\n print \"Adding new record failed. %(error)s\" % locals()\n return None\n except MissingRequiredInformaton as error:\n debug(\"%(error)s\" % locals())\n print \"Adding new record failed. 
%(error)s\" % locals()\n return None\n\n return new_record", "def test_append_new_record_to_queue_method(small_app):\n sample_hep_record = _IdDict({\n '$schema': 'http://localhost:5000/schemas/records/hep.json',\n 'authors': [{\n 'affiliations': [{'value': 'Copenhagen U.'}],\n 'curated_relation': False,\n 'full_name': 'Glashow, S.L.',\n 'signature_block': 'GLASs',\n 'uuid': '5ece3c81-0a50-481d-8bee-5f78576e9504'\n }],\n 'collections': [\n {'primary': 'CORE'},\n {'primary': 'HEP'}\n ],\n 'control_number': '4328',\n 'self': {'$ref': 'http://localhost:5000/api/literature/4328'},\n 'titles': [{'title': 'Partial Symmetries of Weak Interactions'}]\n })\n\n append_new_record_to_queue(sample_hep_record)\n\n assert str(sample_hep_record.id) == \\\n DisambiguationRecord.query.order_by(desc(\"id\")).first().record_id", "def case_add_records(self, refresh_db_before):\n new_names = [\"tag1\", \"tag2\", \"tag3\"]\n\n for name in new_names:\n TagOp.add(name)\n\n check_records = TagOp.get()\n\n self.assertEqual(len(check_records), len(new_names))\n\n for record, exp_name in zip(check_records, new_names):\n self.assertTrue(isinstance(record, Tag))\n self.assertEqual(record.name, exp_name)", "def test_append_updated_record_to_queue(small_app):\n pid = PersistentIdentifier.get(\"literature\", 4328)\n publication_id = str(pid.object_uuid)\n record = Record.get_record(publication_id)\n\n record_to_update = deepcopy(record)\n record_to_update['authors'][0]['full_name'] = \"John Smith\"\n\n append_updated_record_to_queue(None, record_to_update, record_to_update,\n \"records-hep\", \"hep\")\n\n assert str(record_to_update.id) == \\\n DisambiguationRecord.query.order_by(desc(\"id\")).first().record_id", "def add_record(self, data):\n if self.current_trip is None:\n print \"no trip to add data\"\n return\n self.current_trip.store_data(data)", "def test_append_updated_record_to_queue_new_record(small_app):\n sample_hep_record = _IdDict({\n '$schema': 'http://localhost:5000/schemas/records/hep.json',\n 'authors': [{\n 'affiliations': [{'value': 'Copenhagen U.'}],\n 'curated_relation': False,\n 'full_name': 'Glashow, S.L.',\n 'signature_block': 'GLASs',\n 'uuid': '5ece3c81-0a50-481d-8bee-5f78576e9504'\n }],\n 'collections': [\n {'primary': 'CORE'},\n {'primary': 'HEP'}\n ],\n 'control_number': '4328',\n 'self': {'$ref': 'http://localhost:5000/api/literature/4328'},\n 'titles': [{'title': 'Partial Symmetries of Weak Interactions'}]\n })\n\n result = append_updated_record_to_queue(None, sample_hep_record,\n sample_hep_record, \"records-hep\",\n \"hep\")\n\n assert result is None\n assert str(sample_hep_record.id) != \\\n DisambiguationRecord.query.order_by(desc(\"id\")).first().record_id", "def test_record_add():\n\n display = Display()\n display.add('first')\n display.add('second')\n\n assert len(display.display_output) == 2", "def insert_record(self, record, session):\n try:\n session.add(record)\n session.commit()\n session.close()\n return True\n except:\n\n logging.exception(\"http record cannot be added to db \" \":Time: \" + str(datetime.datetime.now()))\n return False", "def add_rec(self):\n print(\"Write phone number:\")\n add_phone_number_input = input()\n print(\"Write name of the record:\")\n add_name_input = input()\n print(\"Write address:\")\n add_address_input = input()\n return self.storage.add(\n add_phone_number_input, add_name_input, add_address_input\n )", "def save_data(self, record):\n self.dbm.addRecord(record)", "def test_add():\n # Generate observations with random times\n timeline = 
random_timed_observation_timeline()\n\n # Check that our observations are in order\n # (Go look up zip())\n for prev, current in zip(timeline.observations, timeline.observations[1:]):\n assert prev.time < current.time", "def createRecord(self):\n self.dto.getRecord().append(self.controller.createNewObj())\n print(\"Record added.\")", "def add_record(self, record: EventRecord) -> None:\n with self.session.begin() as session:\n session.add(record)", "def add(self, data):\n if self._filter(data):\n id = self.db._generate_id(data)\n \n if not id == None:\n if self.db._store:\n self.db.append(id, str(data))\n print id, \"stored to\", self.db._generate_path(id)\n else:\n print id\n print data.show2()", "def test_append_updated_record_to_queue_not_hep_record(small_app):\n sample_author_record = _IdDict({\n '$schema': 'http://localhost:5000/schemas/records/authors.json',\n 'collections': [{'primary': 'HEPNAMES'}],\n 'control_number': '314159265',\n 'name': {'value': 'Glashow, S.L.'},\n 'positions': [{'institution': {'name': 'Copenhagen U.'}}],\n 'self': {'$ref': 'http://localhost:5000/api/authors/314159265'}})\n\n append_updated_record_to_queue(None, sample_author_record,\n sample_author_record, \"records-authors\",\n \"authors\")\n\n assert str(sample_author_record.id) != \\\n DisambiguationRecord.query.order_by(desc(\"id\")).first().record_id", "def add(table, record):\n\n table.append(record)\n\n return table", "def test_add(self):\n # Everything added will be deleted later in test_delete.\n first_name = 'Trevor'\n last_name = 'Harvey'\n entry_date = '04/19/2012'\n title = 'Test'\n minutes = 34\n notes = 'testing entries. and regex (555) 555-3425'\n self.data.add(first_name, last_name, entry_date, title, minutes, notes)\n # second test add\n first_name = 'Nik'\n last_name = 'Silver'\n entry_date = '01/14/1827'\n title = '[email protected]'\n minutes = 34\n notes = 'This is an email test.'\n\n self.data.add(first_name, last_name, entry_date, title, minutes, notes)", "def append(self, obj):\r\n self.record_count += 1\r\n \r\n if type(obj) == dict:\r\n self._probe_record(obj)\r\n else:\r\n self._probe_row(obj)", "def test_add_dup(self):\n for i in range(3):\n self.datastore.save(self.trans)\n\n eq_(1, self.datastore._collection.count())", "def add_record(self, msg_id, rec):\n if msg_id in self._records:\n raise KeyError(\"Already have msg_id %r\" % (msg_id))\n self._check_dates(rec)\n self._records[msg_id] = rec\n self._add_bytes(rec)\n self._maybe_cull()", "def insert(self, record):\n temp = self.hashing(record.get_key())\n self.__buckets[temp].insert(record)\n self.__num_records += 1 \n if self.__scalable:\n if self.load_factor() > 0.75:\n temp_list = []\n self.__size *= 2\n for i in range(self.__size):\n temp_list.append(LinkedList()) \n for i in self.__buckets:\n while not i.is_empty():\n first = i.get_first()\n j = first.get_data()\n temp = self.hashing(j.get_key())\n temp_list[temp].insert(j)\n i.delete(j) \n self.__buckets = temp_list", "def record(self, pop, off, dad, mom):\n if mom is not None:\n off.setInfo(0, str(self.field))\n else:\n off.setInfo(dad.info(self.field) + 1, self.field)\n return True", "def add_record():\n if 'json' not in request.files:\n # use an HTML record that seems appropriate\n return \"no json file in the request!\", 400\n try:\n # can't assume that JSON file is valid\n _record = json.loads(request.files['json'].read())\n except ValueError:\n return \"failed to parse JSON file correctly!\", 400\n if type(_record) is not dict or 'name' not in _record:\n return 
\"expecting a dictionary with identifier, post failed!\", 400\n with RECORD_LOCK:\n # just check if the name already exists in the global RECORD list\n if len([r for r in RECORDS if r.get('name') == _record['name']]):\n return \"already in the records!\", 409\n RECORDS.append(_record)\n return \"OK\"", "def store(self, record: ModelledTable) -> bool:\n\n return self.model.store(self.cursor, record)", "def add(self, rec):\n rec['ts'] = datetime.now()\n self.collection.insert_one(rec)", "def add_records(self, data: dict, execution_context: dict):", "def test_new_entries_are_added(db_session):\n db_session.add_all(MODEL_ENTRIES)\n query = db_session.query(MyModel).all()\n assert len(query) == len(MODEL_ENTRIES)", "def add_record(d):\n\n print(\"\\nEnter the information of the person you'd like to add\")\n firstname = input('First name: ')\n lastname = input('Last name: ')\n phone = input('Phone: ')\n address = input('Address: ')\n\n name_is_equal = False\n\n for pid in d:\n if firstname == d[pid].get('First name') and lastname == d[pid].get('Last name'):\n name_is_equal = True\n\n if name_is_equal is True:\n print('\\n# The contact is already in the phone book')\n else:\n d[len(d)] = {'First name': firstname, 'Last name': lastname, 'Phone': phone, 'Address': address}\n print('\\n# The contact has been added to the phone book')\n\n return d", "def test_add_record_to_zone(self):\n zone = Zone('test.example.com')\n record = Record(zone, 'test-record', {'type': 'A', 'ttl': 300})\n zone.add_record(record)\n self.assertEqual(zone.records.get('test-record'), record)", "def add(self, line):\n if self.unique is not False and self.unique is not True:\n raise AttributeError(\"Attribute 'unique' is not True or False.\")\n self.log('add({0}); unique={1}'.format(line, self.unique))\n if line is False:\n return False\n if isinstance(line, str):\n line = line.split('\\n')\n if not isinstance(line, list):\n raise TypeError(\"Parameter 'line' not a 'string' or 'list', is {0}\".format(type(line)))\n local_changes = False\n for this in line:\n if self.unique is False or this not in self.contents:\n self.contents.append(this)\n self.changed = local_changes = True\n if self.sorted and local_changes:\n self.sort()\n return local_changes", "def record_and_push(self, data):\n return self.record(data).push()", "def test_add():\n data = io.create_sample_Dataset()\n tmp = data + data\n assert tmp[\"u\"][0, 0, 0] == 2.0", "def test_006_add(self):\n HEADING()\n db = self.db\n\n count = 5\n\n db.connect()\n\n db.delete_jobs()\n\n for id in range(0,count):\n job = db.insert(\"job\" + str(id))\n\n assert len(db) == count", "def test_0_data_insertion(self):\n s = self.fitness.insert_in_database(self.fitness_dict, date_time=self.dt1)\n self.assertTrue(s)", "def add(self, record):\n self._hist_records[record.uid] = record", "def test_phonebook_with_duplicate_entries_is_inconsostent(self):\n self.phonebook.add(\"Bob\", \"12345\")\n self.phonebook.add(\"Mary\", \"12345\")\n self.assertFalse(self.phonebook.is_consistent())", "def add(self, key, data):\n if key not in self.vertices:\n self.numberOfVertices += 1\n self.vertices[key] = Vertex(key, data)\n return True\n\n return False", "def test_create_record(self):\n pass", "def test_update_record(self):\n pass", "def insert_record(self):\r\n try:\r\n db.session.add(self)\r\n db.session.commit()\r\n return {\"error\": False, \"id\": self.id}\r\n except exc.SQLAlchemyError as e: # pragma: no cover\r\n # print(e)\r\n # print(sys.exc_info())\r\n db.session.rollback()\r\n return 
{\"error\": True}\r\n finally:\r\n db.session.close()", "def add_record(self, record: Dict, src_name: SourceName) -> None:\n concept_id = record[\"concept_id\"]\n record[\"src_name\"] = src_name.value\n label_and_type = f\"{concept_id.lower()}##identity\"\n record[\"label_and_type\"] = label_and_type\n record[\"item_type\"] = \"identity\"\n try:\n self.batch.put_item(Item=record)\n except ClientError as e:\n logger.error(\n \"boto3 client error on add_record for \"\n f\"{concept_id}: {e.response['Error']['Message']}\"\n )\n for attr_type, item_type in ITEM_TYPES.items():\n if attr_type in record:\n value = record.get(attr_type)\n if not value:\n continue\n if isinstance(value, str):\n items = [value.lower()]\n else:\n items = {item.lower() for item in value}\n for item in items:\n self._add_ref_record(\n item, record[\"concept_id\"], item_type, src_name\n )", "def new_archive_record(self, event):\n dbmanager = self.engine.db_binder.get_manager(self.data_binding)\n dbmanager.addRecord(event.record)", "def test_write_record_new(self):\n entity = TaskConnectionEntity.get_mock_object()\n\n TaskConnectionRepository.write_record(\n TaskConnectionMapper.to_record(entity)\n )\n\n connections = TaskConnectionRepository.fetch_connections_for_pipeline(entity.pipeline_id)\n self.assertEquals(connections[0].id, entity.id)", "def _add_record(self, datetime_, hash_):\n assert isinstance(datetime_, datetime)\n assert isinstance(hash_, str)\n record = {'datetime': datetime_, 'hash': hash_, 'artifacts': self.artifacts}\n self.logger.debug(f'Adding record: {record}')\n self.db_collection.update_one(self.query, {'$addToSet': {'records': record}})", "def _add_parsed_record(record, records):\n if record != \"\":\n logger.debug('The record is not empty. Let\\'s parse it.')\n parsed = self._parse_record(record, customization=customization)\n if parsed:\n logger.debug('Store the result of the parsed record')\n records.append(parsed)\n else:\n logger.debug('Nothing returned from the parsed record!')\n else:\n logger.debug('The record is empty')", "def insert(tablename: str, data: dict):\n try:\n if (t := tablenameRev[tablename]) not in sequenceTables:\n return False\n db.session.add(t.new(**data))\n db.session.commit()\n updateSequence([t])\n del_cache_for_sequence_table(tablename)\n return True\n except:\n return False", "def insert(self, val):\n if val in self.dic:\n return False\n else:\n self.data.append(val)\n self.dic[val]=len(self.data)-1\n return True", "def insert(self, val):\n new_item = False\n if val not in self.ds:\n self.ds.add(val)\n self.keys.append(val)\n new_item = True\n return new_item", "def test_append(self):\n self.table.append(['Tom', 26])", "def _appendRecord(self, rec, reclistname):\n if reclistname not in self.__dict__: # if not already an attrib\n self.__dict__[reclistname] = [] # init it\n self.__dict__[reclistname].append(rec) # append this record to its list", "def test_duplicate_primary_key(self):\n view = SchemaView(SCHEMA)\n patcher = ObjectChanger(schemaview=view)\n dataset = Dataset()\n patcher.apply(AddObject(value=Person(id='P1', name='p1')), dataset)\n patcher.apply(AddObject(value=Person(id='P1', name='p2')), dataset)\n assert dataset.persons[0].id == 'P1'\n self.assertEqual(len(dataset.persons), 2)\n logging.info(dataset.persons[0])\n logging.info(dataset.persons[1])\n patcher.apply(RemoveObject(value=Person(id='P1')), dataset)\n self.assertEqual(len(dataset.persons), 1)", "def _add():\n\tquery = myTaskSession.query(WorkToolkitDB.db.Task)\n\tmyTask = 
WorkToolkitDB.db.Task()\n\n\taddDataStr = input('输入记录 格式: \"task_id\",\"title\",\"version_time\",\"memo\",\"sys\" :\\n >')\n\taddData = re.split('\\s',addDataStr)\n\n\tif len(addData) != 5:\n\t\tprint('ERR: no add task input')\n\t\treturn 1\n\n\t#create record\n\tmyTask.task_id = addData[0]\n\tmyTask.title = addData[1]\n\tmyTask.version_time = addData[2]\n\tmyTask.memo = addData[3]\n\tmyTask.sys = addData[4]\n\n\t#init accept_time , finish_status\n\tmyTask.accept_time = datetime.datetime.now().strftime('%Y%m%d')\n\tmyTask.finish_status = 0\n\n\tmyTaskSession.add_all([myTask])\n\tmyTaskSession.commit()\n\n\treturn 0", "def inserted(self):\n return True", "def db_add_entry(person):\n db = sh.open(the_phone_book_name, flag='c', writeback=True)\n if person.name in db:\n print(\"Updating existing entry ..... {name}\\n\".format(name=person.name))\n else:\n person.new = True\n print(\"Adding new entry ..... {name}\".format(name=person.name))\n db[person.name.capitalize()] = person.phone\n db.sync()\n db.close()\n db_show_all()", "def new_archive_record(self, event):\n now = int(time.time() + 0.5)\n delta = now - event.record['dateTime']\n if delta > event.record['interval'] * 60:\n logdbg(\"Skipping record: time difference %s too big\" % delta)\n return\n if self.last_ts is not None:\n self.save_data(self.get_data(now, self.last_ts))\n self.last_ts = now\n #-- TBD: make this tunable on/off via variable\n #-- if self.max_age is not None:\n #-- self.prune_data(now - self.max_age)", "def store(self, cursor: sqlite3.Cursor, record: ModelledTable) -> bool:\n\n if not isinstance(record, self.record):\n raise Exception(\"Wrong type\")\n\n fields = list(self.table_fields.keys())\n data: Dict[str, Any] = {}\n\n for field in fields:\n data[field] = getattr(record, field)\n\n for _field, (_attr, _model) in self.foreigners.items():\n data[_field] = data[_attr][_field]\n del data[_attr]\n\n if data[self.id_field] is None:\n fields.remove(self.id_field)\n del data[self.id_field]\n else:\n fields.append(self.id_field)\n\n sql = (\n f\"INSERT OR REPLACE INTO [{self.table}] ([{'], ['.join(fields)}])\"\n f\" VALUES (:{', :'.join(fields)})\"\n )\n\n _LOGGER.debug(sql)\n _LOGGER.debug(data)\n\n cursor.execute(sql, data)\n\n setattr(record, self.id_field, cursor.lastrowid)\n\n return True", "def test_patch_record(self):\n pass", "def add_activity(self, record):\n # this brings shame to me but meh, faster to hardcode (see model_activity)\n # id = str(record.value(\"id\"))\n id = record.value(\"id\")\n room = record.value(\"room\")\n if room == 0:\n room = \"Nenhuma\"\n else:\n room = str(room)\n weekday = constants.days_of_the_week[record.value(\"weekday\")]\n weektime = record.value(\"weektime\").toString(\"HH:mm\")\n entry = (id, record.value(\"description\"), room,\n weekday, weektime)\n\n if entry in self._activity_list:\n return False\n else:\n if self.is_in_del_queue(id):\n # no real need to add as it was queued to be removed\n self._removed_activities.remove(id)\n else:\n self._added_activities.append(id)\n # print self._added_activities\n\n self._activity_list.append(entry)\n # sorts by day/time\n self._activity_list.sort(key=operator.itemgetter(3,4))\n # self._activity_list = sorted(self._activity_list, key=lambda dia_hora: (dia_hora[3], dia_hora[2]))\n return True", "def append_record_failure():\n\t\tpass", "def record(self):\n # TODO: record the data", "def add(table, record):\n index_id = 0\n record.insert(index_id, common.generate_random(table))\n table.append(record)\n 
data_manager.write_table_to_file(\"model/sales/sales.csv\", table)\n\n return table", "def __call__(self):\r\n AddNewRecords()", "def __create_record(self, timestamp, url):\n if not timestamp in self.record_d.keys():\n self.record_d[timestamp] = {}\n\n # increment url count\n if not url in self.record_d[timestamp].keys():\n self.record_d[timestamp][url] = 1\n else:\n self.record_d[timestamp][url] += 1", "def add(self, datarecord):\n if datarecord[\"linetype\"] == \"print_ccInstance\":\n instanceId = datarecord[\"instanceId\"]\n ownerId = datarecord[\"ownerId\"]\n timestamp = datarecord[\"ts\"]\n status = datarecord[\"state\"].lower()\n t = datarecord[\"date\"]\n\n id = instanceId + \" \" + ownerId + \" \" + str(timestamp)\n\n instance = self.data\n\n try:\n current = instance[id]\n # if were wereto do a data base this line needs to be replaced\n except:\n current = datarecord\n\n try:\n current[\"t_end\"]\n except:\n # if not (\"t_end\" in current):\n # time in the future\n f = self.in_the_future\n\n current[\"trace\"] = {\n \"pending\": {\"start\": f, \"stop\": t},\n \"teardown\": {\"start\": f, \"stop\": t},\n \"extant\": {\"start\": f, \"stop\": t}\n }\n current[\"t_end\"] = current[\"date\"]\n current[\"t_start\"] = current[\"ts\"] # for naming consitency\n current[\"duration\"] = 0.0\n\n current[\"t_end\"] = max(current[\"t_end\"], t)\n current[\"trace\"][status][\"start\"] = min(\n current[\"trace\"][status][\"start\"], t)\n current[\"trace\"][status][\"stop\"] = max(\n current[\"trace\"][status][\"stop\"], t)\n\n instance[id] = current", "def add_new_element_to_store(entry_sequence, element, is_propagated_call=False):\n\t\tglobal board, node_id\n\t\tsuccess = False\n\t\ttry:\n\t\t\tboard[int(entry_sequence)] = element\n\t\t\tsuccess = True\n\t\texcept Exception as e:\n\t\t\tprint e\n\t\treturn success", "def insert(self, val):\n if val not in self.table.keys():\n self.table[val] = len(self.ls)\n self.ls.append(val)\n return True\n return False", "def has_new_entry(self):\n if self.new_entry:\n self.new_entry -= 1\n return True", "def add_a_record(self, record):\n '''\n doc = { \"P/N\": record,#record.get_PN(),\n \"supplier\": \"\",\n \"inventory\": \"\",\n \"specification\": \"\",\n \"description\": \"\",\n \"OEM\": \"\",\n \"tags\": [\"mongodb\", \"python\", \"pymongo\"],\n \"date\": datetime.datetime.utcnow()}'''\n self.collection.insert(record)", "def test_add(self):\n r = main.List.connection()\n main.List.add(r, \"ToDo\", 1, \"Buy apples\", 2, \"20.05.2015\")\n task = r.get(\"ToDo\")\n self.assertTrue(task, \"No such entry in DB. 
Adding failed.\")", "def write(self):\n if not self._table: raise ValueError ( \"_table is Null\" )\n if self._isnew:\n for m in self._modified_values:\n self._modified_values[m] = self._table[m].val_py2sql(self._modified_values[m])\n \n try:\n rec = CFG.CX.insert ( CFG.DB.SCHEMA + \".\" + self._table.name,\n self._modified_values )\n #this will automatically re-read the data from the db, to take all changes\n #done by triggers and default values into account.\n self._objectid = rec['objectid']\n\n #print \"Record # {0} inserted into {1}.\".format(self._objectid, self._table.name)\n self.raiseEvent ( \"record_added\", self )\n \n except pg.DatabaseError, e:\n print \"Error inserting record.\"\n raise Record.DataManipulationError ( \"Inserting a new record into '{0}'\".format(self._table.name),\n str(self._modified_values),\n e)\n elif self._ismodified:\n \n for m in self._modified_values: \n self._modified_values[m] = self._table[m].val_py2sql(self._modified_values[m])\n \n self._modified_values['objectid'] = self._objectid\n del self.TextCache[self._objectid]\n try:\n rec = CFG.CX.update ( CFG.DB.SCHEMA + \".\" + self._table.name,\n self._modified_values )\n self.read() \n self.raiseEvent ( \"record_saved\", self )\n except pg.DatabaseError, e:\n print \"Error updating record\"\n raise Record.DataManipulationError ( \"Updating record {1} of '{0}'\".format(self._table.name, self._objectid),\n str(self._modified_values),\n e)", "def _add_new_struct_to_existing_database(self, db, filename):\n\n pass", "def test_add_multiple_records_to_zone(self):\n zone = Zone('test.example.com')\n recordA = Record(zone, 'test-recorda', {'type': 'A', 'ttl': 300})\n recordB = Record(zone, 'test-recordb', {'type': 'A', 'ttl': 300})\n zone.add_record(recordA)\n zone.add_record(recordB)\n self.assertDictEqual(zone.records, {\n 'test-recorda': recordA,\n 'test-recordb': recordB,\n })", "def addRecord(self):\n\n ## Saving recorded entries to the CRM and Mailings Database\n print(\"Saving entries to the CRM and Mailings database...\")\n db_connection.executeQuery(\"INSERT INTO dbo.CRM (f_name, l_name, company, address, city, county, state, zip, primary_phone, secondary_phone, email_address) VALUES ('\" + self.first_name.replace(\"\\'\", \"\\'\\'\").title() + \"', '\" + self.last_name.replace(\"\\'\", \"\\'\\'\").title() + \"', '\" + self.crm_company_name.replace(\"\\'\", \"\\'\\'\").title() + \"', '\" + self.address.title() + \"', '\" + self.city.replace(\"\\'\", \"\\'\\'\").title() + \"', '\" + self.county.replace(\"\\'\", \"\\'\\'\").title() + \"', '\" + self.state_code.upper() + \"', '\" + str(self.zip_code) + \"', '\" + self.phone_number + \"', '\" + self.phone_number_2 + \"' , '\" + self.email_address + \"'); COMMIT\")\n db_connection.executeQuery(\"INSERT INTO dbo.Mailings (name, company, address) VALUES ('\" + self.first_name.replace(\"\\'\", \"\\'\\'\").title() + \" \" + self.last_name.replace(\"\\'\", \"\\'\\'\").title() + \"', '\" + self.company_name.replace(\"\\'\", \"\\'\\'\").title() + \"','\" + self.address + \" \" + self.city.title() + \" \" + self.county.title() + \" \" + self.state_code.upper() + \" \" + str(self.zip_code) + \"'); COMMIT\")", "def test_add(self):\n self.my_task.key = self.task_storage.add(self.my_task)\n new_task = self.task_storage.find(self.my_task.key)\n new_task.key = self.task_storage.add(new_task)\n\n self.assertNotEqual(self.my_task.key, new_task.key)\n self.task_storage.delete(new_task.key)", "def insert(self, val: int) -> bool:\n if self.store_dict.get(val) != None:\n 
return False\n self.store_list.append(val)\n self.store_dict[val] = len(self.store_list) - 1\n return True", "def add_record(self, record):\n # Store the domain as the key, and the rest as value.\n new_key = \"{0},{1}\".format(record.get_domain(), record.get_record_type())\n self._records[new_key] = record", "def insert(self, val: int) -> bool:\n if val in self.idx:\n return False\n else:\n # append value into data \n self.data.append(val)\n \n # record the idx of the value in data\n self.idx[val] = len(self.data) - 1\n return True", "def record(self, time, increment):\n raise NotImplementedError(\"Abstract method not implemented.\")", "def add(self, record, last=None):\n if type(record) != Record:\n raise ValueError('`record` must be a Record instance.')\n\n if self.reg == {}: \n self.reg[record.liidx] = [(record.clidx, record.clidx)]\n elif not record.liidx in self.reg:\n self.reg[record.liidx] = [(record.clidx, record.clidx)]\n else:\n for idx, v in enumerate(self.reg[record.liidx]):\n l, r = v\n if r == (record.clidx - 1):\n r = record.clidx\n break\n elif l == (record.clidx + 1):\n l = record.clidx\n break\n elif last is None and l <= record.clidx <= r:\n return None\n else:\n if last is None:\n insort(self.reg[record.liidx],\n (record.clidx, record.clidx))\n return None\n\n if last is not None and idx != last:\n current = self.reg[record.liidx].pop(idx)\n other = self.reg[record.liidx].pop(last)\n insort(self.reg[record.liidx],\n (other[0], current[1]))\n else:\n self.reg[record.liidx][idx] = (l, r)\n self.add(record, last=idx)", "def test_add_coverage_record_for(self):\n provider = AlwaysSuccessfulCollectionCoverageProvider(\n self._default_collection\n )\n identifier = self._identifier()\n record = provider.add_coverage_record_for(identifier)\n\n # This is the same as calling CoverageRecord.add_for with\n # appropriate arguments.\n record2, is_new = CoverageRecord.add_for(\n identifier, data_source=provider.data_source,\n operation=provider.operation,\n collection=provider.collection_or_not\n )\n assert False == is_new\n assert record == record2\n\n # By default, the CoverageRecord is not associated with any\n # particular collection.\n assert None == record.collection\n\n # Setting COVERAGE_COUNTS_FOR_EVERY_COLLECTION to False will\n # change that -- a CoverageRecord will only count for the\n # collection associated with the CoverageProvider.\n provider.COVERAGE_COUNTS_FOR_EVERY_COLLECTION = False\n record = provider.add_coverage_record_for(identifier)\n assert self._default_collection == record.collection\n\n record2, is_new = CoverageRecord.add_for(\n identifier, data_source=provider.data_source,\n operation=provider.operation,\n collection=provider.collection_or_not\n )\n assert False == is_new\n assert record == record2", "def test_new_entries_are_added(db_session):\n for entry in ENTRIES:\n row = Entries(title=entry[\"title\"], creation_date=entry[\"creation_date\"], body=entry[\"body\"])\n db_session.add(row)\n query = db_session.query(Entries).all()\n assert len(query) == len(ENTRIES)", "def test_new_attribute_is_added(db_session):\n new_att = Attribute(\n label=\"test_label\",\n desc=\"test_desc\"\n )\n db_session.add(new_att)\n query = db_session.query(Attribute).all()\n assert len(query) == 1", "def test_add_no_commit(self):\n user_id = get_rand_string()\n data = get_rand_string()\n id = get_rand_string()\n\n doc = {}\n doc[\"user_id\"] = user_id\n doc[\"data\"] = data\n doc[\"id\"] = id\n\n self.conn.add(**doc)\n results = self.conn.query(\"user_id:\" + 
user_id).results\n self.assertEquals(len(results), 0,\n \"Document (id:%s) shouldn't have been fetched\" % (id))", "def put_record(self, record):\r\n row = [record.get(field) for field in self.fields.names()]\r\n\r\n self.put(row)", "def add(self, stock_record):\n if stock_record.symbol in self._records:\n raise StockRecordExistsError(stock_record.symbol)\n self._records[stock_record.symbol] = stock_record", "def add(self, obs, action, reward, new_obs, done):\n experience = (obs, action, reward, new_obs, done)\n insert_index = self.fix_index()\n if insert_index > 0:\n if insert_index in self._storage:\n del self._storage[insert_index]\n self._storage[insert_index] = experience\n # add to priority queue\n priority = self.priority_queue.get_max_priority()\n self.priority_queue.update(priority, insert_index)\n return True\n else:\n sys.stderr.write('Insert failed\\n')\n return False", "def add(table):\n\n list_labels = [\"Name: \", \"Manufacturer: \", \"purchase_date: \", \"Durability: \"]\n data_input = ui.get_inputs(list_labels, \"Add new record\")\n\n id_ = common.generate_random(table)\n is_date_number = data_input[2].isdigit() and len(data_input) == 4\n is_durability_number = data_input[3].isdigit()\n\n if is_date_number is True and is_durability_number is True:\n data_input.insert(0, id_)\n table.append(data_input)\n\n elif is_date_number is False:\n ui.print_error_message(\"Wrong year format! Record add failed!\")\n\n elif is_durability_number is False:\n ui.print_error_message(\"Wrong durability format! Record add failed!\")\n\n return table", "def test_new_contact_is_added(db_session):\n new_contact = AddressBook(\n name=\"test_name\",\n phone=\"test_phone\",\n email=\"test_email\"\n )\n db_session.add(new_contact)\n query = db_session.query(AddressBook).all()\n assert len(query) == 1", "def add(self, data, check_exists=True): # pragma: no cover\n raise NotImplementedError", "def test_record_eq_record(self):\n zone = Zone('test.example.com')\n record_current = Record(zone, 'test-record', {'type': 'A', 'ttl': 300})\n record_desired = Record(zone, 'test-record', {'type': 'A', 'ttl': 300})\n self.assertTrue(record_current == record_desired)", "def test_duplicate_entries(self):", "def record_all(self):\n for i in self.recorders:\n t = i[0]\n r = i[1]\n self.add_row(t, r())", "def add_state(self, current_state, next_state):\n if current_state not in self.records.keys():\n self.records[current_state] = dict()\n\n if next_state not in self.records[current_state].keys():\n self.records[current_state][next_state] = 0\n\n self.records[current_state][next_state] += 1" ]
[ "0.6565922", "0.64655966", "0.64488524", "0.635182", "0.6166402", "0.6109293", "0.60871565", "0.60605955", "0.60419774", "0.6038555", "0.6027791", "0.60155684", "0.6006945", "0.5984549", "0.5980958", "0.5957817", "0.5919547", "0.58998924", "0.58906573", "0.5881246", "0.5871917", "0.5866867", "0.5858553", "0.585794", "0.57924175", "0.5768413", "0.5764683", "0.5760963", "0.5753879", "0.5735464", "0.5691882", "0.5660929", "0.56449276", "0.56339943", "0.5628664", "0.5622464", "0.5614753", "0.5609284", "0.56033033", "0.55961907", "0.5595825", "0.55902517", "0.55868834", "0.5584493", "0.5582452", "0.55794305", "0.55775225", "0.5569832", "0.5554472", "0.5549458", "0.5545054", "0.55174553", "0.55110866", "0.5502712", "0.55003065", "0.5493726", "0.5485849", "0.5481182", "0.5478", "0.547294", "0.5457971", "0.5450438", "0.5448953", "0.54444677", "0.54369485", "0.5432899", "0.54272425", "0.5417893", "0.5417422", "0.54135466", "0.54119486", "0.5411589", "0.5393017", "0.5389547", "0.5387933", "0.5364395", "0.5364327", "0.5358659", "0.535339", "0.53437424", "0.53406984", "0.5338868", "0.53377974", "0.53322285", "0.53250796", "0.5310219", "0.5305693", "0.5303873", "0.5302805", "0.5301352", "0.529592", "0.52922875", "0.52921927", "0.52885956", "0.52878696", "0.5281924", "0.52783036", "0.52744234", "0.52737695", "0.52734953", "0.526587" ]
0.0
-1
Does the program update a record in the sequential data structure as expected?
def test_CovidCase_update(self):
    u_Covid = self.update_CovidCase()
    c = CovidCase.objects.get(country_id="UP")
    c.name_en = "New name"
    c.save()
    self.assertEqual(c.name_en, "New name")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_record(self):\n pass", "def test_append_updated_record_to_queue_same_data(small_app):\n pid = PersistentIdentifier.get(\"literature\", 11883)\n publication_id = str(pid.object_uuid)\n record = Record.get_record(publication_id)\n\n append_updated_record_to_queue(None, record, record, \"records-hep\", \"hep\")\n\n assert str(record.id) != \\\n DisambiguationRecord.query.order_by(desc(\"id\")).first().record_id", "def test_append_updated_record_to_queue(small_app):\n pid = PersistentIdentifier.get(\"literature\", 4328)\n publication_id = str(pid.object_uuid)\n record = Record.get_record(publication_id)\n\n record_to_update = deepcopy(record)\n record_to_update['authors'][0]['full_name'] = \"John Smith\"\n\n append_updated_record_to_queue(None, record_to_update, record_to_update,\n \"records-hep\", \"hep\")\n\n assert str(record_to_update.id) == \\\n DisambiguationRecord.query.order_by(desc(\"id\")).first().record_id", "def update_record(self):\n new_record = self.create_record()\n for record in self.records:\n if self.date_str == record[\"date\"] and not record == new_record:\n record.update(new_record)\n self.write_json_file(self.records_file, self.records)\n return True\n return False", "def update_record():\n global data_base, table, choice, res, output1, result, column_names, choice_row, number_str\n a = output1.get('1.0', END)\n a = a[0:-1]\n changed_string = a.split('\\n')\n changed_string = changed_string[0:-2]\n\n number_str = int(number_str) - 1\n source_string = []\n for i in result:\n for j in i:\n source_string.append(j)\n\n for i in range(0, 4):\n try:\n if changed_string[i] == source_string[i]:\n pass\n except IndexError:\n pass\n else:\n param_value = changed_string[i]\n step = i\n param_column = column_names[step]\n sqlite3_update_record(data_base, table, param_column, param_value, choice_row, res)\n output1.delete(1.0, END)", "def update(self, record):\n # clean copy of record to preserve IDs for response if no update required\n record_data = self.sanitize_record(copy.deepcopy(record.data))\n changed = False\n args = {}\n\n for key in RECORD_KEYS_MAP:\n input_data = self.filter_empty_subparams(key)\n\n if input_data is not None:\n if (\n RECORD_KEYS_MAP[key]['appendable']\n and self.module.params.get('record_mode') == 'append'\n ):\n # create union of input and existing record data,\n # preserving existing order\n input_data = record_data[key] + [\n input_obj\n for input_obj in input_data\n if input_obj not in record_data[key]\n ]\n\n if input_data != record_data[key]:\n changed = True\n args[key] = input_data\n\n # create a new copy of the previously sanitized dict that will be\n # updated with changing args to support --diff\n after_changes = copy.deepcopy(record_data)\n for k, v in args.items():\n if after_changes[k] != v:\n after_changes[k] = v\n\n # check mode short circuit before update\n if self.module.check_mode:\n self.record_exit(before=record_data,\n changed=changed,\n after=after_changes,\n record=record)\n\n # update only if some changed data\n if changed:\n record.update(errback=self.errback_generator(),\n **args)\n\n self.record_exit(before=record_data,\n changed=changed,\n after=after_changes,\n record=record)\n\n # catch exit if not running in check mode and no changes are to be made.\n self.record_exit(changed=False, record=record)", "def test_concurrent_updates(self):\r\n instance = TestCounterModel.create()\r\n new1 = TestCounterModel.get(partition=instance.partition)\r\n new2 = TestCounterModel.get(partition=instance.partition)\r\n\r\n 
new1.counter += 5\r\n new1.save()\r\n new2.counter += 5\r\n new2.save()\r\n\r\n actual = TestCounterModel.get(partition=instance.partition)\r\n assert actual.counter == 10", "def test_update_occurrence(self):\n pass", "def update(self, data):\n if not isinstance(data, list): data = [data] # otherwise no conversion is necessary\n master = Handler.ALL_VERS_DATA\n for record in data:\n #print(record)\n for k,v in iteritems(record): # ensure record contents aretyped appropriately\n try: record[k] = int(v)\n except ValueError: record[k] = v\n try: label = record[\"label\"] # verify this record has the required 'label' key\n except KeyError:\n raise ValueError(\"Must provide a valid label argument. Given:%s%s\"%(\\\n os.linesep, (\"%s \"%(os.linesep)).join(\n [\"%15s:%s\"%(k,v) for k,v in iteritems(kwargs)]\n )))\n try: masterLabel = master[label] # identify the already existing record that matches this to-be-updated record, if any\n except KeyError: # master hasn't been defined yet\n master[label] = record\n self._updated = True # a new record should also be saved\n continue\n for k,v in iteritems(record): # determine whether master needs to be updated\n try:\n if masterLabel[k] == v: continue # whether an entry in the record needs to be updated (doesn't match)\n except KeyError: pass # this condition means that k is a new key, so the record must be updated\n self._updated = True\n try: master[label].update(record) # index each record by its label\n except KeyError: break", "def test_record_update(app_static, app_live):\n time.sleep(SLEEP_TIME)\n record = dict(app_live.get(OBJ)[0])\n update_value = \"0.00\" if record[UPDATE_KEY] != \"0.00\" else \"1.00\"\n data = {\"id\": record[\"id\"], UPDATE_KEY: update_value}\n record_updated = app_live.record(method=\"update\", data=data, obj=OBJ)\n assert record_updated[UPDATE_KEY] == update_value", "def test_patch_record(self):\n pass", "def update(self, initial, follows):", "def test_updates(self):\r\n instance = TestCounterModel.create()\r\n instance.counter += 5\r\n instance.save()\r\n\r\n actual = TestCounterModel.get(partition=instance.partition)\r\n assert actual.counter == 5", "def updated_full_record(full_record):\n full_record[\"access\"][\"status\"] = \"embargoed\"\n full_record[\"created\"] = \"2023-03-23T00:00:00.000000+00:00\"\n full_record[\"id\"] = \"abcde-fghij\"\n full_record[\"metadata\"][\"resource_type\"][\"id\"] = \"other\"\n\n return full_record", "def isUpdated(self):\n seq = self.readSeq()\n\n if (seq != self.seq):\n self.seq = seq\n return True\n else:\n return False", "def test_append_updated_record_to_queue_not_hep_record(small_app):\n sample_author_record = _IdDict({\n '$schema': 'http://localhost:5000/schemas/records/authors.json',\n 'collections': [{'primary': 'HEPNAMES'}],\n 'control_number': '314159265',\n 'name': {'value': 'Glashow, S.L.'},\n 'positions': [{'institution': {'name': 'Copenhagen U.'}}],\n 'self': {'$ref': 'http://localhost:5000/api/authors/314159265'}})\n\n append_updated_record_to_queue(None, sample_author_record,\n sample_author_record, \"records-authors\",\n \"authors\")\n\n assert str(sample_author_record.id) != \\\n DisambiguationRecord.query.order_by(desc(\"id\")).first().record_id", "def test_write(self):\n data2 = self.data.copy()\n data2['a'] *= 2\n self.dset['a'] = data2\n self.assertTrue(np.all(self.dset[...] == data2))\n data2['b'] *= 4\n self.dset['b'] = data2\n self.assertTrue(np.all(self.dset[...] 
== data2))\n data2['a'] *= 3\n data2['c'] *= 3\n self.dset['a','c'] = data2\n self.assertTrue(np.all(self.dset[...] == data2))", "def test_append_updated_record_to_queue_new_record(small_app):\n sample_hep_record = _IdDict({\n '$schema': 'http://localhost:5000/schemas/records/hep.json',\n 'authors': [{\n 'affiliations': [{'value': 'Copenhagen U.'}],\n 'curated_relation': False,\n 'full_name': 'Glashow, S.L.',\n 'signature_block': 'GLASs',\n 'uuid': '5ece3c81-0a50-481d-8bee-5f78576e9504'\n }],\n 'collections': [\n {'primary': 'CORE'},\n {'primary': 'HEP'}\n ],\n 'control_number': '4328',\n 'self': {'$ref': 'http://localhost:5000/api/literature/4328'},\n 'titles': [{'title': 'Partial Symmetries of Weak Interactions'}]\n })\n\n result = append_updated_record_to_queue(None, sample_hep_record,\n sample_hep_record, \"records-hep\",\n \"hep\")\n\n assert result is None\n assert str(sample_hep_record.id) != \\\n DisambiguationRecord.query.order_by(desc(\"id\")).first().record_id", "def test_update_values(self):\r\n m0 = TestUpdateModel.create(count=5, text='monkey')\r\n\r\n # independently save over a new count value, unknown to original instance\r\n m1 = TestUpdateModel.get(partition=m0.partition, cluster=m0.cluster)\r\n m1.count = 6\r\n m1.save()\r\n\r\n # update the text, and call update\r\n m0.update(text='monkey land')\r\n self.assertEqual(m0.text, 'monkey land')\r\n\r\n # database should reflect both updates\r\n m2 = TestUpdateModel.get(partition=m0.partition, cluster=m0.cluster)\r\n self.assertEqual(m2.count, m1.count)\r\n self.assertEqual(m2.text, m0.text)", "def test_update_values(self):\n m0 = TestUpdateModel.create(count=5, text='monkey')\n\n # independently save over a new count value, unknown to original instance\n m1 = TestUpdateModel.get(partition=m0.partition, cluster=m0.cluster)\n m1.count = 6\n m1.save()\n\n # update the text, and call update\n m0.update(text='monkey land')\n self.assertEqual(m0.text, 'monkey land')\n\n # database should reflect both updates\n m2 = TestUpdateModel.get(partition=m0.partition, cluster=m0.cluster)\n self.assertEqual(m2.count, m1.count)\n self.assertEqual(m2.text, m0.text)", "def test_recursive_update():\n\n test = Status.update_dict({'generation': TEST_1_ATTRS_1},\n {'generation': TEST_1_ATTRS_2})\n\n assert test['generation']['run_id'] == TEST_1_ATTRS_1['run_id']\n assert test['generation']['job_status'] == TEST_1_ATTRS_2['job_status']", "def update_soa(record):\n if record and record.domain and record.domain.soa:\n record.domain.soa.serial += 1\n record.domain.soa.dirty = True\n record.domain.soa.save()", "def test_update_twice_same_result():\n starting_db = create_db(STARTING_DB_INPUT)\n actual: dict = o_obj.update_object_in_db(\n starting_db,\n \"some_uid\",\n INP\n )\n actual2: dict = o_obj.update_object_in_db(\n starting_db,\n \"some_uid\",\n INP\n )\n assert actual == EXPECTED == actual2", "def test_update_domain_with_a_record(self):\n a_record = [{'type': 'A',\n 'name': 'ftp.example.com',\n 'data': '192.0.2.8',\n 'ttl': 3600}]\n self.test_update(updateRecords=a_record)", "def record_updater(records, observations):\n for record in records:\n try:\n record = ast.literal_eval(records[record])\n except Exception:\n record = record\n try:\n if type(records[record]) is dict:\n records[record] = Common.record_updater(\n records[record], observations\n )\n elif type(records[record]) is list:\n list_records = []\n for list_record in records[record]:\n for observation in observations:\n if observation != \"_id\":\n try:\n if re.search(observation, 
f\"{list_record}\"):\n if not re.search(\n observations[observation],\n f\"{records[record]}\",\n ):\n if not re.search(\"-->\", f\"{list_record}\"):\n list_records.append(\n f\"{list_record}\"\n + \" --> \"\n + observations[observation]\n )\n else:\n list_records.append(list_record)\n else:\n list_records.append(list_record)\n else:\n list_records.append(list_record)\n except Exception as ex:\n Common.logger.warning(\n f\"Exception happened in observation comparison {ex}\"\n )\n records[record] = list_records\n else:\n records = Common.data_comparison(observations, records, record)\n except Exception:\n records = Common.data_comparison(observations, records, record)\n return records", "def case_update_record(self, refresh_db_before):\n original_name = \"tag3\"\n update_name = \"tag33\"\n\n original_tag = TagOp.add(original_name)\n\n updated_tag_local = TagOp.update(original_tag, update_name)\n\n self.assertEqual(updated_tag_local.name, update_name)\n\n updated_tag_get = TagOp.get(name=update_name)\n\n self.assertTrue(len(updated_tag_get) is 1)\n self.assertEqual(updated_tag_get[0].name, update_name)\n self.assertEqual(updated_tag_get[0].id, original_tag.id)", "def test_update_from_none(self):\r\n instance = TestCounterModel()\r\n instance.counter += 1\r\n instance.save()\r\n\r\n new = TestCounterModel.get(partition=instance.partition)\r\n assert new.counter == 1", "def update(self, key, val) -> bool:\n if key in self._cache:\n self._remove_key(key)\n self._add_to_tail(key, val)\n self._cache[key] = self._tail\n return True\n else:\n if len(self._cache) == self.max_records:\n self._remove_oldest()\n self._add_to_tail(key, val)\n self._cache[key] = self._tail\n return False", "def _update(self, count=True, forced=False):", "def test_partial_updates(self):\r\n m1 = TestSetModel.create(int_set={1, 2, 3, 4})\r\n\r\n m1.int_set.add(5)\r\n m1.int_set.remove(1)\r\n assert m1.int_set == {2, 3, 4, 5}\r\n\r\n m1.save()\r\n\r\n m2 = TestSetModel.get(partition=m1.partition)\r\n assert m2.int_set == {2, 3, 4, 5}", "def _update(self):\n if self._dirty:\n return self._to_flattr_dict()\n return False", "def update_records(self, something):\n print(\"Some logic (not shown) to update database of units\")", "def test_success_case(self):\r\n tm = TestModel.create(count=8, text='123456789')\r\n tm2 = tm.update(count=9)\r\n\r\n tm3 = TestModel.get(tm.vid)\r\n assert tm2.count == 9\r\n assert tm3.count == 9", "def update_record(self, msg_id, rec):\n if msg_id in self._culled_ids:\n raise KeyError(\"Record %r has been culled for size\" % msg_id)\n self._check_dates(rec)\n _rec = self._records[msg_id]\n self._drop_bytes(_rec)\n _rec.update(rec)\n self._add_bytes(_rec)", "def update(table, id_, record):\n\n new_table = []\n for element in table:\n if id_ == element[0]:\n element = record\n new_table.append(element)\n else:\n new_table.append(element)\n table = new_table\n\n return table", "def update_rec(self):\n print(\"Write phone number:\")\n update_phone_number_input = input()\n print(\"Write new name of the record:\")\n update_name_input = input()\n print(\"Write new address:\")\n update_address_input = input()\n return self.storage.update(\n update_phone_number_input, update_name_input, update_address_input\n )", "def test_preserve_changes(self):\n\n mapper(Order, orders, properties = {\n 'userident':deferred(orders.c.user_id, group='primary'),\n 'description':deferred(orders.c.description, group='primary'),\n 'opened':deferred(orders.c.isopen, group='primary')\n })\n sess = create_session()\n o = 
sess.query(Order).get(3)\n assert 'userident' not in o.__dict__\n o.description = 'somenewdescription'\n assert o.description == 'somenewdescription'\n def go():\n assert o.opened == 1\n self.assert_sql_count(testing.db, go, 1)\n assert o.description == 'somenewdescription'\n assert o in sess.dirty", "def test_update_case(self):\n pass", "def processRecord(c,collection,VarMap,totalVariables,uniqueIndexes,versionNumber,specialKeyInds,incremental,sliceDB,sliceColTuples,ContentCols):\n\n vNInd = VarMap['__versionNumber__']\n retInd = VarMap['__retained__']\n aKInd = VarMap['__addedKeys__']\n origInd = VarMap['__originalVersion__']\n \n c = dict([(VarMap[k],c[k]) for k in c.keys()])\n c[vNInd] = versionNumber\n s = dict([(VarMap[k],c[VarMap[k]]) for k in uniqueIndexes])\n s[vNInd] = versionNumber - 1\n \n H = collection.find_one(s)\n \n if H:\n if incremental:\n diff = dict([(k,H[k]) for k in H.keys() if k != '_id' and k not in specialKeyInds and k in c.keys() and notEqual(H[k],c[k]) ])\n newc = dict([(k,H[k]) for k in H.keys() if k != '_id' and k not in specialKeyInds and k not in c.keys() ])\n newc.update(c)\n c = newc\n else:\n diff = dict([(k,H[k]) for k in H.keys() if k != '_id' and k not in specialKeyInds and (k not in c.keys() or notEqual(H[k],c[k])) ])\n \n c[origInd] = H[origInd]\n \n if diff:\n diff[retInd] = True\n newkeys = [k for k in c.keys() if k not in H.keys()]\n if newkeys:\n diff[aKInd] = newkeys\n \n if diff: \n DIFF = ContentCols.intersection([totalVariables[int(k)] for k in diff.keys()])\n diff.update(s)\n collection.update(s,diff)\n print 'Diff:' , diff\n else:\n DIFF = False\n c['_id'] = H['_id']\n collection.remove(s)\n \n else:\n c[origInd] = versionNumber\n diff = c\n DIFF = True\n \n id = collection.insert(c) \n if DIFF:\n sliceInsert(c,collection,sliceColTuples,VarMap,sliceDB,versionNumber)\n \n return id, c", "def test_write_record_update(self):\n fake_uuid = uuid.uuid4()\n\n # Assert connection's from_task_id is not expected\n connection = TaskConnectionRepository.fetch_connections_for_pipeline(\n self.connection1.pipeline_id\n )[0]\n self.assertNotEquals(connection.from_task_id, fake_uuid)\n\n # Update connection's from_task_id\n connection.from_task_id = fake_uuid\n TaskConnectionRepository.write_record(connection)\n\n # Assert connection's from_task_id is as expected\n connection = TaskConnectionRepository.fetch_connections_for_pipeline(\n self.connection1.pipeline_id\n )[0]\n self.assertEquals(connection.from_task_id, fake_uuid)", "def test_update_multiple(test_store, andy, pandy, candy):\n n_updated = test_store.update(fields={\"age\": 14}, age=12)\n assert n_updated == 2\n items = list(test_store.get_by())\n\n andy.age = pandy.age = 14\n assert andy in items\n assert pandy in items\n assert candy in items", "def test_update_model(self):\r\n m0 = TestUpdateModel.create(count=5, text='monkey')\r\n\r\n # independently save over a new count value, unknown to original instance\r\n m1 = TestUpdateModel.get(partition=m0.partition, cluster=m0.cluster)\r\n m1.count = 6\r\n m1.save()\r\n\r\n # update the text, and call update\r\n m0.text = 'monkey land'\r\n m0.update()\r\n\r\n # database should reflect both updates\r\n m2 = TestUpdateModel.get(partition=m0.partition, cluster=m0.cluster)\r\n self.assertEqual(m2.count, m1.count)\r\n self.assertEqual(m2.text, m0.text)", "def test_update_model(self):\n m0 = TestUpdateModel.create(count=5, text='monkey')\n\n # independently save over a new count value, unknown to original instance\n m1 = 
TestUpdateModel.get(partition=m0.partition, cluster=m0.cluster)\n m1.count = 6\n m1.save()\n\n # update the text, and call update\n m0.text = 'monkey land'\n m0.update()\n\n # database should reflect both updates\n m2 = TestUpdateModel.get(partition=m0.partition, cluster=m0.cluster)\n self.assertEqual(m2.count, m1.count)\n self.assertEqual(m2.text, m0.text)", "def test_list_inplace_update(self):\r\n vm = List.value_manager(None, None, [1,2,3])\r\n assert not vm.changed\r\n vm.value.append(4)\r\n assert vm.changed", "def test_update_all(test_store, andy, pandy, candy):\n n_updated = test_store.update(fields={\"age\": 13})\n assert n_updated == 3\n items = list(test_store.get_by())\n\n andy.age = pandy.age = 13\n assert andy in items\n assert pandy in items\n assert candy in items", "def updateOne(self,ident):\n \tLOGGER.info(\"lazily updating {}\".format(ident))\n \tself.idToUpdate=ident\n \tself.newState=''\n \tself.save()", "def test_update_one(self):\n pass", "def record(self, pop, off, dad, mom):\n if mom is not None:\n off.setInfo(0, str(self.field))\n else:\n off.setInfo(dad.info(self.field) + 1, self.field)\n return True", "def apply_ruling(self, ruling, record):\r\n record.update(ruling)\r\n return record", "def test_Dataheap_Recorded_002_01(self):\n\n chanid = self.testenv['DOWNCHANID']\n starttimemyth = self.testenv['DOWNSTARTTIME']\n\n rec = Recorded((chanid, starttimemyth), db=self.mydb)\n\n # Recorded.rating is a list of lists of tuples\n # [[(u'system', u'ABCD'), (u'rating', '08.15')], [(u'system', u'WXYZ'), (u'rating', u'0.11')]]\n\n # add ratings to the recorded instance:\n rec.rating.add(u'ABCD', u'41.98')\n rec.rating.add(u'WXYZ', u'0.11')\n\n # check the ratings:\n #print(rec.rating)\n s0_found = s1_found = False\n r0_found = r1_found = False\n for (s,r) in rec.rating:\n # print(s)\n # print(r)\n if s == u'ABCD':\n s0_found = True\n if s == u'WXYZ':\n s1_found = True\n if r == u'41.98':\n r0_found = True\n if r == u'0.11':\n r1_found = True\n self.assertTrue(s0_found)\n self.assertTrue(s1_found)\n self.assertTrue(r0_found)\n self.assertTrue(r1_found)\n\n # revert last changes:\n rec.rating.revert()\n # check for an empty list:\n #print(rec.rating)\n self.assertEqual(len(rec.rating), 0)\n\n # add ratings again:\n rec.rating.add('ABCD', '41.98')\n rec.rating.add('QWERTZ', 'blah')\n rec.rating.add('WXYZ', '0.11')\n # commit these updates:\n rec.update()\n\n # get the recorded data again:\n recn = Recorded((chanid, starttimemyth), db=self.mydb)\n # edit existing rating data:\n for i,(s,r) in enumerate(recn.rating):\n if s == 'ABCD':\n break\n if i is not None:\n recn.rating[i]['rating'] = u'08.15'\n # commit that change:\n recn.update()\n # check the changed value:\n #print(rec.rating)\n rn_found = False\n for (s,r) in recn.rating:\n if r == u'08.15':\n rn_found = True\n self.assertTrue(rn_found)\n\n # delete a rating:\n recn.rating.delete(u'WXYZ', u'0.11')\n recn.update()\n #print(recn.rating)\n sn_found = False\n for (s,r) in recn.rating:\n if s == u'WXYZ':\n sn_found = True\n self.assertFalse(sn_found)\n\n # clean all ratings for this recorded instance:\n recn.rating.clean()\n recn.update()\n self.assertEqual(len(recn.rating), 0)", "def test_changedata(self):\n p = model.Person(firstname=\"Tobias\", lastname=\"Thelen\",\n email=\"[email protected]\", hobbies=[\"singen\",\"springen\",\"fröhlichsein\"])\n id = p.store()\n\n p = model.Person(id=id)\n p['firstname'] = \"Walter\"\n p.store()\n\n p2 = model.Person(id=id)\n self.assertEqual(p2.firstname, \"Walter\")\n 
self.assertEqual(p2.lastname, \"Thelen\")", "def test_lwt(self):\n session = self.prepare()\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n logger.debug(\"Inserting initial data using IF NOT EXISTS\")\n for i in range(1000):\n session.execute(\n \"INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS\".format(v=i)\n )\n self._replay_batchlogs()\n\n logger.debug(\"All rows should have been inserted\")\n for i in range(1000):\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0]\n )\n\n logger.debug(\"Tyring to UpInsert data with a different value using IF NOT EXISTS\")\n for i in range(1000):\n v = i * 2\n session.execute(\n \"INSERT INTO t (id, v, v2, v3) VALUES ({id}, {v}, 'a', 3.0) IF NOT EXISTS\".format(id=i, v=v)\n )\n self._replay_batchlogs()\n\n logger.debug(\"No rows should have changed\")\n for i in range(1000):\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0]\n )\n\n logger.debug(\"Update the 10 first rows with a different value\")\n for i in range(1000):\n v = i + 2000\n session.execute(\n \"UPDATE t SET v={v} WHERE id = {id} IF v < 10\".format(id=i, v=v)\n )\n self._replay_batchlogs()\n\n logger.debug(\"Verify that only the 10 first rows changed.\")\n results = list(session.execute(\"SELECT * FROM t_by_v;\"))\n assert len(results) == 1000\n for i in range(1000):\n v = i + 2000 if i < 10 else i\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(v),\n [v, i, 'a', 3.0]\n )\n\n logger.debug(\"Deleting the first 10 rows\")\n for i in range(1000):\n v = i + 2000\n session.execute(\n \"DELETE FROM t WHERE id = {id} IF v = {v} \".format(id=i, v=v)\n )\n self._replay_batchlogs()\n\n logger.debug(\"Verify that only the 10 first rows have been deleted.\")\n results = list(session.execute(\"SELECT * FROM t_by_v;\"))\n assert len(results) == 990\n for i in range(10, 1000):\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0]\n )", "def test_update(test_store, andy, pandy, candy):\n n_updated = test_store.update(fields={\"age\": 15}, name=\"Candy\")\n assert n_updated == 1\n items = list(test_store.get_by())\n\n candy.age = 15\n assert andy in items\n assert pandy in items\n assert candy in items", "def test_update(self):\n c = city.City(name=\"Paris\")\n p1 = city.Citizen(name=\"Peter\")\n c.add(p1, rel=city.hasInhabitant)\n\n with DataspaceSession(URI) as session:\n wrapper = city.CityWrapper(session=session)\n cw = wrapper.add(c)\n session.commit()\n\n p2 = city.Citizen(name=\"Georg\")\n cw.add(p2, rel=city.hasInhabitant)\n cw.name = \"Freiburg\"\n session.commit()\n\n check_state(self, c, p1, p2, db=DB)", "def update():", "def update():", "def test_update(self):\n track = Track(artist='Artist', album='Album', title='Title',\n ensemble='Ensemble', conductor='Conductor', composer='Composer')\n pk = track.insert(self.app.db,\n self.app.curs,\n 'xmms',\n datetime.datetime.now())\n self.assertNotEqual(pk, None)\n self.assertNotEqual(pk, 0)\n self.assertEqual(self.get_track_count(), 1)\n track_row = self.get_track_by_id(pk)\n self.assertEqual(track_row['artist'], 'Artist')\n self.assertEqual(track_row['album'], 'Album')\n self.assertEqual(track_row['title'], 'Title')\n self.assertEqual(track_row['ensemble'], 'Ensemble')\n 
self.assertEqual(track_row['composer'], 'Composer')\n self.assertEqual(track_row['conductor'], 'Conductor')\n\n # Now update the object and save out, and test.\n track.artist = 'Artist 2'\n track.album = 'Album 2'\n track.title = 'Title 2'\n track.ensemble = 'Ensemble 2'\n track.composer = 'Composer 2'\n track.conductor = 'Conductor 2'\n track.update(self.app.db, self.app.curs)\n self.assertEqual(self.get_track_count(), 1)\n track_row = self.get_track_by_id(pk)\n self.assertEqual(track_row['artist'], 'Artist 2')\n self.assertEqual(track_row['album'], 'Album 2')\n self.assertEqual(track_row['title'], 'Title 2')\n self.assertEqual(track_row['ensemble'], 'Ensemble 2')\n self.assertEqual(track_row['composer'], 'Composer 2')\n self.assertEqual(track_row['conductor'], 'Conductor 2')", "def update_data():\n pass", "def test_atomic_update_episode(self):\n episode = self._create_sample_episode()\n study_id, session_id, episode_id = (episode.study_id, episode.session_id,\n episode.id)\n\n def callback(read_episode):\n # Read episode should match the stored one.\n self.assertEqual(read_episode, episode)\n self.assertEqual(read_episode.num_steps, 100)\n # Make a change.\n read_episode.num_steps = 200\n return True\n\n self.assertTrue(\n self.storage.atomic_update_episode(study_id, session_id, episode_id,\n callback))\n # Check that the change was applied.\n episode.num_steps = 200\n self.assertEqual(\n self.storage.get_episode(study_id, session_id, episode_id), episode)", "def update( ):\r\n pass", "def cli_update_record(field_list, record_data):\n api.update_record(field_list, record_data)", "def process_records(records):\n changes = defaultdict(int)\n cascaded_create_records = set()\n cascaded_publish_records = set()\n cascaded_unpublish_records = set()\n cascaded_undelete_records = set()\n cascaded_update_records = set()\n cascaded_delete_records = set()\n cascaded_location_changes = set()\n\n for record in records:\n if record.change != ChangeType.deleted and record.object is None:\n # Skip entries which are not deletions but have no corresponding objects.\n # Probably they are updates for objects that got deleted afterwards.\n continue\n if record.change == ChangeType.created:\n assert record.type != EntryType.category\n cascaded_create_records.add(record)\n elif record.change == ChangeType.published:\n cascaded_publish_records.add(record)\n elif record.change == ChangeType.unpublished:\n cascaded_unpublish_records.add(record)\n elif record.change == ChangeType.undeleted:\n assert record.type != EntryType.category\n cascaded_undelete_records.add(record)\n elif record.change == ChangeType.deleted:\n assert record.type != EntryType.category\n cascaded_delete_records.add(record)\n elif record.change in {ChangeType.moved, ChangeType.protection_changed}:\n cascaded_update_records.add(record)\n elif record.change == ChangeType.data_changed:\n assert record.type != EntryType.category\n changes[record.object] |= SimpleChange.updated\n # subcontributions have their parent's time information, so we need to\n # cascade contribution updates to them\n if record.type == EntryType.contribution:\n for subcontrib in record.object.subcontributions:\n changes[subcontrib] |= SimpleChange.updated\n elif record.change == ChangeType.location_changed:\n assert record.type in (EntryType.event, EntryType.contribution, EntryType.session)\n cascaded_location_changes.add(record)\n\n for obj in _process_cascaded_category_contents(cascaded_update_records):\n changes[obj] |= SimpleChange.updated\n\n for obj in 
_process_cascaded_category_contents(cascaded_unpublish_records):\n changes[obj] |= SimpleChange.deleted\n\n for obj in _process_cascaded_category_contents(cascaded_publish_records):\n changes[obj] |= SimpleChange.created\n\n for obj in _process_cascaded_event_contents(cascaded_delete_records):\n changes[obj] |= SimpleChange.deleted\n\n for obj in _process_cascaded_event_contents(cascaded_create_records, include_deleted=True):\n changes[obj] |= SimpleChange.created\n\n for obj in _process_cascaded_locations(cascaded_location_changes):\n changes[obj] |= SimpleChange.updated\n\n for obj in _process_cascaded_event_contents(cascaded_undelete_records, skip_all_deleted=True):\n # This may result in a create for an object which is already created - in the (somewhat rare)\n # case of a deletion being followed by a restore in the same set of records.\n # However, since we expect backends to either convert those operations to an update or skip\n # them altogether this shouldn't be a problem\n changes[obj] |= SimpleChange.created\n changes[obj] &= ~SimpleChange.deleted\n\n created_and_deleted = {obj for obj, flags in changes.items() if (flags & CREATED_DELETED) == CREATED_DELETED}\n for obj in created_and_deleted:\n # discard any change where the object was both created and deleted\n del changes[obj]\n\n return {obj: _get_final_change(flags) for obj, flags in changes.items()}", "def test_update_values(self):\r\n partition = uuid4()\r\n for i in range(5):\r\n TestQueryUpdateModel.create(partition=partition, cluster=i, count=i, text=str(i))\r\n\r\n # sanity check\r\n for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):\r\n assert row.cluster == i\r\n assert row.count == i\r\n assert row.text == str(i)\r\n\r\n # perform update\r\n TestQueryUpdateModel.objects(partition=partition, cluster=3).update(count=6)\r\n\r\n for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):\r\n assert row.cluster == i\r\n assert row.count == (6 if i == 3 else i)\r\n assert row.text == str(i)", "def compute_upd(self, move):", "def test_updating_a_segment(self):\n pass", "def updated_minimal_record(minimal_record):\n minimal_record[\"access\"][\"status\"] = \"open\"\n minimal_record[\"created\"] = \"2023-03-09T00:00:00.000000+00:00\"\n minimal_record[\"id\"] = \"abcde-fghij\"\n\n for creator in minimal_record[\"metadata\"][\"creators\"]:\n name = creator[\"person_or_org\"].get(\"name\")\n if not name:\n creator[\"person_or_org\"][\"name\"] = \"Name\"\n\n return minimal_record", "def test_partial_updates(self):\r\n now = datetime.now()\r\n #derez it a bit\r\n now = datetime(*now.timetuple()[:-3])\r\n early = now - timedelta(minutes=30)\r\n earlier = early - timedelta(minutes=30)\r\n later = now + timedelta(minutes=30)\r\n\r\n initial = {'now': now, 'early': earlier}\r\n final = {'later': later, 'early': early}\r\n\r\n m1 = TestMapModel.create(text_map=initial)\r\n\r\n m1.text_map = final\r\n m1.save()\r\n\r\n m2 = TestMapModel.get(partition=m1.partition)\r\n assert m2.text_map == final", "def write(self):\n if not self._table: raise ValueError ( \"_table is Null\" )\n if self._isnew:\n for m in self._modified_values:\n self._modified_values[m] = self._table[m].val_py2sql(self._modified_values[m])\n \n try:\n rec = CFG.CX.insert ( CFG.DB.SCHEMA + \".\" + self._table.name,\n self._modified_values )\n #this will automatically re-read the data from the db, to take all changes\n #done by triggers and default values into account.\n self._objectid = rec['objectid']\n\n #print \"Record # {0} 
inserted into {1}.\".format(self._objectid, self._table.name)\n self.raiseEvent ( \"record_added\", self )\n \n except pg.DatabaseError, e:\n print \"Error inserting record.\"\n raise Record.DataManipulationError ( \"Inserting a new record into '{0}'\".format(self._table.name),\n str(self._modified_values),\n e)\n elif self._ismodified:\n \n for m in self._modified_values: \n self._modified_values[m] = self._table[m].val_py2sql(self._modified_values[m])\n \n self._modified_values['objectid'] = self._objectid\n del self.TextCache[self._objectid]\n try:\n rec = CFG.CX.update ( CFG.DB.SCHEMA + \".\" + self._table.name,\n self._modified_values )\n self.read() \n self.raiseEvent ( \"record_saved\", self )\n except pg.DatabaseError, e:\n print \"Error updating record\"\n raise Record.DataManipulationError ( \"Updating record {1} of '{0}'\".format(self._table.name, self._objectid),\n str(self._modified_values),\n e)", "def update(tablename: str, data: dict):\n try:\n if (t := tablenameRev[tablename]) not in sequenceTables:\n return False\n t.query.filter_by(id=data.pop(\"id\")).update(data)\n db.session.commit()\n del_cache_for_sequence_table(tablename)\n return True\n except:\n return False", "def test_dictionary_inplace_update(self):\r\n vm = Dictionary.value_manager(None, None, {1:2, 3:4})\r\n assert not vm.changed\r\n vm.value[4] = 5\r\n assert vm.changed", "def test_update_list_changes_data(qtbot):\n # Given\n model = SourcesModel()\n assert model.rowCount() == 0\n\n sources = []\n source = Source(\"I001\", \"Test\", \"Person\", \"Pub\", \"Abbr\")\n sources.append(source)\n\n # When\n with qtbot.waitSignals([model.modelAboutToBeReset, model.modelReset]):\n model.update_list(sources)\n\n # Then\n assert model.rowCount() == 1", "def visit_record(self, syrecord):\n for other_key, other_value in syrecord.items():\n try:\n getattr(self.current, other_key).update(other_value)\n except KeyError:\n setattr(self.current, other_key, other_value)", "def test_record_eq_record(self):\n zone = Zone('test.example.com')\n record_current = Record(zone, 'test-record', {'type': 'A', 'ttl': 300})\n record_desired = Record(zone, 'test-record', {'type': 'A', 'ttl': 300})\n self.assertTrue(record_current == record_desired)", "def test_update_values(self):\n partition = uuid4()\n for i in range(5):\n TestQueryUpdateModel.create(partition=partition, cluster=i, count=i, text=str(i))\n\n # sanity check\n for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):\n self.assertEqual(row.cluster, i)\n self.assertEqual(row.count, i)\n self.assertEqual(row.text, str(i))\n\n # perform update\n TestQueryUpdateModel.objects(partition=partition, cluster=3).update(count=6)\n\n for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):\n self.assertEqual(row.cluster, i)\n self.assertEqual(row.count, 6 if i == 3 else i)\n self.assertEqual(row.text, str(i))", "def test_update_no_commit(self):\n track = Track(artist='Artist', album='Album', title='Title',\n ensemble='Ensemble', conductor='Conductor', composer='Composer')\n pk = track.insert(self.app.db,\n self.app.curs,\n 'xmms',\n datetime.datetime.now())\n self.assertNotEqual(pk, None)\n self.assertNotEqual(pk, 0)\n self.assertEqual(self.get_track_count(), 1)\n track_row = self.get_track_by_id(pk)\n self.assertEqual(track_row['artist'], 'Artist')\n self.assertEqual(track_row['album'], 'Album')\n self.assertEqual(track_row['title'], 'Title')\n self.assertEqual(track_row['ensemble'], 'Ensemble')\n self.assertEqual(track_row['composer'], 
'Composer')\n self.assertEqual(track_row['conductor'], 'Conductor')\n\n # Now update the object and save out, and test.\n track.artist = 'Artist 2'\n track.album = 'Album 2'\n track.title = 'Title 2'\n track.ensemble = 'Ensemble 2'\n track.composer = 'Composer 2'\n track.conductor = 'Conductor 2'\n track.update(self.app.db, self.app.curs, commit=False)\n self.assertEqual(self.get_track_count(), 1)\n track_row = self.get_track_by_id(pk)\n self.assertEqual(track_row['artist'], 'Artist 2')\n self.assertEqual(track_row['album'], 'Album 2')\n self.assertEqual(track_row['title'], 'Title 2')\n self.assertEqual(track_row['ensemble'], 'Ensemble 2')\n self.assertEqual(track_row['composer'], 'Composer 2')\n self.assertEqual(track_row['conductor'], 'Conductor 2')\n self.app.db.rollback()\n self.assertEqual(self.get_track_count(), 1)\n track_row = self.get_track_by_id(pk)\n self.assertEqual(track_row['artist'], 'Artist')\n self.assertEqual(track_row['album'], 'Album')\n self.assertEqual(track_row['title'], 'Title')\n self.assertEqual(track_row['ensemble'], 'Ensemble')\n self.assertEqual(track_row['composer'], 'Composer')\n self.assertEqual(track_row['conductor'], 'Conductor')", "def test_list_update(self):\r\n vm = List.value_manager(None, None, [1,2,3])\r\n assert not vm.changed\r\n vm.value = [4,5,6]\r\n assert vm.changed", "def put_record(self, record):\r\n row = [record.get(field) for field in self.fields.names()]\r\n\r\n self.put(row)", "def test_edit_record_edits_the_correct_record(self):\n # create some db records\n dataset = self.create_mixed_test_data()\n test_log_entries = dataset['test_log_entries']\n # set the menu instance's `records` property\n self.menu.records = test_log_entries\n record_index = 1\n\n # handle the user input to select the record\n # handle the user input to specify the new values for the record\n log_entry_for_input = OrderedDict([\n ('name', \"New Test Employee\"),\n ('date', \"2017-10-05\"),\n ('task_name', \"New Test Task\"),\n ('duration', 55),\n ('notes', \"New Note\")\n ])\n\n user_inputs = [record_index + 1] + list(log_entry_for_input.values())\n # get the unedited requested record from the db\n old_query = self.base_query(test_log_entries[record_index])\n self.assertEqual(len(old_query), 1)\n\n # execute the method\n with patch('builtins.input', side_effect=user_inputs):\n self.menu.edit_record()\n\n # verify the record that was changed is the one selected by the user\n # (make sure we can get the record with the new details and we can't\n # get the record with the old details)\n new_query = self.base_query(log_entry_for_input)\n repeat_old_query = self.base_query(test_log_entries[record_index])\n\n self.assertEqual(len(new_query), 1) # new_query shd return one result\n self.assertEqual(len(repeat_old_query), 0) # query should be empty", "def test_update_many(self):\n sample_input = \"\"\"\nfoo=100\nbar=200, baz=300\n\"\"\"\n self.assertNotEquals(self.param_dict.get(\"foo\"), 100)\n self.assertNotEquals(self.param_dict.get(\"bar\"), 200)\n self.assertNotEquals(self.param_dict.get(\"baz\"), 300)\n result = self.param_dict.update_many(sample_input)\n log.debug(\"result: %s\", result)\n self.assertEquals(result[\"foo\"], True)\n self.assertEquals(result[\"bar\"], True)\n self.assertEquals(result[\"baz\"], True)\n self.assertEquals(self.param_dict.get(\"foo\"), 100)\n self.assertEquals(self.param_dict.get(\"bar\"), 200)\n self.assertEquals(self.param_dict.get(\"baz\"), 300)", "def test_record_eq_record_different_values_order(self):\n zone = 
Zone('test.example.com')\n data = {'type': 'A', 'ttl': 30, 'values': ['1.1.1.1', '2.2.2.2']}\n record_current = Record(zone, 'test-record', data)\n data = {'type': 'A', 'ttl': 30, 'values': ['2.2.2.2', '1.1.1.1']}\n record_desired = Record(zone, 'test-record', data)\n self.assertTrue(record_current == record_desired)", "def update(self, data_test):\n self.data_array[-1].update(data_test)", "def update(self, stock_record):\n self._records[stock_record.symbol] = stock_record", "def test_update_scores():\n ref = np.zeros((36,), dtype=int)\n depth = np.array([_ for _ in ref])\n ascore = np.array([3, 1, 2, 0, 0, 6])\n for offset in xrange(31):\n pref = [_ for _ in ref]\n prev = ref[offset + 5]\n pdep = depth[offset + 5]\n _update_scores(ref, depth, ascore, offset)\n if ref[offset + 5] != 6 + prev:\n raise ValueError('Error updating scores at offset {}\\n'\n 'prev: {}\\n'\n 'next: {}'.format(offset, pref, ref.tolist()))\n assert depth[offset + 5] == 1 + pdep", "def test_update(self):\n album = Album(artist='Artist', album='Album', album_type='ep',\n totaltracks=1, totalseconds=120)\n pk = album.insert(self.app.db, self.app.curs)\n self.assertNotEqual(pk, None)\n self.assertNotEqual(pk, 0)\n self.assertEqual(self.get_album_count(), 1)\n album_row = self.get_album_by_id(pk)\n self.assertEqual(album_row['alartist'], 'Artist')\n self.assertEqual(album_row['alalbum'], 'Album')\n self.assertEqual(album_row['altype'], 'ep')\n self.assertEqual(album_row['totaltracks'], 1)\n self.assertEqual(album_row['totalseconds'], 120)\n\n # Now update the object and save out, and test.\n album.artist = 'Artist 2'\n album.album = 'Album 2'\n album.album_type = 'live'\n album.totaltracks = 2\n album.totalseconds = 240\n album.update(self.app.db, self.app.curs)\n self.assertEqual(self.get_album_count(), 1)\n album_row = self.get_album_by_id(pk)\n self.assertEqual(album_row['alartist'], 'Artist 2')\n self.assertEqual(album_row['alalbum'], 'Album 2')\n self.assertEqual(album_row['altype'], 'live')\n self.assertEqual(album_row['totaltracks'], 2)\n self.assertEqual(album_row['totalseconds'], 240)", "def test_update(self):\n self.my_task.key = self.task_storage.add(self.my_task)\n\n self.my_task.title = 'foo'\n key = self.task_storage.update(self.my_task)\n new_task = self.task_storage.find(key)\n\n self.assertEqual(self.my_task, new_task)", "def update_one(self, obj, parsed):\n x = self.model.objects.filter(\n url=obj\n )\n if x.exists():\n x = x.latest('accessed')\n existing = self.get_deferred(x)\n if len(existing) != len(parsed):\n self.logger.error(\n \"Number of existing deferred instances (%d) does not match the number of parsed instances (%d).\",\n len(existing),\n len(parsed)\n )\n self.logger.error(\"This is probably due to the implementation of the get_deferred() method\")\n raise AttributeError(\"Lengths of deferred lists do not match.\")\n\n eq = all([\n dict_equality(e.attrs, p.attrs, return_diff=False) for e, p in zip(existing, parsed)\n ])\n\n if eq:\n # just update the accessed date\n x.accessed = timezone.now()\n x.save()\n return False\n else:\n for p in parsed:\n p.save()\n return True\n else:\n # no existing record, no comparison needed\n for p in parsed:\n p.save()\n return True", "def test_update_state4(self):\n pass", "def test_update_book_details(self):\n\n first_book_list = BookList()\n first_book = Book()\n\n first_book.create_book({\n \"title\": \"First Man\",\n \"author\": \"James R. 
Hansen\",\n \"year\": 2005,\n \"publisher_name\": \"Simon & Schuster\",\n \"publication_date\": \"01/01/2018\",\n \"num_copies\": 1\n })\n\n first_book_list.add_book(first_book)\n\n new_book_details = {\n \"title\": \"First Man\",\n \"author\": \"James Hansen\",\n \"year\": 2018,\n \"publisher_name\": \"Simon & Schuster\",\n \"publication_date\": \"01/01/2018\",\n \"num_copies\": 5\n }\n\n assert first_book_list.update_book_details(new_book_details) == True\n assert first_book_list.find_book(\"First Man\") == True\n\n for book in first_book_list.show_all():\n assert book.get(\"title\") == \"First Man\"\n assert book.set(\"title\", \"First Man: The Life of Neil A. Armstrong\") == True\n\n assert first_book_list.find_book(\"First Man: The Life of Neil A. Armstrong\") == True", "def test_partially_update_device_group_by_id1(self):\n pass", "def test_update(self):\n doc_fields = document_fields.DocumentFields({\n 'foo@': 'bar',\n })\n self.assertEquals('bar', doc_fields['foo'])\n doc_fields.update({\n 'foo@': 'bbq',\n })\n self.assertEquals('bbq', doc_fields['foo'])", "def test_insert_batch_result_with_a_single_update(self):\n incomplete = generate_mock_result(status='IN_PROGRESS', success=False, run_id=1)\n self.db.insert_result_batch(results=[incomplete, generate_mock_result(run_id=2)])\n self.assertEqual(2, len(self.db.get_results_for_project('TEST')))\n self.assertEqual(1, len(self.db.get_failed_results_for_project('TEST')))\n incomplete.update({'status': 'SUCCESS', 'success': True})\n self.db.insert_result_batch(results=[incomplete, generate_mock_result(run_id=3)])\n self.assertEqual(3, len(self.db.get_results_for_project('TEST')))\n self.assertEqual(0, len(self.db.get_failed_results_for_project('TEST')))", "def test_update(self):\n query = \"insert into cds values(%s,%s,%s,%s)\"\n values = (156098,\"haha\",\"haha 5\",2)\n self.a.insert(query, values)\n query1 = \"update cds set Quantity=%s where id=%s\"\n values1 = (5, 156098)\n self.a.update(query1, values1)\n query2 = \"select * from cds where id=156609\"", "def test_partially_update_device_by_id1(self):\n pass", "def test_update_no_updated(reader, chunk_size, call_update_method):\n reader._storage.chunk_size = chunk_size\n\n parser = Parser()\n reader._parser = parser\n\n feed = parser.feed(1, None, title='old')\n entry_one = parser.entry(1, 1, None, title='old')\n reader._now = lambda: naive_datetime(2010, 1, 1)\n reader.add_feed(feed.url)\n call_update_method(reader, feed)\n feed = feed.as_feed(added=datetime(2010, 1, 1), last_updated=datetime(2010, 1, 1))\n\n assert set(reader.get_feeds()) == {feed}\n assert set(reader.get_entries()) == {\n entry_one.as_entry(\n feed=feed, updated=datetime(2010, 1, 1), last_updated=datetime(2010, 1, 1)\n )\n }\n\n feed = parser.feed(1, None, title='new')\n entry_one = parser.entry(1, 1, None, title='new')\n entry_two = parser.entry(1, 2, None)\n reader._now = lambda: naive_datetime(2010, 1, 2)\n call_update_method(reader, feed)\n feed = feed.as_feed(added=datetime(2010, 1, 1), last_updated=datetime(2010, 1, 2))\n\n assert set(reader.get_feeds()) == {feed}\n assert set(reader.get_entries()) == {\n entry_one.as_entry(\n feed=feed, updated=datetime(2010, 1, 1), last_updated=datetime(2010, 1, 2)\n ),\n entry_two.as_entry(\n feed=feed, updated=datetime(2010, 1, 2), last_updated=datetime(2010, 1, 2)\n ),\n }", "def calculate_incremental(self):\n tmp = [x for x in self.data if x not in self._last_data]\n\n # consecutive refreshes are compared with latest block with atual data, not with latest empty diff\n 
if self.data:\n self._last_data = self.data\n\n self.data = tmp\n\n logging.debug(f'Sending incremental changes from {len(self._last_data)} messages to {len(self.data)}')", "def update(table, id_, record):\n\n index_id = 0\n record.insert(index_id, common.generate_random(table))\n table.append(record)\n data_manager.write_table_to_file(\"model/sales/sales.csv\", table)\n\n entry_index = 0\n for entry in table:\n entry_id_ = entry[0]\n if entry_id_ == id_:\n del table[entry_index]\n entry_index += 1\n\n return table", "def update_record(self, collection_name, update_record, update_condition):\n try:\n self.logger.info('in update_record()')\n collection = self.get_db()[collection_name]\n collection.update_one(update_condition, {\"$set\": update_record})\n self.logger.info('out update_record()')\n except Exception as e:\n self.logger.error(f'Error occurred while updating record {e}')", "def test02StoreRefresh(self):\n s = utils.FastStore(max_size=5)\n keys = []\n for i in range(0, 5):\n keys.append(s.Put(i, i))\n\n # This should not raise because keys[0] should be refreshed each time its\n # gotten\n for i in range(0, 1000):\n s.Get(keys[0])\n s.Put(i, i)", "def test_record_eq_record_different_values(self):\n zone = Zone('test.example.com')\n data = {'type': 'A', 'ttl': 30, 'values': ['1.1.1.1', '2.2.2.2']}\n record_current = Record(zone, 'test-record', data)\n data = {'type': 'A', 'ttl': 30, 'values': ['1.1.1.1', '3.3.3.3']}\n record_desired = Record(zone, 'test-record', data)\n self.assertTrue(record_current != record_desired)", "def post_seqnoincrease(self):", "async def line_to_obj(raw_line: bytearray, ref: Ref) -> Optional[ObjectRec]:\n # secondary_update = None\n if raw_line[0:1] == b\"0\":\n return None\n\n if raw_line[0:1] == b'-':\n rec = ref.obj_store[int(raw_line[1:], 16)]\n rec.alive = 0\n await mark_dead(rec.id)\n\n if 'Weapon' in rec.Type:\n impacted = await determine_contact(rec, type='impacted', ref=ref)\n if impacted:\n rec.impacted = impacted[0]\n rec.impacted_dist = impacted[1]\n sql = create_impact_stmt()\n vals = (ref.session_id, rec.parent, rec.impacted, rec.id,\n ref.time_offset, rec.impacted_dist)\n await DB.execute(sql, *vals)\n return rec\n\n comma = raw_line.find(b',')\n rec_id = int(raw_line[0:comma], 16)\n try:\n rec = ref.obj_store[rec_id]\n rec.update_last_seen(ref.time_offset)\n rec.updates += 1\n\n except KeyError:\n # Object not yet seen...create new record...\n rec = ObjectRec(id_=rec_id,\n session_id=ref.session_id,\n first_seen=ref.time_offset,\n last_seen=ref.time_offset)\n ref.obj_store[rec_id] = rec\n\n while True:\n last_comma = comma + 1\n comma = raw_line.find(b',', last_comma)\n if comma == -1:\n break\n\n chunk = raw_line[last_comma:comma]\n eq_loc = chunk.find(b\"=\")\n key = chunk[0:eq_loc]\n val = chunk[eq_loc + 1:]\n\n if key == b\"T\":\n i = 0\n pipe_pos_end = -1\n while i < COORD_KEY_LEN:\n pipe_pos_start = pipe_pos_end + 1\n pipe_pos_end = chunk[eq_loc + 1:].find(b'|', pipe_pos_start)\n if pipe_pos_start == -1:\n break\n\n coord = chunk[eq_loc + 1:][pipe_pos_start:pipe_pos_end]\n if coord != b'':\n c_key = COORD_KEYS[i]\n if c_key == \"lat\":\n rec.lat = float(coord) + ref.lat\n elif c_key == \"lon\":\n rec.lon = float(coord) + ref.lon\n else:\n rec.update_val(c_key, float(coord))\n i += 1\n else:\n rec.update_val(\n key.decode('UTF-8') if key != b'Group' else 'grp', val.decode('UTF-8'))\n\n rec.compute_velocity(ref.time_since_last)\n\n if rec.updates == 1 and rec.should_have_parent():\n parent_info = await determine_contact(rec, 
type='parent', ref=ref)\n if parent_info:\n rec.parent = parent_info[0]\n rec.parent_dist = parent_info[1]\n\n return rec" ]
[ "0.6519844", "0.6387769", "0.6352885", "0.6277602", "0.6213696", "0.5965861", "0.5937817", "0.592395", "0.58763266", "0.5874582", "0.58480644", "0.58157754", "0.57564855", "0.57024807", "0.5700393", "0.5700325", "0.56963235", "0.56787336", "0.5669062", "0.5668286", "0.56622356", "0.56566364", "0.56398803", "0.56323713", "0.5630778", "0.5624966", "0.5617725", "0.5610616", "0.56055605", "0.5581118", "0.55801344", "0.55752295", "0.5574742", "0.5556057", "0.55270773", "0.55102956", "0.5507046", "0.55039734", "0.5503154", "0.5487937", "0.5475612", "0.54628164", "0.54531074", "0.54491746", "0.5443293", "0.5433353", "0.54289293", "0.54191405", "0.5418267", "0.5410257", "0.540934", "0.54064006", "0.53999794", "0.53973967", "0.539263", "0.539263", "0.53900373", "0.5381283", "0.53726494", "0.5356833", "0.5356059", "0.5329966", "0.53279173", "0.5327907", "0.5327121", "0.53241163", "0.53034586", "0.52957445", "0.5293364", "0.52714497", "0.52701014", "0.52668464", "0.5261049", "0.5257035", "0.5253083", "0.525257", "0.5249184", "0.5242786", "0.52410144", "0.5230798", "0.52180874", "0.52169055", "0.5213075", "0.5210452", "0.52042246", "0.5202509", "0.5194258", "0.5190305", "0.5187657", "0.5186921", "0.5182773", "0.51740724", "0.51737785", "0.5168359", "0.51614887", "0.5156638", "0.5156533", "0.51520896", "0.5151724", "0.51498526", "0.5149711" ]
0.0
-1
Does the program remove a record from the sequential data structure as expected?
def test_CovidCase_delete(self):
    # setting up by creating and saving to the database
    del_Covid = self.create_CovidCase()
    del_Covid.save()
    del_id = del_Covid.id
    # we are going to delete by calling the delete function
    del_deleted = CovidCase.objects.get(id=del_id)
    del_deleted.delete()
    self.assertNotIn(del_Covid, CovidCase.objects.all())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove(self, val):\n if not val in self.record:\n return False\n index = self.record[val]\n self.data[index], self.data[-1] = self.data[-1], self.data[index]\n self.record[self.data[index]] = index\n self.data.pop()\n self.record.pop(val)\n return True", "def remove():", "def delete(self, record):\n temp = self.hashing(record.get_key())\n if self.__buckets[temp].contains(record):\n self.__buckets[temp].delete(record)\n self.__num_records -= 1", "def remove_record_failure():\n\t\tpass", "def delete(self, index):\n try:\n self.shapes.pop(index)\n self.records.pop(index)\n except IndexError:\n print('No record found at index: {}'.format(index))", "def delete_record(records):\n delete_record()", "def remove(self):", "def remove_record():\n # could use .../record/<name> in URL or as in this case as an argument .../record?name=bob\n if 'name' not in request.args:\n return \"need a name to delete a record!\", 400\n with RECORD_LOCK:\n if len([r for r in RECORDS if r.get('name') == request.args.get('name')]) == 0:\n return \"no such record found!\", 409\n RECORDS[:] = [r for r in RECORDS if r.get( # copy all but name matches\n 'name') != request.args.get('name')]\n return \"OK\"", "def delete(self, record):\n\n s = record.split()\n if len(s) != 3:\n sys.stderr.write('The format of the input should be like this: meal breakfast -50.\\\n \\nFail to delete a record.\\n')\n elif self._records.count(record) > 1:\n try:\n d = int(input(f'Which line of the record \"{record}\" is going to be deleted? '))\n testlist = []\n for i, v in enumerate(self._records):\n if v == record:\n testlist.append(i+1) # testlist contains the records that is identical to the input\n assert d in testlist\n except ValueError:\n sys.stderr.write('Invalid input. Should be an integer.\\nFail to delete a record.\\n')\n except AssertionError:\n sys.stderr.write(f'Invalid input number. 
No record of \"{record}\" in line {d}.\\\n \\nFail to delete a record')\n else:\n del(self._records[d-1])\n elif self._records.count(record) == 1:\n self._records.remove(record)\n else:\n sys.stderr.write(f'There\\'s no record with \"{record}\".\\nFail to delete a record.\\n')", "def test_remove_one(self):\n pass", "def delete_record(self):\n for record in self.records:\n if self.date_str == record[\"date\"]:\n self.records.remove(record)\n if len(self.records) > 0:\n self.write_json_file(self.records_file, self.records)\n else:\n os.remove(self.records_file)\n return True\n return False", "def delete_last_record():\n\tnewRcrds = list()\n\twith jsonlines.open('tempRecords.jsonl', mode='r') as readerOp:\n\t\tfor obj in readerOp:\n\t\t\tnewRcrds.append(obj)\n\twith jsonlines.open('tempRecords.jsonl', mode='w') as writerOp:\n\t\tif len(newRcrds) != 1:\n\t\t\t# checking if the record being removed is the last record which has file names.\n\t\t\tfor obji in newRcrds[:len(newRcrds)-1]:\n\t\t\t\twriterOp.write(obji)\n\t\telse:\n\t\t\t# if its the last record then do not delet it, as it is required for annotation data\n\t\t\tfor obji in newRcrds[:len(newRcrds)]:\n\t\t\t\twriterOp.write(obji)", "def delete_record(self, key):\n del self._records[key]", "def remove(self, val):\n ind = self.table.pop(val, None)\n if ind is None:\n return False\n key = self.ls.pop()\n if len(self.ls)!=0 and len(self.ls) != ind:\n self.ls[ind] = key\n self.table[key] = ind\n return True", "def test_delete_record(self):\n pass", "def delete(self):\n first = self.data[0]\n self.data.pop(0)\n self.size = self.size - 1\n return first", "def test_delete_complex_tree_08(comp):\n comp.delete(11)\n assert tuple(comp.in_order()) == (4, 6, 7, 8, 9, 10, 12, 13, 14, 15)\n assert tuple(comp.breadth_first()) == (12, 8, 14, 6, 10, 13, 15, 4, 7, 9)", "def _remove(self):\n pass", "def clean_collection(previous_records, collection):\n for previous_record in previous_records:\n collection.delete_one({\"_ref\": previous_record})", "def __delitem__(self, i):\n\n if self.mode == DB_OPEN_READ:\n raise RecordTableAccessError()\n\n # Check that the value was set (otherwise raise KeyError):\n self[i]\n self._set_packed_record(i, self.packer.empty_value)", "def remove_row(self, row_id):", "def test_delete_complex_tree_04(comp):\n comp.delete(13)\n assert tuple(comp.in_order()) == (4, 6, 7, 8, 9, 10, 11, 12, 14, 15)\n assert tuple(comp.breadth_first()) == (11, 8, 14, 6, 10, 12, 15, 4, 7, 9)", "def remove(self, key: int) -> None:\n index = key % 10000\n previous = self.array[index]\n current = previous.next\n while current:\n if current.key == key:\n previous.next = current.next\n break\n previous = previous.next\n current = current.next", "def testRemove(self):\n kinds = ('tiny', 'small')\n # Tests that an add followed by a remove of a load fails\n actions = [(0, 1, self.in_tables_v1[0]), (0, None, None)]\n self._run_actions(kinds, None, actions, 'rem1', True, False)\n # Tests that a sequence of two adds followed by removes of\n # those loads fails.\n actions = [(0, 1, self.in_tables_v1[0]),\n (1, 1, self.in_tables_v1[1]),\n (0, 1, None),\n (1, 1, None)]\n self._run_actions(kinds, None, actions, 'rem2', True, False)\n # Tests that a sequence of 3 adds for different loads,\n # followed by 3 adds of different versions of those loads,\n # followed by removes of the 3 original versions yields the\n # same index as one generated from the 3 new load versions.\n in_tables_v1 = self.in_tables_v1[:3]\n in_tables_v2 = self.in_tables_v2[:3]\n actions = [(i, 
1, t) for i, t in enumerate(in_tables_v1)]\n actions.extend([(i, 2, t) for i, t in enumerate(in_tables_v2)])\n actions.extend([(i, 1, None) for i in xrange(3)])\n self._run_actions(kinds, in_tables_v2, actions, 'rem3')\n # Tests that a sequence of 3 adds for different loads,\n # followed by 3 adds of different versions of those loads,\n # followed by removes of the 3 new versions yields the\n # same index as one generated from the 3 original load\n # versions.\n actions = [(i, 1, t) for i, t in enumerate(in_tables_v1)]\n actions.extend([(i, 2, t) for i, t in enumerate(in_tables_v2)])\n actions.extend([(i, 2, None) for i in xrange(3)])\n self._run_actions(kinds, in_tables_v1, actions, 'rem4')\n # Tests that a sequence of 3 adds for different loads,\n # followed by an add of a different version for one load,\n # followed by a remove of that entire load yields the same\n # index as one generated from the other 2 loads.\n in_tables = [in_tables_v1[0], in_tables_v1[2]]\n actions = [(i, 1, t) for i, t in enumerate(in_tables_v1)]\n actions.append((1, 2, in_tables_v2[1]))\n actions.append((1, None, None))\n self._run_actions(kinds, in_tables, actions, 'rem5')", "def test_delete_complex_tree_07(comp):\n comp.delete(12)\n assert tuple(comp.in_order()) == (4, 6, 7, 8, 9, 10, 11, 13, 14, 15)\n assert tuple(comp.breadth_first()) == (11, 8, 14, 6, 10, 13, 15, 4, 7, 9)", "def test_delete_occurrence(self):\n pass", "def test_delete_complex_tree_03(comp):\n comp.delete(15)\n assert tuple(comp.in_order()) == (4, 6, 7, 8, 9, 10, 11, 12, 13, 14)\n assert tuple(comp.breadth_first()) == (11, 8, 13, 6, 10, 12, 14, 4, 7, 9)", "def remove():\n pass", "def clear(self):\n del self._record_list[:]\n self._number = 0", "def test_delete_complex_tree_02(comp):\n comp.delete(4)\n assert tuple(comp.in_order()) == (6, 7, 8, 9, 10, 11, 12, 13, 14, 15)\n assert tuple(comp.breadth_first()) == (11, 8, 13, 6, 10, 12, 14, 7, 9, 15)", "def test_delete_complex_tree_05(comp):\n comp.delete(8)\n assert tuple(comp.in_order()) == (4, 6, 7, 9, 10, 11, 12, 13, 14, 15)\n assert tuple(comp.breadth_first()) == (11, 9, 13, 6, 10, 12, 14, 4, 7, 15)", "def remove(self, o):\n for idx in range(self.current):\n if(self.arr[idx] == o): # same object # np.array_equal(self.arr[idx], o) => same values\n self.arr[idx] = self.arr[self.current-1]\n self.current -= 1\n break", "def _del(self, entry):\n entry.key = dummy\n entry.value = None\n self.used -= 1", "def remove(self):\n pass", "def remove(self):\n pass", "def remove(self):\n pass", "def remove(self):\r\n if self.first() is not None:\r\n self.dec_size()\r\n self.set_first(self.first().next())\r\n if self.size() == 0: # when there are no more elements in the list,\r\n self.__last = None # remove the pointer to the last element\r", "def delete_element_from_store(entry_sequence, is_propagated_call = False):\n\t\tglobal board, node_id\n\t\tsuccess = False\n\t\ttry:\n\t\t\tdel board[int(entry_sequence)]\n\t\t\tsuccess = True\n\t\texcept Exception as e:\n\t\t\tprint e\n\t\treturn success", "def test_delete_complex_tree_06(comp):\n comp.delete(9)\n assert tuple(comp.in_order()) == (4, 6, 7, 8, 10, 11, 12, 13, 14, 15)\n assert tuple(comp.breadth_first()) == (11, 8, 13, 6, 10, 12, 14, 4, 7, 15)", "def test_remove(self):\n solution = pk.Solution()\n model = pk.Model('iv')\n protocol = pk.Protocol()\n model2 = pk.Model('sc')\n protocol2 = pk.Protocol(initial_dose=1.1, time_span=1.2)\n with self.assertRaises(TypeError):\n solution.remove()\n with self.assertRaises(TypeError):\n solution.remove('model')\n 
with self.assertRaises(IndexError):\n solution.remove(1)\n solution.add(model, protocol)\n self.assertEqual(solution.list_compartments, [(model, protocol)])\n solution.remove(0)\n self.assertEqual(solution.list_compartments, [])\n solution.add(model, protocol)\n solution.add(model2, protocol2)\n self.assertEqual(solution.list_compartments,\n [(model, protocol), (model2, protocol2)])\n solution.remove(1)\n self.assertEqual(solution.list_compartments, [(model, protocol)])\n solution.remove(0)\n self.assertEqual(solution.list_compartments, [])\n with self.assertRaises(IndexError):\n solution.remove(0)", "def testRemove(self):\n\n numIns = randint(70,200)\n\n for i in xrange(numIns):\n self.s.insert(i, None)\n for i in xrange(numIns):\n self.s.remove(i)", "def test_remove_by_identifier(self):\n view = SchemaView(SCHEMA)\n patcher = ObjectChanger(schemaview=view)\n dataset = yaml_loader.load(DATA, target_class=Dataset)\n n_persons = len(dataset.persons)\n dataset: Dataset\n change = RemoveObject(value=Person(id='P:002'))\n r = patcher.apply(change, dataset)\n logging.info(yaml_dumper.dumps(dataset))\n self.assertEqual(len(dataset.persons), n_persons-1)\n self.assertEqual(dataset.persons[0].id, 'P:001')", "def test_remove(self):\n data = [4, 4, 8, 9, 4, 12, 9, 11, 13]\n h = Heap(data)\n h.remove(2)\n\n self.assertTrue(Heap.is_heap(data), 'should preserve heap property')\n self.assertNotIn(8, h.data, 'the value corresponding to the index was removed')", "def test_del_handles_multiple_place_changes(robust):\n robust.delete(9)\n assert robust.balance() == 1\n assert tuple(robust.in_order()) == (\n 1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19\n )\n robust.delete(10)\n assert tuple(robust.in_order()) == (\n 1, 2, 3, 4, 5, 6, 7, 8, 11, 12, 13, 14, 15, 16, 17, 18, 19\n )\n assert robust.balance() == 1\n assert robust.depth() == 5\n robust.delete(19)\n robust.delete(11)\n robust.delete(12)\n assert tuple(robust.in_order()) == (\n 1, 2, 3, 4, 5, 6, 7, 8, 13, 14, 15, 16, 17, 18\n )\n assert tuple(robust.breadth_first()) == (\n 8, 4, 16, 2, 6, 14, 18, 1, 3, 5, 7, 13, 15, 17\n )\n assert robust.balance() == 0\n assert robust.depth() == 4", "def remove_sequence(self):\n self.sequence_fragment_list = []", "def testRemoveDeep(self):\n ref = set()\n #pdb.set_trace()\n for j in xrange(randint(5, 50)):\n N = randint(60, 170)\n R = N/2 + randint(0, N/2) #remove over half the entries\n for i in xrange(N): #add\n a = randint(-2147483648,2147483647)\n self.s.insert(a, True)\n ref.add(a)\n self.assertTrue(a in self.s)\n self.assertTrue(a in ref)\n #ensure both contain equal number of elements\n #self.assertEqual(len(ref), len(self.s))\n for i in xrange(R): #remove\n a = ref.pop()\n self.s.remove(a)\n self.assertFalse(a in self.s)\n self.assertFalse(a in ref)\n #check no extra elements were accidentially deleted\n for j in ref:\n try:\n self.assertTrue(j in self.s)\n except Exception as e:\n print \"missing:\", j\n print self.s\n print ref\n raise e\n #check consistancy twice\n self.assertEqual(len(ref), len(self.s))\n for i in ref:\n self.assertIsNotNone(self.s.find(i))\n self.assertEqual(len(ref), len(self.s))\n for i in ref:\n self.assertIsNotNone(self.s.find(i))", "def test_ordered_refcount_remove(cls):\n sl = orderedstructs.SkipList(object)\n obj = cls(0)\n rc = sys.getrefcount(obj)\n sl.insert(obj)\n sl.remove(obj)\n assert sys.getrefcount(obj) == rc", "def remove(self):\n if LongObjectHashMap.self.modCount != self.expectedModCount:\n raise ConcurrentModificationException()\n if 
self.lastReturned == self.EMPTY_KEY:\n raise IllegalStateException()\n self.count -= 1\n LongObjectHashMap.self.remove(self.lastReturned)\n self.lastReturned = self.EMPTY_KEY\n self.expectedModCount = LongObjectHashMap.self.modCount", "def test_deletion_no_child(basic_tree):\n tree = red_black_tree.RBTree()\n\n test_tree = [(23, \"23\"), (4, \"4\"), (30, \"30\"), (11, \"11\")]\n\n for key, data in test_tree:\n tree.insert(key=key, data=data)\n\n tree.delete(4)\n assert [item for item in tree.inorder_traverse()] == [\n (11, \"11\"),\n (23, \"23\"),\n (30, \"30\"),\n ]", "def remove(self, data):\n\n traverse = self.head\n temp = self.head\n if traverse.data == data:\n self.head = traverse.next\n return\n\n while traverse.next != None:\n\n temp = traverse.next\n if temp.data == data:\n traverse.next = temp.next\n return\n\n traverse = traverse.next", "def test_remove(self):\n pass", "def delete_order():", "def test_relation_after_remove():\n assert query_row(db_conf, 'osm_buildings', 50011)['type'] == 'yes'\n assert query_row(db_conf, 'osm_landusages', 50021) == None\n assert query_row(db_conf, 'osm_landusages', -50021) == None", "def remove(self, val):\n if val in self.dic:\n i = self.dic[val]\n if i<len(self.data)-1:\n self.data[i]=self.data[-1]\n self.dic[self.data[i]]=i\n self.data.pop()\n self.dic.pop(val,0)\n return True\n else:\n return False", "def remove(self, val):\n temp = self.table.pop(val, None)\n if temp is None:\n return False\n return True", "def delete(self, value):\n for i in range(value):\n self.popleft()\n self.annotations.popleft()", "def remove(self, item):\n\t\tif self.len == 0:\n\t\t\traise ValueError(\"Lista vacia\")\n\t\tif self.prim.dato == item:\n\t\t\tself.borrar_primero()\n\t\t\treturn\n\t\tanterior = self.prim\n\t\tactual = anterior.prox\n\t\twhile actual and actual.dato != item:\n\t\t\tanterior = anterior.prox\n\t\t\tactual = actual.prox\n\t\tif not actual:\n\t\t\traise ValueError(\"Elemento no encontrado\")\n\t\tanterior.prox = actual.prox\n\t\tself.len -= 1", "def drop_matching_records(self, check):\n matches = self._match(check)\n for rec in matches:\n self._drop_bytes(rec)\n del self._records[rec['msg_id']]", "def __delitem__(self, key):\r\n key = self.key(key)\r\n if key in self.data_with_same_key:\r\n if len(self.data_with_same_key[key]) == 1:\r\n self.data[key] = self.data_with_same_key.pop(key)[0]\r\n else:\r\n self.data[key] = self.data_with_same_key[key].pop(-1)\r\n else:\r\n del self.data[key]", "def removeData(self, data: ghidra.program.model.listing.Data) -> None:\n ...", "def remove(table, id_):\n count=0\n searched_index=-1\n in_it=False\n for i in table:\n if i[0]==id_:\n searched_index=count\n in_it=True\n count+=1\n\n if in_it: \n table.pop(searched_index)\n else:\n ui.print_error_message(\"ID not found\")\n \n return table", "def remove(self):\n raise NotImplementedError", "def remove(self):\n raise NotImplementedError", "def __delitem__(self, i):\n # An element of a policy function can't be deleted", "def rm(self, line):\n self.log('rm({0})'.format(line))\n if line is False:\n return False\n if isinstance(line, str):\n line = line.split('\\n')\n if not isinstance(line, list):\n raise TypeError(\"Parameter 'line' not a 'string' or 'list', is {0}\".format(type(line)))\n local_changes = False\n for this in line:\n if this in self.contents:\n while this in self.contents:\n self.log('Removed \"{0}\" from position {1}'.format(this, self.contents.index(this)))\n self.contents.remove(this)\n self.changed = local_changes = True\n else:\n 
self.log('\"{0}\" not in {1}'.format(this, self.filename))\n if self.sorted and local_changes:\n self.sort()\n return local_changes", "def __delitem__(self, i):\n key = self._main._sequence[i]\n if isinstance(i, types.SliceType):\n for k in key:\n # FIXME: efficiency?\n del self._main[k]\n else:\n del self._main[key]", "def remove(self, key):", "def test_remove_book(self):\n\n first_book_list = BookList()\n first_book = Book()\n\n first_book.create_book({\n \"title\": \"First Man\",\n \"author\": \"James R. Hansen\",\n \"year\": 2005,\n \"publisher_name\": \"Simon & Schuster\",\n \"publication_date\": \"01/01/2018\",\n \"num_copies\": 1\n })\n\n first_book_list.add_book(first_book)\n\n assert first_book_list.remove(\"title\", \"First Man\") == True\n assert first_book_list.count() == 0", "def test_delete_records(self):\n pass", "def test_remove(self):\n sched = Schedule()\n inst_map = InstructionScheduleMap()\n\n inst_map.add(\"tmp\", 0, sched)\n inst_map.remove(\"tmp\", 0)\n self.assertFalse(inst_map.has(\"tmp\", 0))\n with self.assertRaises(PulseError):\n inst_map.remove(\"not_there\", (0,))\n self.assertFalse(\"tmp\" in inst_map.qubit_instructions(0))", "def __delitem__(self,key):\n if key in self.changed: self.changed.remove(key)\n if key not in self.deleted: self.deleted.append(key)\n del self.data[key]", "def remove(table, id_):\n\n # 3\n for index in range(len(table)):\n if table[index][0] == id_:\n table.pop(index)\n data_manager.write_table_to_file('hr/persons.csv', table)\n return table", "def remove(self):\r\n\t\tself._delete()", "def remove(self, value):\n found = False\n for i in range(len(self.data)):\n if self.data[i] != value:\n pass\n else:\n found = True\n self.__delitem__(i)\n break\n if not found:\n raise ValueError", "def test_remove_one(self):\n seq_run = SequencingRun(None, sequencing_run_type=\"test\",\n project_list=[\n Project(sample_list=[\n Sample(\"one\"),\n Sample(\"two\"),\n Sample(\"three\")\n ], id=1)\n ])\n sample_status_list = [DirectoryStatus.SampleStatus(sample_name=\"one\", project_id=\"1\", uploaded=False),\n DirectoryStatus.SampleStatus(sample_name=\"two\", project_id=\"1\", uploaded=True),\n DirectoryStatus.SampleStatus(sample_name=\"three\", project_id=\"1\", uploaded=False)]\n\n res = upload_helpers.set_uploaded_samples_to_skip(seq_run, sample_status_list)\n\n res_samples = res.project_list[0].sample_list\n self.assertEqual(res_samples[0].skip, False)\n self.assertEqual(res_samples[1].skip, True)\n self.assertEqual(res_samples[2].skip, False)", "def __delitem__(self, where):\n with self._lock:\n self._current_bytes -= self._data[where]\n del self._data[where]\n self._order.remove(where)", "def remove(self, obj):\n\n if self.begin.value == obj:\n self.begin = self.begin.next\n self.begin.prev = None\n\n elif self.end.value == obj:\n self.end = self.end.prev\n self.end.next = None\n\n else:\n node = self.begin\n while node.value != obj:\n node = node.next\n node.prev.next = node.next\n node.next.prev = node.prev", "def remove(table, id_):\n # 3\n for index in range(len(table)):\n if table[index][0] == id_:\n table.pop(index)\n data_manager.write_table_to_file('accouting/items.csv', table)\n\n return table", "def remove(self, val):\n i = self.d.get(val)\n if i is None:\n return False\n assert 0 <= i < len(self.l)\n last_val = self.l[-1]\n if val != last_val:\n self.d[last_val] = i\n self.l[i] = last_val\n del self.d[val]\n _ = self.l.pop()\n return True", "def remove(self, key: int) -> None:\n idx = key % 1000\n if not self.map[idx]:\n return\n elif 
self.map[idx].key == key:\n self.map[idx] = self.map[idx].next\n else:\n curr = self.map[idx]\n prev = curr\n curr = curr.next\n while curr:\n if curr.key == key:\n prev.next = curr.next\n break\n curr = curr.next\n prev = prev.next", "def remove(self, data):\n\n traverse = self.head\n temp = self.head\n if self.head == None:\n return None\n\n if traverse.data == data:\n self.head = traverse.next\n return\n\n while traverse.next != None:\n\n temp = traverse.next\n if temp.data == data:\n traverse.next = temp.next\n return\n\n traverse = traverse.next", "def delete(self, key):\n index = self.quadratic_probe(key)\n if self.table[index] is not None:\n self.table[index] = None\n self.size -= 1", "def delete_cand():\n if row:\n return row[0] + [[key + [left_i]]]", "def remove(self, key):\n i = key //1000\n j = key%1000\n self.container[i][j] = -1", "def remove(self, key: int) -> None:\n index = key % self.size\n if self.table[index].value is None:\n return \n \n p = self.table[index]\n \n if p.key == key:\n if p.next is None:\n self.table[index] = ListNode()\n else:\n self.table[index] = p.next\n return\n \n prev = p\n while p:\n if p.key == key:\n prev.next = p.next\n return\n prev = p\n p = p.next\n #p = p.next\n #prev = p\n #prev, p = p, p.next", "def remove(table, id_):\n return common.remove_line(table, id_)", "def test_dataset_deltitem(train_dataset):\n with pytest.raises(Exception):\n del train_dataset[0]", "def remove(self, d):\n\n if self.head is not None:\n if self.head.data == d:\n self.head = self.head.next\n else:\n temp = self.head\n while temp.next is not None:\n if temp.next.data == d:\n temp.next = temp.next.next\n break\n else:\n temp = temp.next", "def remove(self, value: object) -> bool:\n for _ in range(self.da.length()):\n if value == self.da[_]:\n self.da.remove_at_index(_)\n return True\n return False", "def remove(self, key: int) -> None:\n idx = key % self.size\n if self.mp[idx]:\n for i in range(len(self.mp[idx])):\n if self.mp[idx][i][0] == key:\n #self.mp[idx].pop(i)\n del self.mp[idx][i]\n break", "def remove_one(self):\n item = self.expiry.pop(0)\n if item.updated:\n self.new_expiry.append(item)\n return\n del self.index[item.target]\n return", "def delete(self, key: int) -> None:\n i = k % self.capacity\n cur = pre = self.data[i]\n if not cur:\n return\n if cur.pair[0] == k:\n self.data[i] = cur.next\n else:\n cur = cur.next\n while cur:\n if cur.pair[0] == k:\n pre.next = cur.next\n break\n else:\n cur, pre = cur.next, pre.next", "def drop_info(batch):\n if 'id' in batch:\n batch.pop('id')\n if 'rng' in batch:\n batch.pop('rng')\n return batch", "def test_ordered_insert_remove_id_is_same(cls):\n sl = orderedstructs.SkipList(object)\n obj_insert = cls(0)\n sl.insert(obj_insert)\n obj_remove = sl.remove(obj_insert)\n assert id(obj_remove) == id(obj_insert)", "def drop_record(self, msg_id):\n rec = self._records[msg_id]\n self._drop_bytes(rec)\n del self._records[msg_id]", "def _valueRemoved(self):\n self.__changed = 1\n if self.size() == 0:\n raise ConsistencyFailure()", "def remove(self, data_id, idx):\n temp = self.database[data_id]\n del temp[idx]\n self.database[data_id] = temp", "def prune(record: cfg.OpenAPI, outformat: cfg.Format = None) -> NoReturn:\n content = record.oas\n # Cleanup\n content, _, _ = clean.remove_unused_components(content)\n content = clean.remove_empty_objects(content)\n # Output\n out(content, outformat or record.oastype)", "def delete(self):\n if not self.isNew:\n #We do not check the hasData property, so we can use this function to 
delete records\n #without reading them first.\n #TODO: this is stupid and unclean, change it\n try:\n CFG.CX.delete ( CFG.DB.SCHEMA + \".object\", { 'objectid' : self._objectid } )\n self.clearRecord()\n self.raiseEvent ( \"record_deleted\", self )\n except pg.DatabaseError, e:\n raise Record.DataManipulationError ( \"Deleting record {1} of '{0}'\".format(self._table.name, self._objectid),\n \"\",\n e)", "def delete(self) -> None:\n self.pop()", "def delete_record(Xfile, Xaccn_num):\n #open the file for reading \n opened_file = open(Xfile, 'r')\n #create a queue object.\n my_queue = Queue()\n #loop through the file looking for the record.\n for line in opened_file:\n #make line array\n line_array = line.split(\",\")\n if Xaccn_num == line_array[2]: #if the account is found\n #change the value in the array\n line_array[4] = \"False\"\n line = \",\".join(line_array)\n my_queue.put(line) #add line to queue\n #close the file\n opened_file.close()\n #loop through the file and write to it\n opened_file = open(Xfile,'w')\n #loop through it and write to it\n while not my_queue.empty(): #while my queue is not empty\n #Add lines to my file\n line = my_queue.get()\n if line[-1] != \"\\n\":\n line += \"\\n\"\n if my_queue.qsize() == 0:\n line = line[:len(line)-1]\n opened_file.write(line)\n opened_file.close()" ]
[ "0.68329746", "0.67125225", "0.6622166", "0.66102016", "0.65779585", "0.65033436", "0.6474776", "0.63278514", "0.6315034", "0.629419", "0.6229488", "0.61964864", "0.6193291", "0.6143841", "0.6140655", "0.61280227", "0.6104588", "0.6092655", "0.60867935", "0.6085228", "0.6081483", "0.6077938", "0.60763067", "0.6059216", "0.60434735", "0.6040234", "0.6035215", "0.60303664", "0.60215664", "0.60093266", "0.6009003", "0.6004021", "0.59832156", "0.598045", "0.598045", "0.598045", "0.59659106", "0.59596354", "0.5951158", "0.5939898", "0.59274507", "0.59222627", "0.59197295", "0.59079707", "0.5893774", "0.5893661", "0.58870083", "0.5883468", "0.58782154", "0.58766425", "0.5856446", "0.58497405", "0.58460563", "0.58455276", "0.5840618", "0.58396375", "0.58129525", "0.5800918", "0.5793799", "0.57932246", "0.57877725", "0.5783191", "0.5783191", "0.5771169", "0.57656986", "0.57589185", "0.57541937", "0.57491946", "0.57488126", "0.5746914", "0.5736057", "0.5732003", "0.5731158", "0.5727196", "0.57238936", "0.5721188", "0.5714131", "0.5707318", "0.56959623", "0.56942445", "0.5693884", "0.5692844", "0.5686556", "0.56832105", "0.5675736", "0.5674021", "0.56718814", "0.56717956", "0.5671519", "0.56680024", "0.56660265", "0.5661453", "0.56607825", "0.56606865", "0.5659155", "0.5657273", "0.5656695", "0.56533027", "0.5651631", "0.5645843", "0.56443787" ]
0.0
-1
Does the program catch any exceptions or errors if the file is missing?
def test_file_error(self):
    my_reader = DataSetReader()
    covid_list = CovidCase.objects.all()
    with self.assertRaises(IOError):
        my_reader.writeFile(covid_list, "Not_A_File.csv")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_not_present_file(self):\n\t\ttry:\n\t\t\tmain.Main(['input/abc.txt']).run()\n\t\texcept:\n\t\t\tself.assertTrue(True)", "def FileCheck(fn):\n try:\n open(fn, \"r\")\n return 1\n except IOError:\n print(\"Error: File does not exist.\")\n return 0", "def test_no_such_file(self):\n\t\twith self.assertRaises(IOError):\n\t\t\tanalyse_text('foobar')", "def fileCheck(filename):\n if not os.path.isfile(filename):\n print('File: ' + filename + ' not found. Exiting...', file=sys.stderr)\n sys.exit(1)", "def test_no_such_file(self):\n with self.assertRaises(IOError):\n analyze_text('foobar')", "def test_no_such_file(self):\n with self.assertRaises(IOError):\n analyze_text('foobar')", "def test_load_missing_file(self):\n # Technically there's a race condition here, but... I'm not\n # particularly fussed about it.\n\n filename = '/%s' % (uuid.uuid4())\n while os.path.exists(filename): # pragma: no cover\n filename = '/%s' % (uuid.uuid4())\n\n with self.assertRaises(Exception):\n track = Track.from_filename(filename)", "def provoke_and_handle_FileNotFoundError():\n try:\n with open(\"NEIN.mp3\") as f:\n print(\"well\")\n except FileNotFoundError as fnfe:\n print(f\"Sorry! {fnfe}\")", "def file_is_missing(filename, allow_missing=True):\n\n if os.path.exists(filename):\n return False\n if not allow_missing:\n raise RuntimeError(f\"File {filename} does not exist\")\n print(f\"Warning: file {filename} does not exist\")\n return True", "def test_no_such_file(self):\n with self.assertRaises(IOError):\n analyze_text('foobar')", "def test_exit_on_missing_file(self):\n with self.assertRaises(SystemExit):\n pyint = Interpreter()\n pyint.run(file=MISSING_FILE)", "def test_file_read_missing_file(self):\n with (self.assertRaises(IOError)):\n FileReader(self.bogus_path).read()", "def test_missing_file():\n passed = False\n try:\n x = XPIManager('foo.bar')\n except:\n passed = True\n assert passed", "def fileCheck(file):\n if not os.path.isfile(file):\n print('File : ',file)\n print('E... 
'+'no file')\n sys.exit()", "def test_readable_error_if_file_not_found(self):\n fake_path = 'this/path/is/not/real'\n self.assertEqual(LoadJsonConfig.read_config_file(LoadJsonConfig(), fake_path), 'File not found at ' + fake_path)", "def file_missing(filename):\n return not os.path.isfile(filename)", "def test_file_read_bin_missing_file(self):\n with (self.assertRaises(IOError)):\n FileReader(self.bogus_path).read_bin()", "def test_info_fail(self):\n path = \"non_existing_audio.wav\"\n with self.assertRaisesRegex(RuntimeError, path):\n self._info(path)", "def _check(self, config: Dict):\n if 'path' not in config:\n raise FileNotFoundError(\"File not found.\")", "def check_file_existence(file_name):\n if not os.path.isfile(file_name):\n raise IOError(\"{} doesn't exist or isn't a file\".format(file_name))", "def test_the_main_non_existent_file(self):\r\n with self.assertRaises(SystemExit):\r\n the_main_function(\"non existent file\")", "def validate_file_exists(filename):\n try:\n file = open(filename, \"r\")\n file.close()\n except IOError:\n print(\"\\nError: The file \" + filename + \" does not exist or could not be read.\\n\")\n sys.exit(1)", "def check_file(path):\n if not os.path.exists(path):\n raise RuntimeError('path [%s] does not exist' % path)\n if not os.path.isfile(path):\n raise RuntimeError('path [%s] is no file' % path)\n if not os.access(path, os.R_OK):\n raise RuntimeError('file [%s] cannot be read' % path)\n if not os.path.getsize(path) > 0:\n raise RuntimeError('file [%s] is 0KB' % path)\n else:\n logging.debug('file [%s] checked successfully', path)", "def file_exception_handler(fpath: str, dryrun: bool):\n if not dryrun:\n if os.path.exists(fpath):\n print(f\"*** Your file {fpath} is being procesed ***\")\n else:\n print(f\"\\n*** WARNING: FATAL ERROR. Your file `{fpath}` does not exist. 
***\")\n print(f\"*** Check that your file is correctly named and placed in the right folder and come back ***\")\n sys.exit(1)", "def test_read_no_file():\n filename = 'asdf'\n with pytest.raises(FileNotFoundError):\n read_file(filename)", "def test_missing_file(self):\n with self.assertRaises(subprocess.CalledProcessError) as cm:\n subprocess.check_output(\n [sys.executable, idf_py_path, '--version', '@args_non_existent'],\n env=os.environ,\n stderr=subprocess.STDOUT).decode('utf-8', 'ignore')\n self.assertIn('(expansion of @args_non_existent) could not be opened', cm.exception.output.decode('utf-8', 'ignore'))", "def _check_valid_file(self, file):\n\n try:\n _ = open(f\"{file}\")\n except FileNotFoundError:\n raise ValueError", "def checkExists(fileName):\n if fileName == '' or not pathlib.Path(fileName).exists():\n print('Error: {} is not found !!!'.format(fileName))\n exit()", "def check_for_assemble_file(task_file):\n if not os.path.exists(task_file):\n print_failure_msg(\"{} file is missing\".format(task_file))\n exit(127)\n return True", "def test_wait_for_file(self):\r\n\r\n # wait_for_file has a debug/test mode, in which it raises an exception\r\n # instead of going to sleep\r\n # should not raise anything on valid file\r\n try:\r\n wait_for_file(\"/tmp/denoiser_utils_dummy.tmp\", test_mode=True)\r\n except RuntimeWarning:\r\n self.fail(\"wait_for_file fails on valid file\")\r\n\r\n # but should raise on file not present\r\n self.assertRaises(RuntimeWarning, wait_for_file, \"/foo/bar/baz\",\r\n test_mode=True)", "def check_for_missing_files(self, path):\n return None", "def test_valid_file_raises():\n with pytest.raises(ValueError):\n cli._valid_file(__file__)", "def check_file_exist(self):\n return False", "def assert_is_file_and_exists(self):\n if not self.is_file():\n msg = \"'%s' is not a file or doesn't exists!\" % self\n raise EnvironmentError(msg)", "def check_file(filename: str):\n if os.path.isfile(filename):\n return True\n else:\n raise FileExistsError", "def checkFileExistance(filePath):\n\n try:\n with open(filePath, 'r') as f:\n logger.info(\"Se encontro {}\".format(filePath))\n return True\n except FileNotFoundError as e:\n return False\n except IOError as e:\n return False", "def test_missing_file():\n\n rv, out = getstatusoutput(f'{prg} -o {outfile}')\n assert rv != 0\n assert re.search('the following arguments are required: -f/--file', out)", "def test_existing_file_after_assert_error(exist_of_file):\n try:\n assert read_magic_number(exist_of_file)\n except AssertionError:\n print(\"Now lets do check of existing file\")", "def test_harvester_new_file_exception_recovered(self):\n # create the file so that it is unreadable\n self.create_sample_data_set_dir(\n \"DOS15908_1st7_step1.DAT\",\n RECOV_DIR,\n \"DOS15909.DAT\",\n mode=000\n )\n\n # Start sampling and watch for an exception\n self.driver.start_sampling()\n\n self.assert_exception(IOError)\n\n # At this point the harvester thread is dead. 
The agent\n # exception handle should handle this case.", "def test_file_readas_missing_file(self):\n with (self.assertRaises(IOError)):\n FileReader(self.bogus_path).read_as(\"utf-8\")", "def test_file_read_gzip_missing_file(self):\n with (self.assertRaises(IOError)):\n FileReader(self.bogus_path).read_gzip()", "def verify_path(path):\r\n if not os.path.exists(path):\r\n raise FileNotFoundException(path)", "def file_exist() -> bool:\n pass", "async def test_reading_non_exitisting_certificate_file() -> None:\n assert (\n mqtt.util.migrate_certificate_file_to_content(\"/home/file_not_exists\") is None\n )", "def check_file_open(filename: str, err_string: str, required: bool = False) -> None:\n if required or filename is not None:\n if filename is None:\n print('\\n' + err_string + '\\n')\n sys.exit(1)\n else:\n try:\n pathlib.Path(filename).resolve(strict=True)\n except FileNotFoundError:\n print('\\n' + err_string + '\\n')\n sys.exit(1)", "def check_file_validity(self):\n # Initialize key variables\n file_ = self.tailed_file\n\n # Check if exists\n if os.path.exists(file_) is False:\n log_message = 'File {} does not exist.'.format(file_)\n log.log2die(1018, log_message)\n\n # Check if file\n if os.path.isfile(file_) is False:\n log_message = '{} is not a file.'.format(file_)\n log.log2die(1035, log_message)\n\n # Check if readable\n if not os.access(file_, os.R_OK):\n log_message = 'File {} is not readable.'.format(file_)\n log.log2die(1036, log_message)", "def _check_file_exists(self, filename):\n if not os.path.exists(filename):\n print('\\n[-] ERROR: %s is not at the specified path! \\\n Please check the filepath and filename...' \n %filename)\n return False\n return True", "def check_file_exists(filepath, file_description):\n import os, sys\n if not os.path.exists(filepath):\n print(\"The \" + file_description + \" (\" + filepath + \") does not exist\")\n sys.exit(1)", "def test_read_file_invalid():\n\tfrom ..skySurvey import SkySurvey\n\tfile_list = 0\n\ttry:\n\t\tSkySurvey(file_list = file_list)\n\texcept TypeError:\n\t\tassert True\n\telse:\n\t\tassert False", "def test_error(file_path):\n assert check_file(file_path), \"Training file is not generated\"", "def test_bad_file():\n\n bad = ''.join(random.choices(string.ascii_uppercase + string.digits, k=5))\n rv, out = getstatusoutput(f'{prg} -f {bad}')\n assert rv != 0\n assert re.match('usage:', out, re.I)\n assert re.search(f\"No such file or directory: '{bad}'\", out)", "def test_invalidFile(self):\n self.assertRaises(cesmEnvLib.checkFile(\"blah\", \"write\"))", "def filecheck(filename):\n if not os.path.isfile(filename):\n print(\"Can't find %s\" % filename)\n exit(1)\n else:\n return filename", "def test_nonfile(self):\n self.assertEqual(None,readfiles.read_file(\"tests.txt))", "def test_exists(self):\n self.assertTrue(os.path.exists(__file__) == self._system.exists(__file__))", "def test_file_readlines_missing_file(self):\n with (self.assertRaises(IOError)):\n FileReader(self.bogus_path).readlines()", "def __checkFile(self, filename):\n \n try:\n with open(filename, 'r') as f:\n first_line = f.readline()\n \n if not len(first_line.split(\"\\t\")) == 19:\n raise BadProteomeScoutFile(\"N/A\")\n \n \n except:\n BadProteomeScoutFile(\"Invalid ProteomeScout flat file %s.\\nFile is invalid or corrupted\" % str(filename))", "def test_missing_database_file(self):\n # Technically there's a race condition here, but... 
I'm not\n # particularly fussed about it.\n\n filename = '/%s' % (uuid.uuid4())\n while os.path.exists(filename): # pragma: no cover\n filename = '/%s' % (uuid.uuid4())\n\n with self.assertRaises(Exception):\n app = App(filename)", "def file_name_check(file_name):\n # Please print out which line of the above program contains an error. E.g. if the bug is on line 4 then print 4\n # END OF CONTEXT\n print(\"9\")\n # END OF SOLUTION", "def test_raise_missing_file(self) -> None:\n with pytest.raises(FileNotFoundError):\n YAMLParser().parse(\"test/missing_file.yaml\")", "def _assert_file_is_good(filename):\n\n if not filename:\n return\n\n assert os.path.isfile(filename), filename\n assert os.access(filename, os.R_OK), filename\n assert os.access(filename, os.W_OK), filename", "def test_bad_file():\n\n bad_file = random_string()\n letter = random.choice(string.ascii_lowercase)\n rv, out = getstatusoutput('{} {} -f {}'.format(prg, letter, bad_file))\n assert rv != 0\n expected = \"No such file or directory: '{}'\".format(bad_file)\n assert re.search(expected, out)", "def test_bad_file() -> None:\n\n bad = random_string()\n rv, out = getstatusoutput(f'{RUN} {bad}')\n assert rv != 0\n assert out.lower().startswith('usage:')\n assert re.search(f\"No such file or directory: '{bad}'\", out)", "def test_missing_file(self):\n with self.assertRaises(ConfigFileError):\n engine = Engine(\"/asdfhdfgkjldhsfg.json\", self.api_token)", "def is_file_exists(self):\n pass", "def check_file(f):\n if not os.path.isfile(f):\n raise OSError(f + ' not found.')\n if f.startswith('~'):\n raise OSError(f + ' has \"~\" in path.')", "def test_failures(self):\n reader = TextReader('jacksheet', subject='R1XXX', localization=0)\n with pytest.raises(FileNotFoundError):\n reader.load()", "def is_valid_file(input_file):\n if not os.path.isfile(input_file):\n print('File \\'{}\\' not found.'.format(input_file))\n exit(1)\n return input_file", "def shouldhave(self, thisfile):\n if not os.path.isfile(thisfile):\n self.logtxt(\"ERROR: expected file (%s/%s) does not exist!\" %\n (os.getcwd(), thisfile), 'error')", "def test_load_measure_definition_missing_file():\n with pytest.raises(IOError):\n code_reader.load_quality_codes(json_path='missing_path')", "def test_stress_not_in(generate_no_stress_one_file):\n fname = generate_no_stress_one_file\n with pytest.raises(Exception):\n process_files([fname])", "def file_exists(path):\n\n try:\n with open(path):\n return True\n except IOError:\n return False", "def test_strain_not_in(generate_no_strain_one_file):\n fname = generate_no_strain_one_file\n with pytest.raises(Exception) as f:\n process_files([fname])", "def test_read_raw_suggested(fname):\n with pytest.raises(ValueError, match='Try reading'):\n read_raw(fname)", "def test_read_xls_file_not_found(self):\r\n my_tape = Tape()\r\n my_tape.read_xls('bogus_tape_file')", "def test_load_configuration_fails_gracefully_when_file_does_not_exist():\n config.load_configuration(invalid_configuration_path, graceful=True)\n assert True", "def fileCheck(filePath):\n if not os.path.isfile(filePath):\n return False\n return True", "def check_requirements(self):\n if not os.path.isfile(self.file_path):\n _logger.error(\"File not found\")\n _logger.error(ex)\n raise\n _logger.info(\"File notifier check passed\")", "def check_file_existence(self, files):\n if len(files) == 0:\n if self._nonfile_error is True:\n raise FileNotFound(\"No files are found.\")\n else:\n self._logger.info(\"No files are found. 
Nothing to do.\")\n return\n self._logger.info(\"Files found %s\" % files)", "def test_load_invalid_file(self):\n with self.assertRaises(Exception):\n track = Track.from_filename(__file__)", "def _validate_file(self, filepath: str):\n if not os.path.exists(filepath):\n raise FileNotFoundError(f\"No such file or directory: {filepath}\")\n if not os.path.isfile(filepath):\n raise IsADirectoryError(f\"Is a directory: {filepath}\")", "def test_harvester_new_file_exception(self):\n # create the file so that it is unreadable\n self.create_sample_data_set_dir(\n \"node59p1_step1.dat\",\n TELEM_DIR,\n \"node59p1.dat\",\n mode=000,\n copy_metadata=False\n )\n\n # Start sampling and watch for an exception\n self.driver.start_sampling()\n\n self.assert_exception(ValueError)\n\n # At this point the harvester thread is dead. The agent\n # exception handle should handle this case.", "def test_invalid_path() -> None:\n path = rsc / \"does-not-exist.ods\"\n with pytest.raises(FileNotFoundError, match=\"does not exist\"):\n read_ods(path)", "def does_file_exist(self, fn):\n if True:\n print(f\"-=- {fn} found.\")\n return True\n else:\n print(f\"-!- {fn} not found. Try again\")\n return False", "def test_empty_file(self):\n input_file = \"does_not_exist.fasta\"\n self.assertFalse(os.path.isfile(input_file))\n\n cline = XXmotifCommandline(outdir=self.out_dir, seqfile=input_file)\n\n try:\n stdout, stderr = cline()\n except ApplicationError as err:\n self.assertEqual(err.returncode, 255)\n else:\n self.fail(f\"Should have failed, returned:\\n{stdout}\\n{stderr}\")", "def test_pickle_file_not_found(self):\n self.assertRaises(IOError, self.plugin.load_data)", "def test_resumeWhenFileDoesNotExist(self):\n fp = FilePath(self.mktemp())\n\n error = self.assertRaises(\n OSError, self.makeConnectedDccFileReceive, fp.path, resumeOffset=1\n )\n\n self.assertEqual(errno.ENOENT, error.errno)", "def test_log_filenames_file_not_found(self):\n\n filename = '/%s' % (uuid.uuid4())\n while os.path.exists(filename): # pragma: no cover\n filename = '/%s' % (uuid.uuid4())\n\n with self.assertRaises(Exception):\n self.app.log_filenames([filename])\n self.assertEqual(self.get_track_count(), 0)", "def test_raise_error_unknown_file():\n\n options = {'input_files': ['Sparta.lol']}\n\n with pytest.raises(KeyError) as excinfo:\n process.Packager(options)\n excinfo.match(r'File ([a-zA-Z_\\.\\'].*) not found in file list.')", "def _file_exists(name):\n try:\n f = open(name)\n f.close()\n return True\n except IOError:\n return False", "def check_file_validity(self, file_):\n if not os.access(file_, os.F_OK):\n raise TailError(\"File '%s' does not exist\" % (file_))\n if not os.access(file_, os.R_OK):\n raise TailError(\"File '%s' not readable\" % (file_))\n if os.path.isdir(file_):\n raise TailError(\"File '%s' is a directory\" % (file_))", "def ensure_file_exists(file_path):\n\n if not (os.path.exists(file_path) and os.access(file_path, os.R_OK)):\n # This is bad.\n raise CronException(\"Path {0} does not exist or can not be read.\".format(file_path))", "def isexe(fpath):\n return path.exists(fpath) and access(fpath, X_OK)", "def preliminary_file_check(self):\n\n if self.has_error():\n return False\n\n if not self.filepath:\n self.add_error(\"A file was specified!\")\n return False\n\n if not isfile(self.filepath):\n self.add_error(\"The file was not found: %s\" % basename(self.filepath))\n return False\n\n if getsize(self.filepath) < 1:\n self.add_error(\"The file is empty (no bytes): %s\" % basename(self.filepath))\n return False\n\n if 
self.file_ext in ['xls', 'xlsx']:\n self.is_excel = True\n\n return True", "def _is_valid_file(arg: str) -> str:\n if not os.path.exists(arg):\n raise FileNotFoundError(\"%s does not exist\" % arg)\n return arg", "def test_file_unused(self):\n try:\n with get_temp_file() as (fd, name):\n pass\n except Exception as err:\n self.fail('Failed with exception \"{}\"'.format(err))\n else:\n file_exists = os.access(name, os.F_OK)\n self.assertFalse(file_exists)", "def test_read_components_from_rulefile_rulefile_not_specified2(tmp_path):\n with pytest.raises(SystemExit):\n _read_components_from_rulefile()", "def test_empty_file(self):\n with open(os.path.join(test_dir, \"empty_file.txt\")) as f:\n for idx, line in enumerate(reverse_readline(f)):\n raise ValueError(\"an empty file is being read!\")", "def test_nonexistent_path():\r\n with pytest.raises(RuntimeError):\r\n Image(os.path.join(current_dir, \"0--0.jpg\")).read_all()\r\n assert check_md5(path, jpg_path), \"The file has been changed when reading\"", "def test_file_exists(self):\n self.assertTrue(os.path.exists(\"file.json\"))", "def file_checker(file_name):\n if os.path.islink(file_name):\n print \"Crypto device Symlink %s exists\" % file_name\n return True\n else: \n try:\n with open(file_name):\n print \"File %s exists\" % file_name\n return True\n except IOError:\n print \"File %s does not exists\" % file_name\n return False" ]
[ "0.7671936", "0.7364344", "0.7215916", "0.7126976", "0.7051832", "0.7051832", "0.7029998", "0.7013632", "0.69907284", "0.6982232", "0.69754636", "0.6973892", "0.6960266", "0.68636847", "0.68334687", "0.6810809", "0.67925715", "0.67872965", "0.67819893", "0.6771102", "0.6724908", "0.6721395", "0.6694034", "0.66726434", "0.6670869", "0.6646556", "0.6609078", "0.6607676", "0.6606538", "0.6591546", "0.65807873", "0.6561787", "0.65379953", "0.65204704", "0.65124065", "0.64760566", "0.646833", "0.64619297", "0.64577335", "0.64511764", "0.6450888", "0.64444727", "0.6423475", "0.6416815", "0.64082366", "0.63980687", "0.6368829", "0.63531935", "0.6344527", "0.6325671", "0.63033515", "0.63024646", "0.629707", "0.62952715", "0.6294567", "0.62915194", "0.62910336", "0.6284607", "0.6280517", "0.626077", "0.62450516", "0.62379956", "0.6230584", "0.6221134", "0.62114877", "0.6209573", "0.62055224", "0.62028795", "0.61910576", "0.6185444", "0.6181444", "0.61754054", "0.6159011", "0.61548615", "0.6153296", "0.6150749", "0.6150181", "0.6146286", "0.61427885", "0.61318415", "0.61286753", "0.61252105", "0.6121587", "0.6118366", "0.6116177", "0.6115056", "0.6112975", "0.61093307", "0.6099326", "0.6097191", "0.6096866", "0.6096486", "0.60879964", "0.60872656", "0.60865474", "0.6083822", "0.60832006", "0.6079074", "0.60769475", "0.6076337", "0.6073209" ]
0.0
-1
Run the Viterbi algorithm. N is the number of tokens (length of the sentence); L is the number of labels.
def run_viterbi(emission_scores, trans_scores, start_scores, end_scores):
    L = start_scores.shape[0]
    assert end_scores.shape[0] == L
    assert trans_scores.shape[0] == L
    assert trans_scores.shape[1] == L
    assert emission_scores.shape[1] == L
    N = emission_scores.shape[0]
    trans_scores += start_scores
    back_ptrs = np.zeros_like(emission_scores, dtype=np.int32)
    emission_scores += start_scores
    em_scores = np.zeros_like(emission_scores)
    em_scores[0] = start_scores + emission_scores[0]
    for k in range(1, N):
        transition_plus_score = trans_scores + np.expand_dims(em_scores[k-1], 1)
        back_ptrs[k] = np.argmax(transition_plus_score, 0)
        em_scores[k] = np.max(transition_plus_score, 0) + emission_scores[k]
    v = [np.argmax(end_scores + em_scores[-1])]
    v_score = np.max(end_scores + em_scores[-1])
    for back_ptr in reversed(back_ptrs[1:]):
        v.append(back_ptr[v[-1]])
    v.reverse()
    return v_score, v
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def predict_viterbi(self, tokens: TokenSeq) -> Tuple[NDArray, NDArray, PosSeq]:", "def label_n_elements(\n self,\n n_elements: int,\n model,\n data_process_fn,\n ) -> int:\n n_to_sample = min(len(self.unlabelled_idx_set), n_elements)\n model.eval()\n unlabelled_indices = list(self.unlabelled_idx_set)\n heap = FixedHeap(key=lambda x : x[0])\n\n for indices in BatchSampler(SequentialSampler(unlabelled_indices),\n self._batch_sampler_size,\n drop_last=False):\n indices_to_evaluate = [unlabelled_indices[i] for i in indices]\n batch_data = [self.train_data[i] for i in indices_to_evaluate]\n model_input, _, seq_lens = data_process_fn(batch_data)\n # batch size, seq_len, n_tags\n output = model(*model_input)\n nll = output.max(axis=2)[0]\n nll_masked = mask_sequence(nll, seq_lens)\n nll_sentences = nll_masked.sum(axis=1)\n # mnlp = nll_sentences\n mnlp = torch.div(nll_sentences, seq_lens)\n # min heap\n for mnlp, index in zip(mnlp, indices_to_evaluate):\n mnlp = mnlp.item()\n if len(heap) < n_to_sample:\n heap.push((-mnlp, index))\n else:\n top_mnlp, _ = heap.top()\n if mnlp < -top_mnlp:\n heap.pop()\n heap.push((-mnlp, index))\n while len(heap) > 0:\n mnlp, idx = heap.pop()\n self.labelled_idx_set.add(idx)\n self.unlabelled_idx_set.remove(idx)\n del heap\n return n_to_sample", "def Viterbi(_sentence, _model, _emission_df, _transition_df):\n\n if not _sentence:\n return []\n\n # EXECUTE VITERBI\n states = [state for state, _ in _model.y_count.items()]\n states.remove('__START__')\n states.remove('__STOP__')\n\n # keep table of values\n # (len(states) x len(sentence))\n value_table = [[0 for x in range(len(_sentence) + 1)] for y in range(len(states))]\n\n # keep table of sequences\n sequence_table = [[[] for x in range(len(_sentence))] for y in range(len(states))]\n\n # base case - START to all states\n for i in range(len(states)):\n # transition prob from __START__ to anything\n try:\n transition_prob = _transition_df[('__START__', states[i])]\n except KeyError:\n transition_prob = 0.0\n\n # error occurs here due to empty _sentence\n try:\n emission_prob = _emission_df[(_sentence[0], states[i])]\n except KeyError:\n emission_prob = 0.0\n\n value_table[i][0] = float(transition_prob) * float(emission_prob)\n sequence_table[i][0] = ['__START__', states[i]]\n\n # iterative/recursive case - state to state\n for i in range(1, len(_sentence)):\n\n # storage for prev\n prev_optimal = 0.0\n prev_state_seq = []\n\n for j in range(len(states)):\n try:\n # find e(xi|yj)\n emission_prob = float(_emission_df[(_sentence[i], states[j])])\n except KeyError:\n emission_prob = 0.0\n\n if prev_optimal == 0.0:\n # find optimal from state to state prob\n for k in range(len(states)):\n test_opti = float(value_table[k][i-1])\n if test_opti >= prev_optimal:\n prev_optimal = test_opti\n prev_state_seq = sequence_table[k][i-1]\n\n # given prev optimal, calculate transition prob\n try:\n # find transition prob from prev optimal state to current\n transition_prob = float(_transition_df[(prev_state_seq[-1], states[j])])\n except KeyError:\n transition_prob = 0.0\n\n prob = prev_optimal * transition_prob * emission_prob\n next_state_seq = prev_state_seq + [states[j]]\n\n value_table[j][i] = prob\n sequence_table[j][i] = next_state_seq\n\n # end case - all states to __STOP__\n for i in range(len(states)):\n try:\n transition_prob = _transition_df[(states[i], '__STOP__')]\n except KeyError:\n transition_prob = 0.0\n\n value_table[i][-1] = float(transition_prob) * float(value_table[i][-2])\n\n # take optimal from table and 
return optimal val and sequence\n max_val = 0\n result_seq = []\n for i in range(len(states)):\n prob = float(value_table[i][-1]) # take all from last\n if max_val == 0 or prob > max_val:\n max_val = prob\n result_seq = sequence_table[i][-1]\n\n return result_seq[1:]", "def viterbi(sent, dqml, eqml, S, V_CASE=-1):\n\n if type(sent) is list:\n sent_words = sent\n else:\n sent_words = word_tokenize(sent)\n n = len(sent_words)\n\n # define and initialize PI table\n pi = defaultdict(Counter)\n pi[0]['*'] = 1\n bp = {}\n\n for k in range(1, n+1):\n bp[k] = {}\n for v in S:\n eml = compute_eml(V_CASE, eqml, k, sent_words, v)\n if k-1 is 0: # w e S_0 -> w = '*'\n qmlr = compute_qml(dqml, v, '*')\n pival = pi[0]['*'] * qmlr * eml\n pi[k][v] = pival\n bp[k][v] = '*'\n else: # for w e S_k, S_k = S\n max_S = None\n max_w = -1\n for w in S:\n qmlr = compute_qml(dqml, v, w)\n currmax = pi[k-1][w] * qmlr * eml\n if currmax > 0 and currmax > max_w:\n max_w = currmax\n max_S = w\n # if word is unknown use tag 'NN'\n if max_S is None:\n max_w = 0.0\n max_S = UNKNOWN_TAG\n pi[k][v] = max_w\n bp[k][v] = max_S\n\n # calculate y_n\n max_y = -1\n yn = None\n for v in S:\n nextmax = pi[n][v] * compute_propability('STOP', v, dqml)\n if nextmax > max_y:\n max_y = nextmax\n yn = v\n\n # calculate y_n-1....y1\n yk1 = yn\n tagSequence = list()\n tagSequence.append(yn)\n for k in range(n-1,0,-1):\n yk = bp[k+1][yk1]\n tagSequence.append(yk)\n yk1 = yk\n\n tagSequence.reverse()\n return tagSequence", "def run_viterbi(emission_scores, trans_scores, start_scores, end_scores):\r\n\r\n L = start_scores.shape[0]\r\n assert end_scores.shape[0] == L\r\n assert trans_scores.shape[0] == L\r\n assert trans_scores.shape[1] == L\r\n assert emission_scores.shape[1] == L\r\n N = emission_scores.shape[0]\r\n\r\n #T - Score matrix same as in assignement pdf\r\n T = np.zeros(shape=(L,N))\r\n #Back pointers - to store the previous best tag for word at (i-1)th position\r\n #that resulted into current best tag for (i)th word \r\n back_pointer = np.full((L,N), -1)\r\n\r\n for i in xrange(L):\r\n emission = emission_scores[0][i]\r\n combined = emission + start_scores[i]\r\n T[i][0] = combined\r\n\r\n # Loop over all the words in a sequesnce\r\n for i in xrange(1, N):\r\n # Loop over all the tags for the word at index i \r\n for j in xrange(L):\r\n # Varibale for maximum tag score from previous word (word at i-1)\r\n tmp_max = float('-inf')\r\n tmp_max_idx = -1\r\n #Emission value of word at idx i from state (i.e tag) j\r\n emission = emission_scores[i][j]\r\n #Loop over all the possibile tags for previous word T[tag (1..L), word at i-1]\r\n #and get max among them. 
Store the corresponding back pointer for there T[tag (1..L), word at i-1]\r\n for k in xrange(L):\r\n transition = trans_scores[k][j]\r\n prev_path = T[k][i-1]\r\n combined = transition + prev_path\r\n if (tmp_max < combined):\r\n tmp_max = combined\r\n tmp_max_idx = k\r\n\r\n back_pointer[j][i] = tmp_max_idx\r\n T[j][i] = tmp_max + emission\r\n\r\n # Doing this step outside because if N == 1 then above loop will not run\r\n # Variable for maximum tag score\r\n tag_max = float('-inf')\r\n # Variable for back pointer(previous T[tag, word])\r\n tag_max_idx = -1\r\n for i in xrange(L):\r\n T[i][N-1] = T[i][N-1] + end_scores[i]\r\n if (tag_max < T[i][N-1]):\r\n tag_max = T[i][N-1]\r\n tag_max_idx = i\r\n # print(\"Max tag -> \" + str(tag_max_idx))\r\n\r\n #Variable to track the path length - should be equal to N\r\n path_length = 0\r\n #Variable to back track on the tags\r\n tag_idx = tag_max_idx\r\n #Varibale to track the word index in N\r\n word_idx = N-1 \r\n #Path strored using backtracking\r\n y = []\r\n\r\n #Getting the best path using backtracking on back_pointers\r\n while path_length != N-1:\r\n y.append(back_pointer[tag_idx][word_idx])\r\n tag_idx = back_pointer[tag_idx][word_idx]\r\n word_idx = word_idx - 1\r\n path_length = path_length + 1\r\n\r\n #Reversing the backtracked path\r\n y = y[::-1]\r\n #Adding the tag for the last word idx in N\r\n y.append(tag_max_idx)\r\n # print(\"Path -> \" + str(y))\r\n\r\n return (tag_max, y)", "def Modified_Viterbi(_sentence, _model, _emission_df, _transition_df, _2nd_order_df):\n\n if not _sentence:\n return []\n\n # EXECUTE VITERBI\n states = [state for state, _ in _model.y_count.items()]\n states.remove('__START__')\n states.remove('__STOP__')\n\n # keep table of values\n # (len(states) x len(sentence))\n value_table = [[0 for x in range(len(_sentence) + 1)] for y in range(len(states))]\n\n # keep table of sequences\n sequence_table = [[[] for x in range(len(_sentence))] for y in range(len(states))]\n\n # base case - START to all states, 1st order.\n # 2nd order not possible for base case\n for i in range(len(states)):\n # use 1st order, since 2nd order is non-existent\n # transition prob from __START__ to anything\n try:\n # find transition from start to state\n transition_prob = _transition_df[('__START__', states[i])]\n except KeyError:\n transition_prob = 0.0\n\n # error occurs here due to empty _sentence\n try:\n # Find emission of word from state\n emission_prob = _emission_df[(_sentence[0], states[i])]\n except KeyError:\n emission_prob = 0.0\n\n value_table[i][0] = float(transition_prob) * float(emission_prob)\n sequence_table[i][0] = ['__START__', states[i]]\n\n # iterative/recursive case - 2nd order\n # loop through rest of words in sentence\n for i in range(1, len(_sentence)):\n\n # storage for prev\n prev_optimal = 0.0\n prev_state_seq = []\n\n # loop through states for every word\n for j in range(len(states)):\n try:\n # find e(xi|yj), prob emitting word from current state\n emission_prob = float(_emission_df[(states[j], _sentence[i])])\n except KeyError:\n emission_prob = 0\n\n # find prev_optimal\n if prev_optimal == 0.0:\n for k in range(len(states)):\n test_optimal = float(value_table[k][i-1])\n if test_optimal >= prev_optimal:\n prev_optimal = test_optimal\n prev_state_seq = sequence_table[k][i-1]\n\n prev_1 = prev_state_seq[-1]\n prev_2 = prev_state_seq[-2]\n\n # use 2nd order here - modified\n try:\n transition_prob = float(_2nd_order_df[((prev_2, prev_1), states[j])])\n except KeyError:\n transition_prob = 0.0\n\n prob = 
prev_optimal * transition_prob * emission_prob\n next_state_seq = prev_state_seq + [states[j]]\n\n value_table[j][i] = prob\n sequence_table[j][i] = next_state_seq\n\n # end case - all states to __STOP__\n for i in range(len(states)):\n prev_state_seq = sequence_table[i][-1]\n prev_1 = prev_state_seq[-1]\n prev_2 = prev_state_seq[-2]\n try:\n transition_prob = float(_2nd_order_df[((prev_2, prev_1), '__STOP__')])\n except KeyError:\n transition_prob = 0.0\n\n value_table[i][-1] = float(transition_prob) * float(value_table[i][-2])\n\n max_val = 0\n result_seq = []\n for i in range(len(states)):\n prob = float(value_table[i][-1]) # take all from last\n if max_val == 0 or prob > max_val:\n max_val = prob\n result_seq = sequence_table[i][-1]\n\n return result_seq[1:]", "def naiveBayesMixture(train_set, train_labels, dev_set, bigram_lambda,unigram_smoothing_parameter, bigram_smoothing_parameter, pos_prior):\n\n # TODO: Write your code here\n # return predicted labels of development set\n\n # counters for Training Phase\n ham = Counter()\n ham_bi = Counter()\n spam = Counter()\n spam_bi = Counter()\n\n for string, label in zip(train_set, train_labels):\n for i in range(len(string)):\n word = string[i]\n if i != len(string)-1:\n word_bi = string[i] + ' ' + string[i+1]\n if label == 1:\n ham_bi.update({word_bi:1})\n else:\n spam_bi.update({word_bi:1})\n if label == 1:\n ham.update({word:1})\n else:\n spam.update({word:1})\n\n ham_len = 0\n for w in ham:\n ham_len += ham[w]\n spam_len = 0\n for w in spam:\n spam_len += spam[w]\n \n hambi_len = 0\n for w in ham_bi:\n hambi_len += ham_bi[w]\n spambi_len = 0\n for w in spam_bi:\n spambi_len += spam_bi[w]\n\n # labels for Development Phase\n dev_labels = []\n # dicts for P(word|ham) and P(word|spam)\n p_ham = {}\n p_spam = {}\n p_hambi = {}\n p_spambi = {}\n\n # develop likelihoods based on dev_set\n for word in ham:\n numerator = ham[word] + unigram_smoothing_parameter\n denominator = ham_len + unigram_smoothing_parameter*(len(ham))\n p_ham[word] = numerator / denominator\n for word in spam:\n numerator = spam[word] + unigram_smoothing_parameter\n denominator = spam_len + unigram_smoothing_parameter*(len(spam))\n p_spam[word] = numerator / denominator\n\n for word_bi in ham_bi:\n numerator = ham_bi[word_bi] + bigram_smoothing_parameter\n denominator = hambi_len + bigram_smoothing_parameter*(len(ham_bi))\n p_hambi[word_bi] = numerator / denominator\n for word_bi in spam_bi:\n numerator = spam_bi[word_bi] + bigram_smoothing_parameter\n denominator = spambi_len + bigram_smoothing_parameter*(len(spam_bi))\n p_spambi[word_bi] = numerator / denominator\n \n numerator = unigram_smoothing_parameter\n denominator = ham_len + unigram_smoothing_parameter*(len(ham))\n p_ham_zero = numerator / denominator\n denominator = spam_len + unigram_smoothing_parameter*(len(spam))\n p_spam_zero = numerator / denominator\n\n numerator = bigram_smoothing_parameter\n denominator = hambi_len + bigram_smoothing_parameter*(len(ham_bi))\n p_hambi_zero = numerator / denominator\n denominator = spambi_len + bigram_smoothing_parameter*(len(spam_bi))\n p_spambi_zero = numerator / denominator\n\n for string in dev_set:\n p_words_ham = math.log(pos_prior)\n p_words_spam = math.log(1 - pos_prior)\n\n p_words_hambi = math.log(pos_prior)\n p_words_spambi = math.log(1 - pos_prior)\n \n for i in range(len(string)):\n word = string[i]\n if word in p_ham:\n p_words_ham += math.log(p_ham[word])\n else:\n p_words_ham += math.log(p_ham_zero)\n if word in p_spam:\n p_words_spam += 
math.log(p_spam[word])\n else:\n p_words_spam += math.log(p_spam_zero)\n\n if i != len(string)-1:\n word_bi = string[i] + ' ' + string[i+1]\n if word_bi in p_hambi:\n p_words_hambi += math.log(p_hambi[word_bi])\n else:\n p_words_hambi += math.log(p_hambi_zero)\n if word_bi in p_spambi:\n p_words_spambi += math.log(p_spambi[word_bi])\n else:\n p_words_spambi += math.log(p_spambi_zero)\n\n p_ham_mix = p_words_ham*(1-bigram_lambda) + p_words_hambi*bigram_lambda\n p_spam_mix = p_words_spam*(1-bigram_lambda) + p_words_spambi*bigram_lambda\n\n dev_labels.append(p_ham_mix >= p_spam_mix)\n\n return dev_labels", "def decode(self, input):\n S = [s[0] for s in self.ngram_counts[0].keys()] # Set of tags\n _S = [s[0] for s in self.ngram_counts[0].keys()]\n _S.append('*') # _S includes '*' tag\n X = ['*'] # X stores each sentence. X[0] = '*', X[i] = xi\n for l in input:\n x = l.strip()\n if x: # Word\n X.append(x)\n else: # End of a sentence\n n = len(X) - 1 # the length of the sentence\n pi = defaultdict(float) # DP table PI\n bp = {} # back pointer\n\n # Initialize DP table\n for u in _S:\n for v in _S:\n pi[tuple([0, u, v])] = 0\n pi[tuple([0, '*', '*'])] = 1\n\n # Viterbi algorithm\n for k in xrange(1, n + 1):\n for u in _S:\n for v in S: # v will not be '*' \n max_score = 0\n tag = None\n for w in _S:\n if sum([self.emission_counts[tuple([y, X[k]])] for y in S]) < 5: # If the word X[k] is rare word or unseen word in the training corpus,\n x = symbolize(X[k], self.symbolize_option) # use RARE word probability\n else:\n x = X[k]\n try:\n score = pi[tuple([k-1, w, u])] * self.q(v, w, u) * self.e(x, v)\n if max_score < score:\n max_score = score\n tag = w\n except:\n pass\n pi[tuple([k, u, v])] = max_score # Update DP table entry\n bp[tuple([k, u, v])] = tag\n\n # Find tag sequence\n Y = ['*'] # Y stores tag sequence for X. 
Y[0] = '*', Y[i] = yi\n Y.extend(n * [None])\n max_score = None\n tag = None\n for u in _S:\n for v in _S:\n if self.ngram_counts[1][tuple([u, v])]:\n score = pi[tuple([n, u, v])] * self.q('STOP', u, v)\n if max_score is None or max_score < score:\n max_score = score\n tag = [u, v]\n Y[n-1] = tag[0]\n Y[n] = tag[1]\n for k in xrange(n - 2, 0, -1):\n Y[k] = bp[tuple([k + 2, Y[k + 1], Y[k + 2]])]\n\n # Write result\n prev = '*'\n for k in xrange(1, n + 1):\n print X[k], Y[k], log(pi[tuple([k, prev, Y[k]])])\n prev = Y[k]\n print ''\n\n X = ['*'] # set for the next sentence", "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--file', '-f', type=str, help='path to corpus file', default='./train')\n args = parser.parse_args()\n\n corpus_reader = CorpusReader(args.file)\n model = BigramModel(corpus_reader.sents())\n\n test_sentences = ['Suggestive, Watson, is it not?',\n 'It is amazing that a family can be torn apart by something as simple as a pack of wild dogs!',\n 'So spoke Sherlock Holmes and turned back to the great scrapbook in which he was arranging and indexing some of his recent material.',\n 'What I like best about my friends is that they are few.',\n 'Friends what is like are they about I best few my that.']\n\n # prints two paragraphs with each five sentences\n for _ in range(2):\n print(generate(model, 5) + '\\n')\n\n # for each sentence in the test_sentences print the perplexity\n for sentence in test_sentences:\n print(model.perplexity(nltk.word_tokenize(sentence)))", "def Viterbi(words:Sequence[str], train_bag:Sequence[Tuple[str, str]]=train_tagged_words)-> Sequence[Tuple[str, str]]:\n state = []\n tags_set = list(set([pair[1] for pair in train_bag]))\n\n for key, word in enumerate(words):\n # initialise list of probability column for a given observation\n p = []\n for tag in tags_set:\n if key == 0:\n transition_p = tags_df.loc['.', tag]\n else:\n transition_p = tags_df.loc[state[-1], tag]\n\n # compute emission and state probabilities\n emission_p_parts = word_given_tag(word, tag)\n emission_p = emission_p_parts[0]/emission_p_parts[1]\n state_probability = emission_p * transition_p\n p.append(state_probability)\n\n p_max = max(p)\n # getting state for which probability is maximum\n state_max = tags_set[p.index(p_max)]\n state.append(state_max)\n return list(zip(words, state))", "def naiveBayes(train_set, train_labels, dev_set, smoothing_parameter, pos_prior):\r\n # TODO: Write your code here\r\n # return predicted labels of development set\r\n retval = []\r\n smoothing_parameter = 0.0055\r\n # Generate a unigram BOW for both positive and negative reviews, choose the top 2500 words\r\n pos_bow, neg_bow = generate_unigram_BOW(train_set, train_labels)\r\n sorted_pos = sorted(pos_bow.items(), key=lambda x: x[1], reverse = True)\r\n sorted_neg = sorted(neg_bow.items(), key=lambda x: x[1], reverse = True)\r\n pos_words = sorted_pos[:].copy()\r\n neg_words = sorted_neg[:].copy()\r\n\r\n pos_bi_bow, neg_bi_bow = generate_bigram_BOW(train_set, train_labels)\r\n sorted_bi_pos = sorted(pos_bi_bow.items(), key=lambda x: x[1], reverse = True)\r\n sorted_bi_neg = sorted(neg_bi_bow.items(), key=lambda x: x[1], reverse = True)\r\n bi_pos_words = sorted_bi_pos[:].copy()\r\n bi_neg_words = sorted_bi_neg[:].copy()\r\n\r\n # Calculate the log probabilities each word given type\r\n pos_count = sum(pair[1] for pair in pos_words)\r\n neg_count = sum(pair[1] for pair in neg_words)\r\n bi_pos_count = sum(pair[1] for pair in bi_pos_words)\r\n bi_neg_count = sum(pair[1] for pair in 
bi_neg_words)\r\n\r\n log_probability_pos = {} #(word)->P(word|positive)\r\n log_probability_neg = {} #(word)->P(word|negative)\r\n log_prob_bi_pos = {}\r\n log_prob_bi_neg = {}\r\n\r\n for pair in pos_words:\r\n pos_prob = np.log((pair[1]+smoothing_parameter)/(pos_count+smoothing_parameter*(len(pos_words) + 1)))\r\n log_probability_pos[pair[0]] = pos_prob\r\n\r\n for pair in neg_words:\r\n neg_prob = np.log((pair[1]+smoothing_parameter)/(neg_count+smoothing_parameter*(len(neg_words) + 1)))\r\n log_probability_neg[pair[0]] = neg_prob\r\n\r\n for pair in bi_pos_words:\r\n bi_pos_prob = np.log((pair[1]+smoothing_parameter)/(bi_pos_count+smoothing_parameter*(len(bi_pos_words) + 1)))\r\n log_prob_bi_pos[pair[0]] = bi_pos_prob\r\n\r\n for pair in bi_neg_words:\r\n bi_neg_prob = np.log((pair[1]+smoothing_parameter)/(bi_neg_count+smoothing_parameter*(len(bi_neg_words) + 1)))\r\n log_prob_bi_neg[pair[0]] = bi_neg_prob\r\n # Finished training\r\n\r\n # For each of the new reviews from development data\r\n for review in dev_set:\r\n uni_pos = np.log(pos_prior)\r\n uni_neg = np.log(1 - pos_prior)\r\n for word in review:\r\n if word in log_probability_pos:\r\n uni_pos += log_probability_pos[word]\r\n elif word not in log_probability_pos:\r\n uni_pos += np.log(smoothing_parameter/(pos_count+smoothing_parameter*(len(pos_words) + 1)))\r\n\r\n if word in log_probability_neg:\r\n uni_neg += log_probability_neg[word]\r\n elif word not in log_probability_neg:\r\n uni_neg += np.log(smoothing_parameter/(neg_count+smoothing_parameter*(len(neg_words) + 1)))\r\n\r\n bi_pos = np.log(pos_prior)\r\n bi_neg = np.log(1 - pos_prior)\r\n for i in range(len(review)-1):\r\n currTuple = (review[i], review[i+1])\r\n if currTuple in log_prob_bi_pos:\r\n bi_pos += log_prob_bi_pos[currTuple]\r\n elif currTuple not in log_prob_bi_pos:\r\n bi_pos += np.log(smoothing_parameter/(bi_pos_count+smoothing_parameter*(len(bi_pos_words) + 1)))\r\n\r\n if currTuple in log_prob_bi_neg:\r\n bi_neg += log_prob_bi_neg[currTuple]\r\n elif currTuple not in log_prob_bi_neg:\r\n bi_neg += np.log(smoothing_parameter/(bi_neg_count+smoothing_parameter*(len(bi_neg_words) + 1)))\r\n\r\n MAP_pos = (1-0.4)*uni_pos + 0.4*bi_pos\r\n MAP_neg = (1-0.4)*uni_neg + 0.4*bi_neg\r\n\r\n if MAP_pos >= MAP_neg:\r\n retval.append(1)\r\n else:\r\n retval.append(0)\r\n\r\n return retval", "def TagWithViterbi(_out, _file, _model, _emission_df, _transition_df, _2nd_order_df=None):\n\n # Generate array for possible words\n word_bag = _model.x_y_count\n reader = open(_file, 'r', encoding='utf-8')\n\n # Generate array of arrays for sentences in document\n unlabelled_tweets = []\n temp_data = []\n for line in reader:\n word = line.strip()\n word = word.lower()\n if word == \"\":\n if temp_data: # catch any multiple line breaks\n unlabelled_tweets.append(temp_data)\n temp_data = []\n else:\n temp_data.append(word)\n unlabelled_tweets.append(temp_data)\n\n # Keep a global array of array of results for final\n # most likely states\n results = []\n\n # execute viterbi for each sentence\n for sentence in unlabelled_tweets:\n parsed_sentence = []\n # parse and replace unknowns with #UNK#\n for i in range(len(sentence)):\n if sentence[i] in word_bag:\n parsed_sentence.append(sentence[i])\n else:\n parsed_sentence.append('#UNK#')\n if _2nd_order_df is None:\n result = Viterbi(parsed_sentence, _model, _emission_df, _transition_df)\n else:\n result = Modified_Viterbi(parsed_sentence, _model, _emission_df, _transition_df, _2nd_order_df)\n\n results.append(result)\n\n # write results 
array into generated file\n writer = open(_out, 'w', encoding='utf-8')\n for i in range(len(unlabelled_tweets)):\n for j in range(len(unlabelled_tweets[i])):\n tweet = unlabelled_tweets[i][j]\n sentiment = results[i][j]\n writer.write('{} {}\\n'.format(tweet, sentiment))\n writer.write('\\n') # empty line denoting end of tweet sentence\n writer.close()\n reader.close()", "def naiveBayes(train_set, train_labels, dev_set, smoothing_parameter, org_dev_labels):\r\n\r\n # set to false to use bigram implementation instead\r\n # isUnigram = True\r\n isUnigram = True\r\n\r\n # return predicted labels of development set\r\n spam_words, spam_wordcount = parseIntoWordList(train_set, train_labels, 1)\r\n ham_words, ham_wordcount = parseIntoWordList(train_set, train_labels, 0)\r\n\r\n spamWords, spamProbs, spamUNK = createProbabilitiesList(spam_words, spam_wordcount, smoothing_parameter)\r\n hamWords, hamProbs, hamUNK = createProbabilitiesList(ham_words, ham_wordcount, smoothing_parameter)\r\n\r\n loggedSpam = np.log(spamProbs)\r\n loggedSpamUNK = np.log(spamUNK)\r\n loggedHam = np.log(hamProbs)\r\n loggedHamUNK = np.log(hamUNK)\r\n\r\n # Unigram\r\n dev_spam = []\r\n dev_ham = []\r\n\r\n dev_labels = []\r\n\r\n if isUnigram:\r\n for i in range(len(dev_set)):\r\n probSpam = 0\r\n probHam = 0\r\n\r\n for word in dev_set[i]:\r\n if word in spamWords:\r\n index = spamWords.index(word)\r\n probSpam += loggedSpam[index]\r\n else:\r\n probSpam += loggedSpamUNK\r\n\r\n if word in hamWords:\r\n index = hamWords.index(word)\r\n probHam += loggedHam[index]\r\n else:\r\n probHam += loggedHamUNK\r\n\r\n if (probSpam > probHam):\r\n dev_labels.append(1)\r\n else:\r\n dev_labels.append(0)\r\n\r\n else:\r\n for i in range(len(dev_set)):\r\n probSpam = 0\r\n probHam = 0\r\n\r\n for word in dev_set[i]:\r\n if word in spamWords:\r\n index = spamWords.index(word)\r\n probSpam += loggedSpam[index]\r\n else:\r\n probSpam += loggedSpamUNK\r\n\r\n if word in hamWords:\r\n index = hamWords.index(word)\r\n probHam += loggedHam[index]\r\n else:\r\n probHam += loggedHamUNK\r\n dev_spam.append(probSpam)\r\n dev_ham.append(probHam)\r\n # BiGram\r\n bi_spam_words, bi_spam_count = parseIntoBigramList(train_set, train_labels, 1)\r\n bi_ham_words, bi_ham_count = parseIntoBigramList(train_set, train_labels, 0)\r\n\r\n biSpamWords, biSpamProbs, biSpamUNK = createProbabilitiesList(bi_spam_words, bi_spam_count, smoothing_parameter)\r\n biHamWords, biHamProbs, biHamUNK = createProbabilitiesList(bi_ham_words, bi_ham_count, smoothing_parameter)\r\n\r\n biLoggedSpam = np.log(biSpamProbs)\r\n biLoggedSpamUNK = np.log(biSpamUNK)\r\n biLoggedHam = np.log(biHamProbs)\r\n biLoggedHamUNK = np.log(biHamUNK)\r\n\r\n # Bigram\r\n bi_dev_spam = []\r\n bi_dev_ham = []\r\n\r\n for i in range(len(dev_set)):\r\n biProbSpam = 0\r\n biProbHam = 0\r\n curr_email = dev_set[i]\r\n\r\n for j in range(len(curr_email) - 1):\r\n if (j % 2 == 1):\r\n continue\r\n curr_bigram = curr_email[j] + ' ' + curr_email[j + 1]\r\n\r\n if curr_bigram in biSpamWords:\r\n index = biSpamWords.index(curr_bigram)\r\n biProbSpam += biLoggedSpam[index]\r\n else:\r\n biProbSpam += biLoggedSpamUNK\r\n\r\n if curr_bigram in biHamWords:\r\n index = biHamWords.index(curr_bigram)\r\n biProbHam += biLoggedHam[index]\r\n else:\r\n probHam += biLoggedHamUNK\r\n bi_dev_spam.append(probSpam)\r\n bi_dev_ham.append(probHam)\r\n\r\n # Weights the models (1-lambda) multiplier for unigram and lamba multiplier for bigram\r\n dev_labels = getBigram(bi_dev_ham, bi_dev_spam, dev_ham, dev_set, dev_spam, 
org_dev_labels)\r\n\r\n return dev_labels", "def viterbi_bigrams(transition_probabilities, label_matches, prev_tag, word, tag_possibilities):\r\n\tmax_prob = 0\r\n\tbest_tag = \"\"\r\n\ttag_counts = get_tag_counts(label_matches)\r\n\tfor tag in tag_possibilities:\r\n\t\temissions_probability = get_emissions_probability(label_matches, tag, word, tag_counts)\r\n\t\ttag_bigram = (prev_tag,tag)\r\n\t\ttransition_probability = transition_probabilities.get(tag_bigram, 0.000027)\r\n\t\tprob = emissions_probability * transition_probability\r\n\t\tif prob > max_prob:\r\n\t\t\tmax_prob = prob\r\n\t\t\tbest_tag = tag\r\n\tif best_tag == \"\":\r\n\t\tbest_tag = \"o\"\r\n\treturn best_tag", "def evaluate_ngrams(eval_dataset, trigram_counts, bigram_counts, unigram_counts, train_token_count, lambda1, lambda2):\n perplexity = 0\n ### YOUR CODE HERE\n raise NotImplementedError\n ### END YOUR CODE\n return perplexity", "def process_sentence_bigrams(sentence, base_index, label_matches, label_index_dict, transition_probabilities):\r\n\t\"New line!\"\r\n\tsentence_process_index = 0\r\n\tlabel = ''\r\n\tprev_tag = \"<START>\"\r\n\tknown_words = get_known_words(label_matches)\r\n\ttag_options = get_tag_options(label_matches)\r\n\tprev_is_valid = False\r\n\tcurr_is_valid = False\r\n\tstarting_index = 0\r\n\tending_index = starting_index\r\n\twhile sentence_process_index < len(sentence):\r\n\t\tword = sentence[sentence_process_index]\r\n\t\tif word in known_words:\r\n\t\t\tlookup_word = word\r\n\t\telse:\r\n\t\t\tlookup_word = \"<UNK>\"\r\n\t\tbest_tag = viterbi_bigrams(transition_probabilities, label_matches, prev_tag, lookup_word, tag_options)\r\n\t\tcurr_is_valid = (best_tag != 'o') and (len(best_tag)>2)\r\n\t\tif prev_is_valid and curr_is_valid and (best_tag[2:] != prev_tag[2:]):\r\n\t\t\tlabel_index_dict = add_to_label_index_dict(prev_tag[2:], base_index+starting_index, base_index+ending_index, label_index_dict)\r\n\t\t\tstarting_index = sentence_process_index\r\n\t\t\tending_index = starting_index\r\n\t\telif curr_is_valid and prev_is_valid and (best_tag[2:] == prev_tag[2:]):\r\n\t\t\tending_index +=1\r\n\t\telif curr_is_valid:\r\n\t\t\tstarting_index = sentence_process_index\r\n\t\t\tending_index = starting_index\r\n\t\telif prev_is_valid:\r\n\t\t\tlabel_index_dict = add_to_label_index_dict(prev_tag[2:], base_index+starting_index, base_index+ending_index, label_index_dict)\r\n\t\t\tstarting_index = sentence_process_index\r\n\t\t\tending_index = starting_index\r\n\t\tprev_tag = best_tag\r\n\t\tprev_is_valid = curr_is_valid\r\n\t\tsentence_process_index +=1\r\n\treturn label_index_dict", "def test_rnnslu(**kwargs):\n # process input arguments\n param = {\n 'fold': 3,\n 'lr': 0.1,\n 'verbose': True,\n 'decay': False,\n 'win': 3,\n 'nhidden': 300,\n 'seed': 345,\n 'emb_dimension': 50,\n 'nepochs': 60,\n 'normal': False,\n 'folder':'../result',\n 'longdependence':None,\n 'optimization':'Adagrad'\n }\n param_diff = set(kwargs.keys()) - set(param.keys())\n if param_diff:\n raise KeyError(\"invalid arguments:\" + str(tuple(param_diff)))\n param.update(kwargs)\n\n if param['verbose']:\n for k,v in param.items():\n print(\"%s: %s\" % (k,v))\n\n # create result folder if not exists\n check_dir(param['folder'])\n\n # load the dataset\n print('... 
loading the dataset')\n train_set, valid_set, test_set, dic = load_data(param['fold'])\n\n # create mapping from index to label, and index to word\n idx2label = dict((k, v) for v, k in dic['labels2idx'].items()) # change label2index - index2label\n idx2word = dict((k, v) for v, k in dic['words2idx'].items()) # change words2index - index2words\n\n # unpack dataset\n train_lex, train_ne, train_y = train_set\n valid_lex, valid_ne, valid_y = valid_set\n test_lex, test_ne, test_y = test_set \n\n train_lex = train_lex + test_lex\n train_y = train_y + test_y\n train_ne = train_ne + test_ne\n\n vocsize = len(dic['words2idx']) # # of words\n nclasses = len(dic['labels2idx']) # # of classes \n nsentences = len(train_lex) # # training sample [a batch is all the words in a sentence]\n\n ## get the label for (input,output) for test and valid set \n groundtruth_valid = [map(lambda x: idx2label[x], y) for y in valid_y]\n words_valid = [map(lambda x: idx2word[x], w) for w in valid_lex]\n\n # instanciate the model\n numpy.random.seed(param['seed'])\n random.seed(param['seed'])\n \n\n print('... building the model')\n lstm = LSTM(\n nh=param['nhidden'],\n nc=nclasses,\n ne=vocsize,\n de=param['emb_dimension'],\n cs=param['win'],\n normal=param['normal'],\n longdependence = param['longdependence'],\n optimization = param['optimization']\n )\n\n ## build the model for mini-batch\n # train with early stopping on validation set\n print('... training')\n best_f1 = -numpy.inf\n param['clr'] = param['lr']\n \n for epoch in range(param['nepochs']):\n\n param['ce'] = epoch\n tic = timeit.default_timer()\n print('epoch %i out of %i' %(epoch,param['nepochs']) )\n \n for i, (x, y) in enumerate(zip(train_lex, train_y)):\n input_length = len(x)\n lstm.train(x, y, param['win'], param['clr'])\n print('[learning] epoch %i >> %2.2f%%' % (\n epoch, (i + 1) * 100. 
/ nsentences), end=' ')\n print('completed in %.2f (sec) <<\\r' % (timeit.default_timer() - tic), end='')\n\n # evaluation // back into the real world : idx -> words\n predictions_valid = [map(lambda x: idx2label[x],\n lstm.classify(numpy.asarray(\n contextwin(x, param['win'])).astype('int32')))\n for x in valid_lex]\n\n # evaluation // compute the accuracy using conlleval.pl\n res_valid = conlleval(predictions_valid,\n groundtruth_valid,\n words_valid,\n param['folder'] + '/current.valid.txt',\n param['folder'])\n\n if res_valid['f1'] > best_f1:\n\n best_f1 = res_valid['f1']\n\n if param['verbose']:\n print('NEW BEST: epoch', epoch,\n 'best test F1', res_valid['f1'])\n\n param['tf1'] = res_valid['f1']\n param['tp'] = res_valid['p']\n param['tr'] = res_valid['r']\n param['be'] = epoch\n else:\n if param['verbose']:\n print('')\n\n # learning rate decay if no improvement in 10 epochs\n if param['decay'] and abs(param['be']-param['ce']) >= 10:\n param['clr'] *= 0.5\n\n if param['clr'] < 1e-5:\n break\n \n\n print('BEST RESULT: epoch', param['be'],\n 'best test F1', param['tf1'],\n 'with the model', param['folder'])\n \n return lstm", "def performClassification(ngram, df, mode = None, split = 0.9):\r\n \r\n if type(mode) == str:\r\n X = df[mode.capitalize()]\r\n else:\r\n X = df.Features\r\n \r\n y = df.Label\r\n\r\n X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1, train_size = split)\r\n \r\n vect = CountVectorizer(analyzer='word', ngram_range=(ngram,ngram))\r\n \r\n X_train_dtm = vect.fit_transform(X_train)\r\n X_test_dtm = vect.transform(X_test)\r\n \r\n nb = MultinomialNB()\r\n svm = LinearSVC(random_state = 1)\r\n\r\n nb.fit(X_train_dtm, y_train)\r\n svm.fit(X_train_dtm, y_train)\r\n\r\n nb_pred_class = nb.predict(X_test_dtm)\r\n svm_pred_class = svm.predict(X_test_dtm)\r\n\r\n nb_accuracy = metrics.accuracy_score(y_test, nb_pred_class)\r\n nb_precision = metrics.precision_score(y_test, nb_pred_class, pos_label='negative')\r\n nb_recall = metrics.recall_score(y_test, nb_pred_class, pos_label='negative')\r\n nb_f1 = metrics.f1_score(y_test, nb_pred_class, pos_label='negative')\r\n\r\n svm_accuracy = metrics.accuracy_score(y_test, svm_pred_class)\r\n svm_precision = metrics.precision_score(y_test, svm_pred_class, pos_label='negative')\r\n svm_recall = metrics.recall_score(y_test, svm_pred_class, pos_label='negative')\r\n svm_f1 = metrics.f1_score(y_test, svm_pred_class, pos_label='negative')\r\n\r\n print('=====Naive Bayes===== \\t =====Linear SVC=====' )\r\n print('Accuracy score \\t\\t Accuracy score')\r\n print(round((nb_accuracy * 100), 1), '\\t\\t\\t', round((svm_accuracy * 100), 1), '\\n')\r\n print('Precision \\t\\t Precision')\r\n print(round((nb_precision * 100), 1), '\\t\\t\\t', round((svm_precision * 100), 1), '\\n')\r\n print('Recall \\t\\t\\t Recall')\r\n print(round((nb_recall * 100), 1), '\\t\\t\\t', round((svm_recall * 100), 1), '\\n')\r\n print('F1-score \\t\\t F1-score')\r\n print(round((nb_f1 * 100), 1), '\\t\\t\\t', round((svm_f1 * 100), 1))", "def train():\n counts = {size: dict() for size in NGRAM_SIZES}\n for word in tqdm.tqdm(word_iterator(\"resources/datasets\")):\n if word == \"\":\n continue\n for size in NGRAM_SIZES:\n for token in ngrams(word, 2 * size):\n left, right = token[:size], token[size:]\n counts[size].setdefault(left, dict())\n counts[size][left].setdefault(right, 0)\n counts[size][left][right] += 1\n model = {size: dict() for size in NGRAM_SIZES}\n for size in NGRAM_SIZES:\n for left in counts[size]:\n total = 
sum(counts[size][left].values())\n model[size][left] = dict()\n for right in counts[size][left]:\n model[size][left][right] = math.log(\n counts[size][left][right] / total)\n with open(MODEL_FILENAME, \"wb\") as file:\n pickle.dump(model, file)", "def evaluate_ngrams(eval_dataset, trigram_counts, bigram_counts, unigram_counts, train_token_count, lambda1, lambda2):\n perplexity = 0\n\n ### YOUR CODE HERE\n def calc_prob(sentense, i, word, trigram_counts, bigram_counts, unigram_counts, train_token_count, model):\n prob = 0.0\n prev_word = sentense[i - 1]\n prev_to_prev_word = sentense[i - 2]\n\n if model == \"unigram\":\n if word in unigram_counts:\n prob = (unigram_counts[word] + 0.0) / train_token_count\n else:\n prob = (unigram_counts[word_to_num['UUUNKKK']] + 0.0) / \\\n train_token_count\n\n if model == \"bigram\":\n if (prev_word, word) in bigram_counts:\n prob = (bigram_counts[(prev_word, word)] + 0.0) / \\\n unigram_counts[prev_word]\n # print(num_to_word[prev_word] ,num_to_word[word])\n # print(bigram_counts[(prev_word, word)])\n # print(unigram_counts[prev_word])\n # print(\"---------------------------\")\n else:\n prob = 0.0\n\n if model == \"trigram\":\n if (prev_to_prev_word, prev_word, word) in trigram_counts:\n prob = (trigram_counts[(prev_to_prev_word, prev_word, word)] + 0.0) \\\n / bigram_counts[(prev_to_prev_word, prev_word)]\n # / bigram_counts[(prev_word, word)] #this according to lecture notes slide 27\n else:\n prob = 0.0\n\n return prob\n\n l = 0\n num_of_words = 0\n\n ##########3\n better_than_chance = 0\n ###########\n\n for sentense in eval_dataset:\n for i, word in enumerate(sentense[2:]):\n num_of_words += 1\n prob = lambda1 * calc_prob(sentense, i + 2, word, trigram_counts, bigram_counts, unigram_counts,\n train_token_count, \"trigram\") + \\\n lambda2 * calc_prob(sentense, i + 2, word, trigram_counts, bigram_counts, unigram_counts,\n train_token_count, \"bigram\") + \\\n (1 - lambda1 - lambda2) * calc_prob(sentense, i + 2, word, trigram_counts, bigram_counts,\n unigram_counts, train_token_count, \"unigram\")\n ######################################\n if prob > (1.0 / vocabsize):\n better_than_chance += 1\n #########################\n l += np.log2(prob)\n l /= num_of_words\n perplexity = 2 ** -l\n\n print(\"better_than_chance:\", (better_than_chance + 0.0) / num_of_words)\n\n ### END YOUR CODE\n return perplexity", "def svm():", "def viterbi_trigrams(transition_probabilities, label_matches, prev_tag, prev_prev_tag, word, tag_possibilities):\r\n\tmax_prob = 0\r\n\tbest_tag = \"\"\r\n\ttag_counts = get_tag_counts(label_matches)\r\n\tfor tag in tag_possibilities:\r\n\t\temissions_probability = get_emissions_probability(label_matches, tag, word, tag_counts)\r\n\t\ttag_trigram = (prev_prev_tag, prev_tag,tag)\r\n\t\ttransition_probability = transition_probabilities.get(tag_trigram, 0.000027)\r\n\t\tprob = emissions_probability * transition_probability\r\n\t\tif prob > max_prob:\r\n\t\t\tmax_prob = prob\r\n\t\t\tbest_tag = tag\r\n\tif best_tag == \"\":\r\n\t\tbest_tag = \"o\"\r\n\treturn best_tag", "def __init__(self, text_train, labels_train):\n self.vectorizer = CountVectorizer(stop_words='english')\n self.features = self.vectorizer.fit_transform(text_train)\n\n self.nbc = MultinomialNB().fit(self.features, labels_train)", "def Viterbi(self, sent):\n viterbi = defaultdict(dict)\n backpointer = defaultdict(dict)\n sent_tag = []\n pos_list = [end_token]\n viterbi['0'] = 1.0\n\n # Initialization step\n # This loop will run for all the tags of each first word 
(sent[1][0])(word next to <S>) in dictionary\n for tag in self.dictionary[sent[1][0]]:\n # if any sentance in our trained data starts with a word that has same tag as \"state\"\n if (start_token, tag) in self.transitions:\n viterbi[str(1)][tag] = self.transitions[(start_token, tag)] + self.emissions[(sent[1][0], tag)]\n else:\n viterbi[str(1)][tag] = -float('inf')\n backpointer[str(1)][tag] = start_token\n\n # Recursion step\n # This loop will run for rest of the tuples (word, pos) after first tuple in \"sent\"\n for i in xrange(2, len(sent)):\n # This loop will run for all the tags of each word (sent[idx][0]) in dictionary\n for tag in self.dictionary[sent[i][0]]:\n maximum_value = -float(\"inf\")\n maximum_loc = []\n # This loop will run for all the tags in previous word (sent[idx-1][0]) in dictionary\n for prev_tag in self.dictionary[sent[i - 1][0]]:\n # if any sentance in our trained data has (privious tag, current tag) or (pre_state, state) of given word\n if (prev_tag, tag) in self.transitions:\n t = viterbi[str(i - 1)][prev_tag] + self.transitions[(prev_tag, tag)]\n else:\n t = -float('inf')\n if t >= maximum_value:\n maximum_value = t\n maximum_loc = prev_tag\n\n viterbi[str(i)][tag] = maximum_value + self.emissions[(sent[i][0], tag)]\n backpointer[str(i)][tag] = maximum_loc\n\n t = end_token\n for i in xrange(1, len(sent)):\n t = backpointer[str(len(sent) - i)][t]\n pos_list.append(t)\n\n for tup in sent:\n sent_tag.append((tup[0], pos_list.pop()))\n\n #print \"viterbi:\", viterbi\n #print \"backpointer:\", backpointer\n #print \"sent_tagged\", sent_tag\n\n return sent_tag", "def __init__(self, n, sents, corpus='', beta=None, addone=True):\n self.n = n\n self.beta = beta\n self.corpus = corpus\n self.beta_flag = True\n self.addone = addone\n self.smoothingtechnique = 'Back Off (Katz) with Discounting Smoothing'\n self.counts = counts = defaultdict(int)\n self.A_set = defaultdict(set)\n voc = ['</s>']\n for s in sents:\n voc += s\n self.voc = set(voc)\n if beta is None:\n self.beta_flag = False\n\n # if no beta given, we compute it\n if not self.beta_flag:\n total_sents = len(sents)\n aux = int(total_sents * 90 / 100)\n # 90 per cent por training\n train_sents = sents[:aux]\n # 10 per cent for perplexity (held out data)\n held_out_sents = sents[-total_sents+aux:]\n\n train_sents = list(map((lambda x: ['<s>']*(n-1) + x), train_sents))\n train_sents = list(map((lambda x: x + ['</s>']), train_sents))\n for sent in train_sents:\n for j in range(n+1):\n for i in range(n-j, len(sent) - j + 1):\n ngram = tuple(sent[i: i + j])\n counts[ngram] += 1\n # for efficiency, we save the A set as a dict of sets\n if j:\n self.A_set[ngram[:-1]].add(ngram[-1])\n for i in range(1, n):\n counts[('<s>',)*i] += len(train_sents)\n counts[('</s>',)] = len(train_sents)\n\n self.tocounts = counts\n # search for the beta that gives lower perplexity\n beta_candidates = [i*0.1 for i in range(1, 10)]\n # xs is a list with (beta, perplexity)\n xs = []\n self.sents = train_sents\n for aux_beta in beta_candidates:\n self.beta = aux_beta\n aux_perx = self.perplexity(held_out_sents)\n xs.append((aux_beta, aux_perx))\n xs.sort(key=lambda x: x[1])\n self.beta = xs[0][0]\n with open('old-stuff/backoff_'+str(n)+'_parameters_'+corpus, 'a') as f:\n f.write('Order: {}\\n'.format(self.n))\n f.write('Beta: {}\\n'.format(self.beta))\n f.write('AddOne: {}\\n'.format(self.addone))\n f.write('Perplexity observed: {}\\n'.format(xs[0][1]))\n f.write('-------------------------------\\n')\n f.close()\n else:\n sents = 
list(map((lambda x: x + ['</s>']), sents))\n sents = list(map((lambda x: ['<s>']*(n-1) + x), sents))\n\n for sent in sents:\n for j in range(n+1):\n for i in range(n-j, len(sent) - j + 1):\n ngram = tuple(sent[i: i + j])\n counts[ngram] += 1\n # for efficiency, we save the A set as a dict of sets\n if j:\n self.A_set[ngram[:-1]].add(ngram[-1])\n for i in range(1, n):\n counts[('<s>',)*i] += len(sents)\n counts[('</s>',)] = len(sents)", "def __init__(self, sents, n, corpus='', D=None):\n\n self.n = n\n self.D = D\n self.corpus = corpus\n self.smoothingtechnique = 'Kneser Ney Smoothing'\n # N1+(·w_<i+1>)\n self._N_dot_tokens_dict = N_dot_tokens = defaultdict(set)\n # N1+(w^<n-1> ·)\n self._N_tokens_dot_dict = N_tokens_dot = defaultdict(set)\n # N1+(· w^<i-1>_<i-n+1> ·)\n self._N_dot_tokens_dot_dict = N_dot_tokens_dot = defaultdict(set)\n self.counts = counts = defaultdict(int)\n vocabulary = []\n\n if D is None:\n total_sents = len(sents)\n k = int(total_sents*9/10)\n training_sents = sents[:k]\n held_out_sents = sents[k:]\n training_sents = list(map(lambda x: ['<s>']*(n-1) + x + ['</s>'], training_sents))\n for sent in training_sents:\n for j in range(n+1):\n for i in range(n-j, len(sent) - j + 1):\n ngram = tuple(sent[i: i + j])\n counts[ngram] += 1\n if ngram:\n if len(ngram) == 1:\n vocabulary.append(ngram[0])\n else:\n right_token, left_token, right_kgram, left_kgram, middle_kgram =\\\n ngram[-1:], ngram[:1], ngram[1:], ngram[:-1], ngram[1:-1]\n N_dot_tokens[right_kgram].add(left_token)\n N_tokens_dot[left_kgram].add(right_token)\n if middle_kgram:\n N_dot_tokens_dot[middle_kgram].add(right_token)\n N_dot_tokens_dot[middle_kgram].add(left_token)\n if n - 1:\n counts[('<s>',)*(n-1)] = len(sents)\n self.vocab = set(vocabulary)\n aux = 0\n for w in self.vocab:\n aux += len(self._N_dot_tokens_dict[(w,)])\n self._N_dot_dot_attr = aux\n D_candidates = [i*0.12 for i in range(1, 9)]\n xs = []\n for D in D_candidates:\n self.D = D\n aux_perplexity = self.perplexity(held_out_sents)\n xs.append((D, aux_perplexity))\n xs.sort(key=lambda x: x[1])\n self.D = xs[0][0]\n with open('old-stuff/kneserney_' + str(n) + '_parameters_'+corpus, 'a') as f:\n f.write('Order: {}\\n'.format(self.n))\n f.write('D: {}\\n'.format(self.D))\n f.write('Perplexity observed: {}\\n'.format(xs[0][1]))\n f.write('-------------------------------\\n')\n f.close()\n\n # discount value D provided\n else:\n sents = list(map(lambda x: ['<s>']*(n-1) + x + ['</s>'], sents))\n for sent in sents:\n for j in range(n+1):\n # all k-grams for 0 <= k <= n\n for i in range(n-j, len(sent) - j + 1):\n ngram = tuple(sent[i: i + j])\n counts[ngram] += 1\n if ngram:\n if len(ngram) == 1:\n vocabulary.append(ngram[0])\n else:\n # e.g., ngram = (1,2,3,4,5,6,7,8)\n # right_token = (8,)\n # left_token = (1,)\n # right_kgram = (2,3,4,5,6,7,8)\n # left_kgram = (1,2,3,4,5,6,7)\n # middle_kgram = (2,3,4,5,6,7)\n right_token, left_token, right_kgram, left_kgram, middle_kgram =\\\n ngram[-1:], ngram[:1], ngram[1:], ngram[:-1], ngram[1:-1]\n N_dot_tokens[right_kgram].add(left_token)\n N_tokens_dot[left_kgram].add(right_token)\n if middle_kgram:\n N_dot_tokens_dot[middle_kgram].add(right_token)\n N_dot_tokens_dot[middle_kgram].add(left_token)\n if n-1:\n counts[('<s>',)*(n-1)] = len(sents)\n self.vocab = set(vocabulary)\n\n aux = 0\n for w in self.vocab:\n aux += len(self._N_dot_tokens_dict[(w,)])\n self._N_dot_dot_attr = aux\n\n xs = [k for k, v in counts.items() if v == 1 and n == len(k)]\n ys = [k for k, v in counts.items() if v == 2 and n == len(k)]\n 
n1 = len(xs)\n n2 = len(ys)\n self.D = n1 / (n1 + 2 * n2)", "def learn(self,n):\n for i in range(n):\n self.class_counts,self.feature_counts = self.em_step(self.class_counts,\n self.feature_counts)", "def viterbi_segment(text, P):\n # best[i] = best probability for text[0:i]\n # words[i] = best word ending at position i\n n = len(text)\n words = [''] + list(text)\n best = [1.0] + [0.0] * n\n\n # Fill in the vectors best, words via dynamic programming\n for i in range(n + 1):\n for j in range(0, i):\n w = text[j:i]\n if P[w] * best[i - len(w)] >= best[i]:\n best[i] = P[w] * best[i - len(w)]\n words[i] = w\n\n # Now recover the sequence of best words\n sequence = [];\n i = len(words) - 1\n while i > 0:\n sequence[0:0] = [words[i]]\n i = i - len(words[i])\n\n # Return sequence of best words and overall probability\n return sequence, best[-1]", "def my_impl_variational(in_train, in_test, labels):\n X_train = []\n X_test = []\n for lab in labels:\n for datum in in_train[lab]:\n X_train.append([datum, lab])\n for datum in in_test[lab]:\n X_test.append([datum, lab])\n Variationer_learn(X_train, 500, 1, 0.01, X_test, labels)", "def __init__(self, N, tokens):\n\n self.N = N\n ngrams = self.create_ngrams(tokens)\n\n self.ngrams = ngrams\n self.mdl = self.train(ngrams)\n\n if N < 2:\n raise Exception('N must be greater than 1')\n elif N == 2:\n self.prev_mdl = UnigramLM(tokens)\n else:\n mdl = NGramLM(N-1, tokens)\n self.prev_mdl = mdl", "def train_lvq(numcbv, run_len_mult = 40, fpath = \"datasets\\\\lvq\"):\n # Number of iterations recommended by Kohonen is 40 times the number of codebook vectors\n runlen = run_len_mult * numcbv\n \n #run length for 'sammon'. Doesn't affect learning. May not be necessary.\n #runlen2 = 100\n \n #codebook size 40 will create files \"lvq/c40e.cod\", \"lvq/c40o.sam\" etc.\n cb = \"lvq\\\\c\" + str(numcbv)\n train = fpath + \"_train.txt\"\n test = fpath + \"_test.txt\"\n\n # Little lambdas just to help with readability below.\n cmd = lambda X: \"binaries_windows\\\\\"+X+\".exe\"\n din = lambda X: \" -din \" + str(X)\n cout = lambda X: \" -cout \" + str(X) \n cin = lambda X: \" -cin \" + str(X)\n rlen = lambda X: \" -rlen \" + str(X)\n noc = lambda X: \" -noc \" + str(X)\n cfout = lambda X: \" -cfout \" + str(X) \n \n # Initialize LVQ with even codebooks per class\n check_call(cmd(\"eveninit\") + din(train) + cout(cb + \"e.cod\") + noc(numcbv) )\n \n # Balance codebooks. Optional.\n check_call(cmd(\"balance\") + din(train) + cin(cb + \"e.cod\") + cout(cb + \"b.cod\") )\n \n #Codebook Training\n check_call(cmd(\"olvq1\") + din(train) + cin(cb + \"b.cod\") + cout(cb + \"o.cod\") + rlen(runlen) )\n \n # Compute accuracy for training and testing set.\n check_call(cmd(\"accuracy\") + din(train) + cin(cb + \"o.cod\") + cfout(cb + \"_train.cfo\") )\n check_call(cmd(\"accuracy\") + din(test) + cin(cb + \"o.cod\") + cfout(cb + \"_test.cfo\") )\n \n #Optional. Slow.\n #call(cmd(\"sammon\") + cin(cb + \"o.cod\") + cout(cb + \"o.sam\") + rlen(runlen2) )", "def data_mining_features(index,input_string_x1,input_string_x2,vocab_word2index,word_vec_fasttext_dict,word_vec_word2vec_dict,tfidf_dict,n_gram=8):\r\n input_string_x1=input_string_x1.decode(\"utf-8\")\r\n input_string_x2 = input_string_x2.decode(\"utf-8\")\r\n #1. 
get blue score vector\r\n feature_list=[]\r\n #get blue score with n-gram\r\n for i in range(n_gram):\r\n x1_list=split_string_as_list_by_ngram(input_string_x1,i+1)\r\n x2_list = split_string_as_list_by_ngram(input_string_x2, i + 1)\r\n blue_score_i_1 = compute_blue_ngram(x1_list,x2_list)\r\n blue_score_i_2 = compute_blue_ngram(x2_list,x1_list)\r\n feature_list.append(blue_score_i_1)\r\n feature_list.append(blue_score_i_2)\r\n\r\n #2. get length of questions, difference of length\r\n length1=float(len(input_string_x1))\r\n length2=float(len(input_string_x2))\r\n length_diff=(float(abs(length1-length2)))/((length1+length2)/2.0)\r\n feature_list.append(length_diff)\r\n\r\n #3. how many words are same, how many words are unique\r\n sentence_diff_overlap_features_list=get_sentence_diff_overlap_pert(index,input_string_x1,input_string_x2)\r\n feature_list.extend(sentence_diff_overlap_features_list)\r\n\r\n #4. question 1,2 start with how/why/when\r\n #how_why_feature_list=get_special_start_token(input_string_x1,input_string_x2,special_start_token)\r\n #print(\"how_why_feature_list:\",how_why_feature_list)\r\n #feature_list.extend(how_why_feature_list)\r\n\r\n #5.edit distance\r\n edit_distance=float(edit(input_string_x1, input_string_x2))/30.0\r\n feature_list.append(edit_distance)\r\n\r\n #6.cos distance from sentence embedding\r\n x1_list=token_string_as_list(input_string_x1, tokenize_style='word')\r\n x2_list = token_string_as_list(input_string_x2, tokenize_style='word')\r\n distance_list_fasttext = cos_distance_bag_tfidf(x1_list, x2_list, word_vec_fasttext_dict, tfidf_dict)\r\n distance_list_word2vec = cos_distance_bag_tfidf(x1_list, x2_list, word_vec_word2vec_dict, tfidf_dict)\r\n #distance_list2 = cos_distance_bag_tfidf(x1_list, x2_list, word_vec_fasttext_dict, tfidf_dict,tfidf_flag=False)\r\n #sentence_diffence=np.abs(np.subtract(sentence_vec_1,sentence_vec_2))\r\n #sentence_multiply=np.multiply(sentence_vec_1,sentence_vec_2)\r\n feature_list.extend(distance_list_fasttext)\r\n feature_list.extend(distance_list_word2vec)\r\n #feature_list.extend(list(sentence_diffence))\r\n #feature_list.extend(list(sentence_multiply))\r\n return feature_list", "def dynamic_programming_on_trellis(self, instance, run_forward_alg=True):\n \"\"\"Build a trellis of length of all tags +1 for tag|START x length of the instance sequence of probabilities.\n Backtrace matrix has the same dimensions but stores corresponding codes for states.\n First, I initialize the probabilities for each state at t=0 as simply emission probability*transition probability.\n backtrace_pointers at t=0 are already initialized to 0.\n Loop through time step starting from t=1 and through states and either sum up (Forward) or get max (Viterbi) of the product of the previous step and emission probability and transition probability for all states (for each another loop through states is needed).\n In Viterbi, the code(index) to the most probable state is added at the same time.\n \"\"\"\n #TODO:Initialize trellis and backtrace pointers\n trellis = numpy.zeros((len(self.labels)+1,len(instance.data)))\n backtrace_pointers = numpy.zeros(shape=(len(self.labels)+1,len(instance.data)), dtype=int)\n #TODO:Traverse through the trellis here\n if run_forward_alg == True:\n for i in xrange(len(self.labels)):\n if instance.data[0] in self.V:\n trellis[i][0] = self.transition_matrix[len(self.labels)][i]*self.emission_matrix[self.V.index(instance.data[0])][i]\n else:\n trellis[i][0] = 
self.transition_matrix[len(self.labels)][i]*self.emission_matrix[self.V.index('<UNK>')][i]\n for i in xrange(len(self.labels)):\n for j in xrange(1,len(instance.data)):\n emission_prob = 0\n if instance.data[j] in self.V:\n emission_prob = self.emission_matrix[self.V.index(instance.data[j])][i]\n else:\n emission_prob = self.emission_matrix[self.V.index('<UNK>')][i]\n for k in xrange(1, len(self.labels)):\n trellis[i][j] += trellis[k][j-1] * self.transition_matrix[k][i] * emission_prob\n else:\n for i in xrange(len(self.labels)):\n if instance.data[0] in self.V:\n trellis[i][0] = self.transition_matrix[len(self.labels)][i]*self.emission_matrix[self.V.index(instance.data[0])][i]\n else:\n trellis[i][0] = self.transition_matrix[len(self.labels)][i]*self.emission_matrix[self.V.index('<UNK>')][i]\n for j in xrange(1,len(instance.data)):\n for i in xrange(len(self.labels)):\n emission_prob = 0\n if instance.data[j] in self.V:\n emission_prob = self.emission_matrix[self.V.index(instance.data[j])][i]\n else:\n emission_prob = self.emission_matrix[self.V.index('<UNK>')][i]\n max_prob = trellis[0][j-1] * self.transition_matrix[0][i] * emission_prob\n max_index = 0\n for k in xrange(1, len(self.labels)):\n prob = trellis[k][j-1] * self.transition_matrix[k][i] * emission_prob\n if prob > max_prob:\n max_prob = prob\n max_index = k\n trellis[i][j] = max_prob\n backtrace_pointers[i][j] = max_index\n return (trellis, backtrace_pointers)", "def model(self):\n filePath = self.config['data_path']['train_data']\n data = self.loadCSV(filePath)\n cleandata = self.preprocess(data)\n X, y = self.dataSplit(cleandata)\n X = self.CountVect(X, self.config['transform_path']['transform_model_path'])\n X_train, X_test, y_train, y_test = self.TrainTestSplit(X, y)\n self.MultinomialNB(X_train, X_test, y_train, y_test, self.config['nlp_path']['model_path'])", "def nnObjFunction(params, *args):\r\n\r\n n_input, n_hidden, n_class, training_data, training_label, lambdaval = args\r\n \r\n \"\"\"translates label vector of digits 0-9 into 1-K form\"\"\"\r\n count=0\r\n label10=np.zeros((training_label.shape[0],10))\r\n for x in training_label:\r\n if(x==0):\r\n label10[count]=[1,0,0,0,0,0,0,0,0,0]\r\n elif(x==1):\r\n label10[count]=[0,1,0,0,0,0,0,0,0,0]\r\n elif(x==2):\r\n label10[count]=[0,0,1,0,0,0,0,0,0,0]\r\n elif(x==3):\r\n label10[count]=[0,0,0,1,0,0,0,0,0,0]\r\n elif(x==4):\r\n label10[count]=[0,0,0,0,1,0,0,0,0,0]\r\n elif(x==5):\r\n label10[count]=[0,0,0,0,0,1,0,0,0,0]\r\n elif(x==6):\r\n label10[count]=[0,0,0,0,0,0,1,0,0,0]\r\n elif(x==7):\r\n label10[count]=[0,0,0,0,0,0,0,1,0,0]\r\n elif(x==8):\r\n label10[count]=[0,0,0,0,0,0,0,0,1,0]\r\n else:\r\n label10[count]=[0,0,0,0,0,0,0,0,0,1]\r\n count+=1\r\n \r\n w1 = params[:n_hidden * (n_input+1)].reshape((n_hidden, (n_input+1)))\r\n w2 = params[(n_hidden * (n_input+1)):].reshape((n_class, (n_hidden+1)))\r\n obj_val = 0 \r\n \r\n print('in nnobj')\r\n\r\n #Get bias dimension\r\n bias_dimension = training_data.shape[0]\r\n\r\n #Fill it all with ones\r\n bias = np.ones((bias_dimension,1))\r\n\r\n #Add bias to weights \r\n training_data_with_bias = np.concatenate((training_data,bias),1)\r\n\r\n #Feed Foward Start By Multiplying Training data by weights of w1\r\n z2 = np.dot(training_data_with_bias,np.transpose(w1))\r\n\r\n #Apply Sigmoid function\r\n a2= sigmoid(z2)\r\n #Apply Another Bias Dimension to the new matrix\r\n\r\n #bias_dimension = a2.shape[0]\r\n bias = np.ones((bias_dimension,1))\r\n a2_bias= np.concatenate((a2,bias),1)\r\n\r\n #Multiply new matrix by the 
weights of w2\r\n z3 = np.dot(a2_bias,np.transpose(w2))\r\n \r\n #Apply Sigmoid Function to the new data\r\n y= sigmoid(z3)\r\n\r\n #yl-ol (element of equation (9))\r\n dif= label10-y\r\n \r\n #1-ol (element of equation (9))\r\n dif2= 1-y\r\n\r\n # Finish Forward Propagation\r\n \r\n #equation (15)\r\n obj_val = ((lambdaval/(2*y.shape[0]))*(np.sum(np.square(w1))+np.sum(np.square(w2))))+(np.sum(.5*np.sum(np.square(dif),axis=1))/y.shape[0])\r\n \r\n #column vector, equation (9)\r\n elem1=np.transpose(np.array(-1*dif*dif2*y,ndmin=2))\r\n\r\n #w2 matrix with bias cut out\r\n w2trim= np.delete(w2,w2.shape[1]-1,1)\r\n\r\n #equation (12) without multiplying the xi term yet\r\n elem2=(-1*(1-a2)*(a2))*(np.dot((dif*dif2*y),w2trim))\r\n\r\n#summing up the inner part of equation (17)\r\n total=np.zeros_like(w1)\r\n for x in range(0,y.shape[0]):\r\n total+=np.dot(np.transpose(np.array(elem2[x],ndmin=2)),np.array(training_data_with_bias[x],ndmin=2))\r\n\r\n #equation (17)\r\n grad_w1 = (total+(lambdaval*w1))/y.shape[0]\r\n\r\n #equation (16)\r\n grad_w2 = (np.dot(elem1,a2_bias)+(lambdaval*w2))/y.shape[0]\r\n\r\n \r\n \r\n \r\n #Make sure you reshape the gradient matrices to a 1D array. for instance if your gradient matrices are grad_w1 and grad_w2\r\n #you would use code similar to the one below to create a flat array\r\n obj_grad = np.concatenate((grad_w1.flatten(), grad_w2.flatten()),0)\r\n print (obj_val)\r\n return (obj_val,obj_grad)", "def naiveBayes(train_set, train_labels, dev_set, smoothing_parameter, pos_prior):\n # TODO: Write your code here\n # Bag of words model\n smoothing_parameter = .007 # override smoothing parameter\n posWordBag = {}\n negWordBag = {}\n for reviewIndex, review in enumerate(train_set):\n for word in review:\n if isWordTooCommon(word) == True: # If word is too common, skip the word\n continue\n else:\n if train_labels[reviewIndex] == 1: # Positive\n if word not in posWordBag.keys():\n posWordBag[word] = 1\n else:\n posWordBag[word] += 1\n elif train_labels[reviewIndex] == 0: # Negative\n if word not in negWordBag.keys():\n negWordBag[word] = 1\n else:\n negWordBag[word] += 1\n \n posProbList, posUNK = genProb(posWordBag, smoothing_parameter)\n negProbList, negUNK = genProb(negWordBag, smoothing_parameter)\n\n # Done with training. 
Now development with MAP\n dev_labels = []\n for devRev in dev_set:\n reviewIsPos = math.log10(pos_prior)\n reviewIsNeg = math.log10(1 - pos_prior)\n for word in devRev:\n if isWordTooCommon(word) == True: # If word is too common, skip the word\n continue\n else:\n if word in posProbList.keys():\n reviewIsPos += posProbList[word]\n else:\n reviewIsPos += posUNK\n if word in negProbList.keys():\n reviewIsNeg += negProbList[word]\n else:\n reviewIsNeg += negUNK\n if reviewIsPos < reviewIsNeg:\n dev_labels.append(0)\n else:\n dev_labels.append(1)\n\n # return predicted labels of development set\n return dev_labels", "def run_viterbi(emission_scores, trans_scores, start_scores, end_scores):\r\n L = start_scores.shape[0]\r\n assert end_scores.shape[0] == L\r\n assert trans_scores.shape[0] == L\r\n assert trans_scores.shape[1] == L\r\n assert emission_scores.shape[1] == L\r\n N = emission_scores.shape[0]\r\n\r\n # SHAPES \r\n # N = 5, L = 3\r\n # emission_scores = (5,3), trans_scores = (3,3)\r\n # start_scores = (3,), end_scores = (3,)\r\n\r\n # Creating the transition DP matrix\r\n T = [[0 for _ in range(N)] for _ in range(L)]\r\n backpointers = [[0 for _ in range(N)] for _ in range(L)]\r\n\r\n # Filling the first column\r\n for row in range(L):\r\n T[row][0] = emission_scores[0][row] + start_scores[row] # emission_scores matrix is (N X L)\r\n \r\n # Filling the rest of the transition matrix\r\n for col in range(1, N):\r\n for row in range(L):\r\n prev_list = []\r\n for prev_label in range(L):\r\n prev_list.append(trans_scores[prev_label, row] + T[prev_label][col-1])\r\n T[row][col] = max(prev_list) + emission_scores[col][row] \r\n backpointers[row][col] = np.argmax(prev_list)\r\n\r\n # Filling the last column\r\n for row in range(L):\r\n T[row][N-1] += end_scores[row]\r\n\r\n # print for debug\r\n # print \"T\"\r\n # for i in T:\r\n # print i\r\n \r\n # print \r\n # print\r\n\r\n # print \"B\"\r\n # for i in backpointers:\r\n # print i\r\n\r\n # Finding max score in last column of T matrix\r\n T = np.array(T)\r\n score = np.asscalar(np.max(T[:,N-1]))\r\n location = np.asscalar(np.argmax(T[:,N-1]))\r\n\r\n # Getting best sequence from right to left using backpointers\r\n y = [location]\r\n for col in range(N-1, 0, -1):\r\n y.insert(0, backpointers[location][col])\r\n location = backpointers[location][col]\r\n\r\n '''\r\n y = []\r\n for i in xrange(N):\r\n # stupid sequence\r\n y.append(i % L)\r\n # score set to 0\r\n return (0.0, y)\r\n '''\r\n return (score, y)", "def recognize_ngram(models: dict, test_set: SinglesData,probs,BIC_guesses):\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n probabilities = []\n guesses = []\n\n model = arpa.loadf(\"devel-lm-M3.sri.lm\")\n lm = model[0] # ARPA files may contain several models.\n # TODO implement the recognizer\n # return probabilities, guesses\n test_sequences = list(test_set.get_all_Xlengths().values())\n word_keys = list(test_set.get_all_Xlengths().keys())\n i = -1 \n for sentence in test_set.sentences_index.values():\n f = {}\n maxs = float(\"-inf\")\n prob = []\n words = []\n\n sentenceLength = 0\n for word_index in sentence:\n i+=1\n word = test_set.wordlist[word_index]\n sentenceLength+=1\n try:\n f[word] = probs[word][i]\n except:\n f[word] = float(\"-inf\")\n prob.append(f[word]) ## These are Just the probabilities unchanged from the BIC recognizer.\n \n # Find Six most probable words and generate the possible permutations \n sixwords = sorted(f,key=f.get,reverse=True)[:6]\n for k in permutations(sixwords, 
r=sentenceLength):\n l = 0\n for j in range(len(k)):\n l += f[k[j]]\n try:\n sentenceLP = l + 13*lm.log_s(\" \".join(k)) ## According to one student in the forum 13 is the best hyperparameter\n if sentenceLP > maxs: ## https://discussions.udacity.com/t/slm-data-for-this-asl-dataset/230822/8?u=spiros\n sentence = \" \".join(k)\n maxs = sentenceLP\n words = list(k)\n except:\n pass\n\n if(words == []):\n words = BIC_guesses[len(guesses):len(guesses)+sentenceLength] ## Fall back to BIC guesses\n probabilities.append(prob) \n guesses += words\n return (probabilities,guesses)", "def process_sentence_trigrams(sentence, base_index, label_matches, label_index_dict, transition_probabilities):\r\n\tsentence_process_index = 0\r\n\tlabel = ''\r\n\tprev_tag = \"<START>\"\r\n\tprev_prev_tag = \"<START>\"\r\n\tknown_words = get_known_words(label_matches)\r\n\ttag_options = get_tag_options(label_matches)\r\n\tprev_is_valid = False\r\n\tcurr_is_valid = False\r\n\tstarting_index = 0\r\n\tending_index = starting_index\r\n\twhile sentence_process_index < len(sentence):\r\n\t\tword = sentence[sentence_process_index]\r\n\t\tif word in known_words:\r\n\t\t\tlookup_word = word\r\n\t\telse:\r\n\t\t\tlookup_word = \"<UNK>\"\r\n\t\tbest_tag = viterbi_trigrams(transition_probabilities, label_matches, prev_tag, prev_prev_tag, lookup_word, tag_options)\r\n\t\tcurr_is_valid = (best_tag != 'o') and (len(best_tag)>2)\r\n\t\tif prev_is_valid and curr_is_valid and (best_tag[2:] != prev_tag[2:]):\r\n\t\t\tlabel_index_dict = add_to_label_index_dict(prev_tag[2:], base_index+starting_index, base_index+ending_index, label_index_dict)\r\n\t\t\tstarting_index = sentence_process_index\r\n\t\t\tending_index = starting_index\r\n\t\telif curr_is_valid and prev_is_valid and (best_tag[2:] == prev_tag[2:]):\r\n\t\t\tending_index +=1\r\n\t\telif curr_is_valid:\r\n\t\t\tstarting_index = sentence_process_index\r\n\t\t\tending_index = starting_index\r\n\t\telif prev_is_valid:\r\n\t\t\tlabel_index_dict = add_to_label_index_dict(prev_tag[2:], base_index+starting_index, base_index+ending_index, label_index_dict)\r\n\t\t\tstarting_index = sentence_process_index\r\n\t\t\tending_index = starting_index\r\n\t\tprev_prev_tag = prev_tag\r\n\t\tprev_tag = best_tag\r\n\t\tprev_is_valid = curr_is_valid\r\n\t\tsentence_process_index +=1\r\n\treturn label_index_dict", "def train(self, iterable):\n for ngram in generate_ngrams(iterable, self.n + 1):\n self.markov_dict.setdefault(ngram[: self.n], Counter()).update([ngram[self.n]])\n self.prob_dict.update([ngram[: self.n]])", "def predict_greedy(self, tokens: TokenSeq) -> Tuple[NDArray, PosSeq]:\n \n #array to hold predictions\n predictions = np.zeros((len(tokens), len(self.l), len(self.l)))\n for i in range(len(tokens)):\n\n for prev_tag in self.l:\n new_feature_matrix = []\n temp_dict = {}\n feature_dict = add_features(tokens, prev_tag ,i, temp_dict)\n new_feature_matrix.append(feature_dict)\n new_feature_matrix = self.vectorizer.transform(new_feature_matrix)\n \n probabilities = self.clf.predict_proba(new_feature_matrix)\n predictions[i, self.label_index(prev_tag)] = probabilities\n \n cur = len(probabilities[0])- 1\n final_predictions = []\n for i in range(len(predictions)):\n cur_pred = np.argmax(predictions[i, cur])\n final_predictions.append(cur_pred)\n cur = cur_pred\n ret_matrix = []\n #print(tokens)\n final_predictions = self.le.inverse_transform(final_predictions)\n #print(final_predictions)\n new_pos = final_predictions.tolist()\n new_pos.insert(0, \"<s>\")\n new_pos.pop(len(new_pos) - 1)\n for i 
in range(len(new_pos)):\n feature_dict = add_features(tokens, new_pos[i] ,i, temp_dict)\n ret_matrix.append(feature_dict)\n ret_matrix = self.vectorizer.transform(ret_matrix)\n\n return (ret_matrix, final_predictions)", "def train(self, n):\n t = self.t\n\n parallel_sentences = list(zip(self.target,self.source))\n\n for i in range(n):\n\n count = defaultdict(lambda:defaultdict(int))\n s_total = dict()\n total = defaultdict(int)\n\n for E,F in parallel_sentences:\n # compute normalization\n for e in E:\n t_e = t[e]\n s_total[e] = 0\n for f in F:\n s_total[e] += t_e[f]\n\n # collect counts\n for e in E:\n count_e = count[e]\n t_e = t[e]\n s_total_e = s_total[e]\n for f in F:\n tmp = t_e[f] / s_total_e\n count_e[f] += tmp\n total[f] += tmp\n\n # estimate probabilities\n for e in self.t_words:\n t_e = t[e]\n count_e = count[e]\n #for f in self.s_words:\n for f in count_e:\n #if f not in count[e]: continue\n t_e[f] = count_e[f] / total[f]", "def train(self, examples):\n print(examples)\n # first we will do gensim to get word embeddings\n tokens = []\n for example in examples:\n for tuple in example:\n tokens.append([tuple[0]])\n self.model = Word2Vec(tokens, min_count=1, size=100).wv\n # shuffle the examples so that they are gone through 'randomly'\n #print(examples)\n random.shuffle(examples)\n #print(examples)\n # iterate through our examples\n for j in range(len(examples)):\n # the stored label for the previous token\n prev_label = None\n prev_word = None\n # iterate through our tokens for the example\n for i in range(len(examples[j])):\n # store our token and its label\n token = examples[j][i][0]\n y = examples[j][i][1]\n # get the features for our current token\n next_word = None\n if i <= (len(examples)-1):\n next_word = examples[j][i+1][0]\n features = self.featurize(prev_label, prev_word, token, next_word)\n # set our previous label to our current since\n # we are done featurizing and need to store it for\n # the next iteration\n prev_label = y\n # a dictionary that will store our z values\n z = {}\n # calculate our z value for every state for\n # the example we are on\n # z(state) = features * weights\n # z[state] = np.dot(features, weights[state])\n for state in self.states:\n z[state] = np.dot(features, self.weights[state])\n # store our max\n max = -1\n # store our y_hat\n y_hat = None\n # store our probabilities\n prob = {}\n # this runs softmax on our z's\n # y_hat = softmax(z)\n denom = sum(np.exp(np.array(list(z.values()))))\n for state in self.states:\n # softmax = p(state) = e^z[state] / (sum[e^z for all z's)\n # making sure this works the way I want it to, should\n # be three values\n #print(np.array(list(z.values())))\n #print(np.exp(np.array(list(z.values()))))\n prob[state] = np.exp(z[state]) / denom\n # if our current prob is greater than the others then it is our boy\n if prob[state] > max:\n # save the new prob as the max\n max = prob[state]\n # save the state as our prediction y_hat\n y_hat = state\n # this will hold our gradients for all the states\n gradients = {}\n for state in self.states:\n # gradient[state] = ((y_hat == state) - prob[state]) * features\n gradients[state] = ((y_hat == state) - prob[state]) * features\n # weights[state] -= loss * gradients\n self.weights[state] -= self.loss * gradients[state]", "def a_test2_bbvi_mini_batch():\n model = ARIMAX(formula=\"y ~ x1 + x2\", data=data, ar=1, ma=1, family=Exponential())\n x = model.fit('BBVI',iterations=100, mini_batch=32)\n assert(len(model.latent_variables.z_list) == 5)\n lvs = np.array([i.value for i in 
model.latent_variables.z_list])\n assert(len(lvs[np.isnan(lvs)]) == 0)", "def viterbi(prob_matrix):\n TINY = 1e-6 # to avoid NaNs in logs\n\n # if prob_matrix is 1D, make it 2D\n if len(np.shape(prob_matrix)) == 1:\n prob_matrix = [prob_matrix]\n \n length = len(prob_matrix)\n\n probs = np.zeros_like(prob_matrix)\n backpt = np.ones_like(prob_matrix, dtype=np.int32) * -1\n \n for i in [0,1,2,3,4]:\n probs[0][i] = np.log(prob_matrix[0][i]+TINY)\n \n # {B, M, E, S} <=== 0:begin, 1:middle, 2:end, 3:single\n for t in range(1, length):\n # E, S -> B | B, M -> M | B, M -> E | E, S -> S\n previous_of = [[0,0], [3,4], [1,2], [1,2], [3,4]]\n for i in range(5):\n prevs = previous_of[i]\n max_id = prevs[np.argmax([probs[t-1][prevs[0]], probs[t-1][prevs[1]]])]\n backpt[t][i] = max_id\n probs[t][i] = np.log(prob_matrix[t][i]+TINY) + probs[t-1][max_id]\n\n seq = np.ones(length, 'int32') * -1\n #print(probs[length-1])\n seq[length-1] = np.argmax(probs[length-1])\n #print(seq[length-1])\n max_prob = probs[length-1][seq[length-1]]\n for t in range(1, length):\n seq[length-1-t] = backpt[length-t][seq[length-t]]\n \n return seq", "def viterbi_tags (untagged_sentences, h):\n transitions = h[0]\n emissions = h[1]\n tags = h[2]\n maxtags = []\n #print tags\n\n for untaggedsent in untagged_sentences:\n #Create empty probtable\n words = untaggedsent.split()\n r = len(tags)\n c = len(words)\n probtable = [None]*r\n for i in range(r):\n probtable[i] = [None]*c\n for j in range(c):\n probtable[i][j] = [None]*2\n\n #Initialize zeroth column of probtable\n prevtag = '<START>'\n word = words[0]\n for i in range(r):\n tag = tags[i]\n\n transition = transitions[prevtag][tag]\n if word in emissions[tag]:\n emission = emissions[tag][word]\n else:\n emission = .0001*emissions[tag]['<UNKNOWN>']\n\n probtable[i][0][0] = transition*emission\n \n #Fill in probtable\n for j in range(1, c):\n word = words[j]\n for i in range(r):\n tag = tags[i]\n maxprob = 0\n maxtag = None\n\n if word in emissions[tag]:\n emission = emissions[tag][word]\n else:\n emission = .0001*emissions[tag]['<UNKNOWN>']\n\n for k in range(r):\n prevtag = tags[k]\n transition = transitions[prevtag][tag]\n prob = probtable[k][j-1][0]*transition*emission\n \n if (prob > maxprob):\n maxprob = prob\n maxtag = k\n\n probtable[i][j][0] = maxprob\n probtable[i][j][1] = maxtag\n\n #Find most likely sequence of POS tags of this sentence\n sentmaxtags = maxsequence(probtable, tags)\n maxtags.extend(sentmaxtags)\n\n #Return most likely sequence of POS tags of all sentences\n return maxtags", "def build_naive_bayes():\n nb_pipeline = None\n ##### Write code here\n nb_pipeline = Pipeline([\n ('vect', CountVectorizer()),\n ('clf', ComplementNB())\n ])\n\n ##### End of your work ######\n return nb_pipeline", "def sample_bigrams(vectors, n, x_k):\n grams = [sample_bigram(vectors) for _ in range(n)]\n w1 = [w[0] for w in grams]\n w2 = [w[1] for w in grams]\n x, y = process_sequences(w1, w2)\n return x, one_hot_2d(y, x_k)", "def train(self, corpus): \n for sentence in corpus.corpus: # iterate over sentences in the corpus\n for token in sentence: # iterate over datums in the sentence\n self.unigrams[token] += 1\n self.total += 1\n V = len(self.unigrams) # vocabulary size \n for ug,count in self.unigrams.iteritems():\n \tself.f1[ug] = math.log10(count+1) - math.log10(self.total + V)", "def relabelling(run):\n np.random.seed((run ** 5 + 1323002) % 123123) # np.random.seed() alternatively\n\n Xtr, Str, Xts, Yts = data_cache[dset]\n X_train, X_val, y_train, y_val = 
train_test_split(Xtr, Str, test_size=prop)\n # clf1 is the first classifier while clf2 is the second\n if dset == 2:\n clf1 = svm.SVC(C=2.5, gamma=0.000225, probability=True, max_iter=max_itera)\n else:\n clf1 = svm.SVC(gammma = 'scale',probability=True, max_iter=max_itera)\n if run == 1:\n print(\"learn pre training model:\")\n clf1.fit(X_train, y_train)\n if run == 1:\n print(\"calculating weighting and fit final model:\")\n bb = clf1.predict_proba(X_train)\n nn = len(y_train)\n ind = np.where(abs(bb[:, 1] - y_train) >= 0.5)\n y_train[ind] = 1 - y_train[ind]\n ind_p = int(nn / 3)\n ind5 = np.hstack((np.argsort(-bb[:, 1])[0:ind_p], np.argsort(-bb[:, 0])[0:ind_p]))\n if dset == 2:\n clf2 = svm.SVC(gamma=0.000225, max_iter=max_itera)\n else:\n clf2 = svm.SVC(gamma=0.00865, max_iter=max_itera)\n clf2.fit(X_train[ind5, :], y_train[ind5])\n return clf2.score(Xts, Yts)", "def build_models_NLP(train_pos_vec, train_neg_vec):\n Y = [\"pos\"]*len(train_pos_vec) + [\"neg\"]*len(train_neg_vec)\n\n # Use sklearn's BernoulliNB and LogisticRegression functions to fit two models to the training data.\n # For BernoulliNB, use alpha=1.0 and binarize=None\n # For LogisticRegression, pass no parameters\n # YOUR CODE HERE\n from sklearn import linear_model,naive_bayes\n lr = linear_model.LogisticRegression()\n lr_model = lr.fit(train_pos_vec + train_neg_vec, Y)\n\n nb = naive_bayes.BernoulliNB(alpha=1.0, binarize=None)\n nb_model = nb.fit(train_pos_vec + train_neg_vec, Y)\n\n return nb_model, lr_model", "def makeD2VLabels(sequences, **kargs): # refactored from seqAnalzyer \n # from collections import namedtuple # can customize your own attributes (instead of using gensim's attributes such as words and tags)\n import gensim\n def index_label(i): \n return '%s_%s' % (label_prefix, i)\n\n # [params] redundant? \n # cohort_name = kargs.get('cohort', 'diabetes')\n # seq_ptype = kargs.get('seq_ptype', 'regular') # values: regular, random, diag, med, lab ... default: regular\n\n # attributes = D2V.label_attributes # ['codes', 'labels', ] \n\n # [old] use gensim.models.doc2vec.TaggedDocument\n # LabelDoc = namedtuple('LabelDoc', attributes) # a namedtuple with 2 attributes words and tags\n # LabelDoc = namedtuple('LabelDoc', ['words', 'labels'])\n label_prefix = seqparams.TDoc.doc_label_prefix \n exclude = set(string.punctuation)\n all_docs = []\n\n # [input]\n assert sequences is not None and len(sequences) > 0\n\n labels = kargs.get('labels', []) # precomputed sentence labels \n if not labels: \n # df_ldoc = labelDoc(sequences, load_=load_label, seqr='full', sortby='freq', seq_ptype=seq_ptype)\n raise ValueError, \"No user-defined labels given.\"\n \n # [note] below is for generating surrogate class labels \n # labeling_routine = kargs.get('labeler', labelDocByFreqDiag) # any labelDoc*\n # assert hasattr(labeling_routine, '__call__'), \"Invalid labeler: %s\" % labeling_routine\n # labels = mlabels = labeling_routine(sequences, **kargs)\n # labelx = labelize()\n else: \n assert len(labels) == len(sequences)\n\n # label normalization: ensure that each label is a list \n labelx = TDocTag.labelAsIs(labels) # TDocTag.canonicalize(labels)\n print('makeD2VLabels> doc tag examples:\\n%s\\n' % labelx[:10])\n # each element in tagx should be a list\n\n for i, sen in enumerate(sequences):\n if isinstance(sen, str): \n word_list = sen.split() \n else: \n word_list = sen # split is already done\n\n # For every sentences, if the length is less than 3, we may want to discard it\n # as it seems too short. 
\n # if len(word_list) < 3: continue # filter short sentences\n \n tagl = labelx[i] # condition tagl is in the list (multilabel) format\n assert isinstance(tagl, list)\n if isinstance(sen, str): \n sen = ''.join(ch for ch in sen if ch not in exclude) # filter excluded characters\n\n all_docs.append(gensim.models.doc2vec.TaggedDocument(sen.split(), tagl))\n # all_docs.append(LabelDoc(sen.split(), tagl)) # format: sequence (list of tokens) + labels (a list of labels)\n else: \n\n all_docs.append(gensim.models.doc2vec.TaggedDocument(sen, tagl))\n # all_docs.append(LabelDoc(sen, tagl)) # assuming unwanted char already filetered \n\n # Print out a sample for one to view what the structure is looking like \n # print all_docs[0:10]\n for i, doc in enumerate(all_docs[0:5]+all_docs[-5:]): \n print('> doc #%d: %s' % (i, doc))\n # [log] e.g. doc #3: LabelDoc(words=['583.81', '250.41', 'V45.81', ... , '48003'], tags=['362.01_599.0_250.51'])\n\n return all_docs", "def train():\n k = len(accepted_chars)\n enc = \"UTF-8\"\n # Assume we have seen 10 of each character pair. This acts as a kind of\n # prior or smoothing factor. This way, if we see a character transition\n # live that we've never observed in the past, we won't assume the entire\n # string has 0 probability.\n counts = [[10 for i in xrange(k)] for i in xrange(k)]\n \n bigrams = filter_chars(accepted_chars, ngrams(2, counter(counts)))\n for c in open('big.txt').read().decode(enc): bigrams.send(c)\n \n # Normalize the counts so that they become log probabilities. \n # We use log probabilities rather than straight probabilities to avoid\n # numeric underflow issues with long texts.\n # This contains a justification:\n # http://squarecog.wordpress.com/2009/01/10/dealing-with-underflow-in-joint-probability-calculations/\n for row in counts:\n s = float(sum(row))\n for j in xrange(len(row)):\n row[j] = math.log(row[j] / s)\n\n # Find the probability of generating a few arbitrarily choosen good and\n # bad phrases.\n good_probs = [avg_transition_prob(line, counts) \\\n for line in open('good.txt').read().decode(enc).split('\\n') if line]\n bad_probs = [avg_transition_prob(line, counts) \\\n for line in open('bad.txt').read().decode(enc).split('\\n') if line]\n # Assert that we actually are capable of detecting the junk.\n assert min(good_probs) > max(bad_probs)\n\n # And pick a threshold halfway between the worst good and best bad inputs.\n thresh = (min(good_probs) + max(bad_probs)) / 2\n pickle.dump({'mat': counts, 'thresh': thresh}, open('gib_model.pki', 'wb'))", "def Viterbi_Transition(words:Sequence[str], train_bag:Sequence[Tuple[str, str]]=train_tagged_words)-> Sequence[Tuple[str, str]]:\n state = []\n all_tags = list(set([pair[1] for pair in train_bag]))\n\n for word_idx, word in enumerate(words):\n # initialise list of probability column for a given observation\n p = []\n for tag in all_tags:\n if word_idx == 0:\n transition_p = tags_df.loc['.', tag]\n else:\n transition_p = tags_df.loc[state[-1], tag]\n\n # compute emission and state probabilities\n emission_p_parts = word_given_tag(word, tag)\n emission_p = emission_p_parts[0]/emission_p_parts[1]\n\n if word in V:\n state_probability = transition_p * emission_p\n else:\n state_probability = transition_p\n\n p.append(state_probability)\n\n p_max = max(p)\n # getting state for which probability is maximum\n state_max = all_tags[p.index(p_max)]\n state.append(state_max)\n return list(zip(words, state))", "def main():\n num_rows = 500000\n review_df = 
pd.read_csv(\"s3://msia490project/processed_video_reviews.csv\").dropna().head(num_rows)\n # train and test set split\n X_train, X_test, y_train, y_test = train_test_split(review_df['reviewText'], review_df['score'],\n random_state=115)\n # re-run the model pipeline and generate necessary artifacts for making predictions\n best_svm = LinearSVC(random_state=115)\n ngram_range = (1, 3)\n generate_artifacts_for_best_svm_model(best_svm, ngram_range, X_train, y_train)", "def build_model(self , text, n=3): #should be called build_model\n self.n = n\n self.vocab = Counter(words(text))\n\n tokens=tokenize(text)\n for gram in list(ngrams(tokens,self.n)):\n self.lm_dict[tuple(gram[:-1])][gram[-1]]+=1", "def train_model(self, text, labels):\n clf = svm.SVR()\n count_vect = CountVectorizer()\n tfidf_transformer = TfidfTransformer()\n counts = count_vect.fit_transform(text)\n tfidf = tfidf_transformer.fit_transform(counts)\n clf.fit(tfidf, labels)\n\n return clf, count_vect, tfidf_transformer", "def viterbi(self, word_seq):\n # Initialize scores\n scores = [{}]\n path = {}\n # Populate scores\n for i in range(0, len(word_seq)):\n for label in self.label_type_map:\n scores[i][label] = 0\n scores.append({})\n self.initialize(scores, word_seq, path)\n path = self.iterate(scores, word_seq, path)\n return self.identify(scores, word_seq, path)", "def __init__(self, tokens):\n self.mdl = self.train(tokens)", "def __init__(self, tokens):\n self.mdl = self.train(tokens)", "def test_ngram():\n #Some examples of functions usage\n trigram_counts, bigram_counts, unigram_counts, token_count = train_ngrams(S_train)\n print \"#trigrams: \" + str(len(trigram_counts))\n print \"#bigrams: \" + str(len(bigram_counts))\n print \"#unigrams: \" + str(len(unigram_counts))\n print \"#tokens: \" + str(token_count)\n perplexity = evaluate_ngrams(S_dev, trigram_counts, bigram_counts, unigram_counts, token_count, 0.5, 0.4)\n print \"#perplexity: \" + str(perplexity)\n ### YOUR CODE HERE\n ### END YOUR CODE", "def __init__(self, generator, tgt_vocab,\n normalization=\"sents\",\n label_smoothing=0.0,\n use_kl_annealing=False,\n use_kl_freebits=False,\n kl_freebits_margin=0.0,\n kl_annealing_current=0.0,\n kl_annealing_increment=0.0001,\n kl_annealing_warmup_steps=1000,\n image_loss_type='logprob',\n use_local_image_features=False,\n two_step_image_prediction=False\n ):\n self.multimodal_model_type = 'vi-model1'\n\n super(NMTVIModel1LossCompute, self).__init__(generator, tgt_vocab,\n normalization, label_smoothing)\n\n # kl annealing parameters\n self.n_model_updates = 0\n self.use_kl_annealing = use_kl_annealing\n if use_kl_annealing:\n self.kl_annealing_current = kl_annealing_current\n self.kl_annealing_increment = kl_annealing_increment\n self.kl_annealing_warmup_steps = kl_annealing_warmup_steps\n else:\n self.kl_annealing_current = 1.0\n self.kl_annealing_increment = 0.0\n self.kl_annealing_warmup_steps = 0\n\n self.use_kl_freebits = use_kl_freebits\n if use_kl_freebits:\n self.kl_freebits_margin = kl_freebits_margin\n else:\n self.kl_freebits_margin = 0.0\n\n self.image_loss_type = image_loss_type\n self.use_local_image_features = use_local_image_features\n self.two_step_image_prediction = two_step_image_prediction\n self._statistics = onmt.VIStatistics\n\n if image_loss_type == 'categorical':\n self.image_loss_criterion = nn.NLLLoss2d()", "def go(arg):\n # load the IMDB data\n if arg.final:\n train, test = datasets.IMDB.splits(TEXT, LABEL)\n\n TEXT.build_vocab(train, max_size=arg.vocab_size - 2)\n LABEL.build_vocab(train)\n\n 
train_iter, test_iter = data.BucketIterator.splits((train, test), batch_size=arg.batch_size,\n device=d())\n else:\n tdata, _ = datasets.IMDB.splits(TEXT, LABEL)\n train, test = tdata.split(split_ratio=0.8)\n\n TEXT.build_vocab(train, max_size=arg.vocab_size - 2) # - 2 to make space for <unk> and <pad>\n LABEL.build_vocab(train)\n\n train_iter, test_iter = data.BucketIterator.splits((train, test), batch_size=arg.batch_size,\n device=d())\n\n print(f'- nr. of training examples {len(train_iter)}')\n print(f'- nr. of {\"test\" if arg.final else \"validation\"} examples {len(test_iter)}')\n\n if arg.max_length < 0:\n mx = max([input.text[0].size(1) for input in train_iter])\n mx = mx * 2\n print(f'- maximum sequence length: {mx}')\n else:\n mx = arg.max_length\n\n # create the model\n model = Transformer(k=arg.dim_model, heads=arg.num_heads, depth=arg.depth,\n num_tokens=arg.vocab_size, num_classes=NUM_CLS)\n use_cuda = torch.npu.is_available() and not arg.cpu\n device = torch.device(f'npu:{NPU_CALCULATE_DEVICE}')\n\n model = model.to(f'npu:{NPU_CALCULATE_DEVICE}')\n\n opt = Adam(params=model.parameters(), lr=1e-3, betas=(0.9, 0.999), eps=1e-8,\n weight_decay=0, amsgrad=False)\n sch = torch.optim.lr_scheduler.LambdaLR(opt, lambda i: min(i / (arg.lr_warmup / arg.batch_size), 1.0))\n\n # training loop\n seen = 0\n for e in range(arg.num_epochs):\n\n print(f'\\n epoch {e}')\n model.train(True)\n for batch in tqdm.tqdm(train_iter):\n\n opt.zero_grad()\n\n input = batch.text[0].to(f'npu:{NPU_CALCULATE_DEVICE}')\n label = batch.label - 1\n label = label.to(f'npu:{NPU_CALCULATE_DEVICE}')\n\n if input.size(1) > mx:\n input = input[:, :mx]\n out = model(input)\n loss = F.nll_loss(out, label)\n\n loss.backward()\n\n # clip gradients\n # - If the total gradient vector has a length > 1, we clip it back down to 1.\n if arg.gradient_clipping > 0.0:\n nn.utils.clip_grad_norm_(model.parameters(), arg.gradient_clipping)\n\n opt.step()\n sch.step()\n\n seen += input.size(0)\n # tbw.add_scalar('classification/train-loss', float(loss.item()), seen)\n\n with torch.no_grad():\n\n model.train(False)\n tot, cor = 0.0, 0.0\n\n for batch in test_iter:\n\n input = batch.text[0]\n label = batch.label - 1\n\n if input.size(1) > mx:\n input = input[:, :mx]\n out = model(input).argmax(dim=1)\n\n tot += float(input.size(0))\n cor += float((label == out).sum().item())\n\n acc = cor / tot\n print(f'-- {\"test\" if arg.final else \"validation\"} accuracy {acc:.3}')\n # tbw.add_scalar('classification/test-loss', float(loss.item()), e)\n for batch in test_iter:\n input = batch.text[0]\n label = batch.label - 1\n\n if input.size(1) > mx:\n input = input[:, :mx]\n print(input)", "def evaluate(session, model, char_data, tag_data, dict_data, len_data, eval_op, batch_size, verbose=False):\n correct_labels = 0\n total_labels = 0\n\n xArray, yArray, dArray, lArray = reader.iterator(char_data, tag_data, dict_data, len_data, batch_size)\n yp_wordnum = 0\n yt_wordnum = 0\n cor_num = 0\n for x, y, d, l in zip(xArray, yArray, dArray, lArray):\n fetches = [model.loss, model.logits, model.trans]\n feed_dict = {}\n feed_dict[model.input_data] = x\n feed_dict[model.targets] = y\n feed_dict[model.dicts] = d\n feed_dict[model.seq_len] = l\n loss, logits, trans = session.run(fetches, feed_dict)\n\n for logits_, y_, l_ in zip(logits, y, l):\n logits_ = logits_[:l_]\n y_ = y_[:l_]\n viterbi_sequence, _ = tf.contrib.crf.viterbi_decode(logits_, trans)\n\n yp_wordnum += viterbi_sequence.count(2) + viterbi_sequence.count(3)\n yt_wordnum += (y_ == 
2).sum() + (y_ == 3).sum()\n correct_labels += np.sum(np.equal(viterbi_sequence, y_))\n total_labels += l_\n\n start = 0\n for i in range(len(y_)):\n if (y_[i] == 2 or y_[i] == 3):\n flag = True\n for j in range(start, i + 1):\n if y_[j] != viterbi_sequence[j]:\n flag = False\n if flag == True:\n cor_num += 1\n start = i + 1\n P = cor_num / float(yp_wordnum)\n R = cor_num / float(yt_wordnum)\n F = 2 * P * R / (P + R)\n accuracy = 100.0 * correct_labels / float(total_labels)\n return accuracy, P, R, F", "def build_naive_bayes():\n nb_pipeline = None\n \n nb_pipeline = Pipeline([\n ('vect', CountVectorizer()),\n ('clf', ComplementNB()),\n ])\n \n return nb_pipeline", "def model(X, Y, word_to_vec_map, learning_rate = 0.01, num_iterations = 400):\n \n # Get a valid word contained in the word_to_vec_map \n any_word = list(word_to_vec_map.keys())[0]\n \n # Initialize cost. It is needed during grading\n cost = 0\n \n # Define number of training examples\n m = Y.shape[0] # number of training examples\n n_y = len(np.unique(Y)) # number of classes \n n_h = word_to_vec_map[any_word].shape[0] # dimensions of the GloVe vectors \n \n # Initialize parameters using Xavier initialization\n W = np.random.randn(n_y, n_h) / np.sqrt(n_h)\n b = np.zeros((n_y,))\n \n # Convert Y to Y_onehot with n_y classes\n Y_oh = convert_to_one_hot(Y, C = n_y) \n \n # Optimization loop\n for t in range(num_iterations): # Loop over the number of iterations\n for i in range(m): # Loop over the training examples\n \n ### START CODE HERE ### (≈ 4 lines of code)\n # Average the word vectors of the words from the i'th training example\n # def sentence_to_avg(sentence, word_to_vec_map): # return avg\n avg = sentence_to_avg(X[i], word_to_vec_map)\n\n # Forward propagate the avg through the softmax layer. \n # You can use np.dot() to perform the multiplication.\n z = np.dot(W, avg) + b\n a = softmax(z)\n\n # Compute cost using the i'th training label's one hot representation and \"A\" (the output of the softmax)\n cost = - np.sum(Y_oh[i] * a)\n ### END CODE HERE ###\n \n # Compute gradients \n dz = a - Y_oh[i]\n dW = np.dot(dz.reshape(n_y,1), avg.reshape(1, n_h))\n db = dz\n\n # Update parameters with Stochastic Gradient Descent\n W = W - learning_rate * dW\n b = b - learning_rate * db\n \n if t % 100 == 0:\n print(\"Epoch: \" + str(t) + \" --- cost = \" + str(cost))\n pred = predict(X, Y, W, b, word_to_vec_map) #predict is defined in emo_utils.py\n\n return pred, W, b", "def fit(self, X: List[str], y: List[str]) -> None:\n # 1. Add all unique vectors from messages (X)\n for i in range(len(X)):\n for word in X[i].split():\n if self.vectors.get(word):\n if self.vectors[word]['n'].get(y[i]):\n self.vectors[word]['n'][y[i]] += 1\n else:\n self.vectors[word]['n'][y[i]] = 1\n else:\n self.vectors[word] = {'n': {y[i]: 1}}\n self.d += 1\n\n if self.labels_d.get(y[i]):\n self.labels_d[y[i]] += 1\n else:\n self.labels_d[y[i]] = 1\n\n self.labels_p[y[i]] = 1 if not self.labels_p.get(y[i]) else self.labels_p[y[i]] + 1\n\n # 2. Count probabilities in each added vector of each class (label)\n for vector in self.vectors:\n for label in self.labels_d:\n n = 0 if not self.vectors[vector]['n'].get(label) else self.vectors[vector]['n'][label]\n p = (n + self.alpha) / (self.labels_d[label] + (self.d * self.alpha))\n\n if self.vectors[vector].get('p'):\n self.vectors[vector]['p'][label] = p\n else:\n self.vectors[vector]['p'] = {label: p}\n\n # 3. 
Count probability of each class\n sum = 0\n for label in self.labels_p:\n sum += self.labels_p[label]\n\n for label in self.labels_p:\n self.labels_p[label] = self.labels_p[label] / sum", "def predict(cls, input):\n clf = cls.get_model() \n\n input.to_csv(data_dir + 'vdok_predction_src_file.csv')\n\n q = qa_serializer_lang_selector(data_dir)\n q.serialize_record('vdok_predction_src_file.csv', task_name)\n q.select_lang([1], task_name).to_csv(data_dir + data_file, encoding= 'latin1')\n\n pipeline=['pos', 'lemma', 'synset', 'hype', 'hypo']\n\n bnlqd = fex_basic_nlp(data_file, data_dir)\n bnlqd.nlp_run(pipeline[0])\n bnlqd.nlp_run(pipeline[1])\n bnlqd.df_ac_lemma.to_csv(data_dir + 'Lemma-' + data_file, encoding= 'latin1')\n bnlqd.nlp_run(pipeline[2])\n bnlqd.df_ac_synset.to_csv(data_dir + 'Synset-' + data_file , encoding= 'latin1')\n bnlqd.nlp_run(pipeline[3])\n bnlqd.df_ac_hypernyms.to_csv(data_dir + 'Hypernyms-' + data_file, encoding= 'latin1')\n bnlqd.nlp_run(pipeline[4])\n bnlqd.df_ac_hyponyms.to_csv(data_dir + 'Hyponyms-' + data_file, encoding= 'latin1')\n\n bnlpd = fex_basic_nlp(def_file, data_dir, task_name)\n bnlpd.nlp_run(pipeline[0])\n bnlpd.nlp_run(pipeline[1])\n bnlpd.df_ac_lemma.to_csv(data_dir + 'Lemma-P-' + data_file, encoding= 'latin1')\n \n btgqd = bi_trigram(data_file, data_dir)\n btgqd.nlp_run(r'bigram')\n btgqd.nlp_run(r'trigram') \n\n stop_words_d = cls.remove_non_extracted_stop_word(bnlqd.df_ac_lemma, stop_words)\n\n oanc_shelve = oanc_resource + 'ANC-all-lemma-04262014.db'\n oalqd = odi_oanc_lemma_frequency(data_file, oanc_shelve, None, data_dir, stop_words_d) \n oalqd.oanc_lemma_frequency('Lemma-' + data_file, 'Student_Question_Index', 'Pre_Col_Name')\n \n stop_words_hy_d = cls.remove_non_extracted_stop_word(bnlqd.df_ac_lemma, stop_words_hy)\n\n ovlqd = odi_overlapping(data_file, def_file, data_dir, stop_words_d)\n ovlqd.count_overlapping('Lemma-' + data_file, 'Student_Question_Index',\n 'Pre_Col_Name', 'Question_ID', 'Question_ID_Sec',\n 'Lemma-P-' + data_file, 'Question_ID', 'Question_ID_Sec')\n ovlqd.count_overlapping_synset('Synset-' + data_file)\n ovlqd.count_overlapping_hypernyms('Hypernyms-' + data_file, stop_words_hy_d)\n ovlqd.count_overlapping_hyponyms('Hyponyms-' + data_file, stop_words_hy_d)\n\n df_ac_pmi_dist_bigram = cls.bi_trigram_pmi_distribution(pmi_bigram_file, data_dir, \n bnlqd.num_clm_in, btgqd.df_ac_bigram, 'bigram')\n df_ac_pmi_dist_trigram = cls.bi_trigram_pmi_distribution(pmi_trigram_file, data_dir, \n bnlqd.num_clm_in, btgqd.df_ac_trigram, 'Trigram')\n\n df_ac_aggregate = cls.aggregate_plim(bnlqd, oalqd, ovlqd, df_ac_pmi_dist_bigram, df_ac_pmi_dist_trigram,\n bnlpd, specific_count_lemmas, stop_words_pos, task_name)\n df_ac_aggregate.to_csv(data_dir + 'vdok_predction_Aggregate_plim.csv', encoding= 'latin1')\n df_ac_aggregate_item_level = cls.aggregate_item_level_plim(df_ac_aggregate, oalqd.stem_option_name_clm, \n task_name)\n df_ac_aggregate_item_level.to_csv(data_dir + 'vdok_predction_Key_Stem_Passage_Aggregate_plim.csv',\n encoding= 'latin1')\n\n rfrpod = tmv_RF_classify('Independent_Variable_w_Label-Def.csv', data_dir)\n rfrpod.load_data('vdok_predction_Key_Stem_Passage_Aggregate_plim.csv', True, drop_vars, dependent_var)\n clf.perform_prediction(rfrpod.df_ac_modeling_values)\n return clf.df_ac_classified", "def _oim_node2vec_test(\n df,\n df_feats,\n num_inf=10,\n sigma=4,\n c=0.1,\n epsilon=0.4,\n num_repeats=30,\n num_repeats_regret_algo=20,\n num_repeats_regret_true=20,\n num_nodes_tim=-1,\n oracle=tim,\n):\n logger_tlu.debug(\"Started 
Online Influence Maximization...\")\n logger_tlu.debug(\"Setting parameters\")\n num_feats = df_feats.shape[1]\n num_edges_t = df.shape[0]\n\n # \"True\" probabilities - effectively our test set\n true_weights = df[\"probab\"].copy()\n # Using nodes_t[-1] because TIM wants the max node id\n s_true = sorted(\n oracle(\n df[[\"source\", \"target\", \"probab\"]],\n num_nodes_tim,\n num_edges_t,\n num_inf,\n epsilon,\n )\n )\n # Gathering the stats for the \"true\" seed set\n all_true_nodes = []\n all_true_edges = []\n all_true_obs = []\n for k in range(num_repeats_regret_true):\n true_act_nodes, true_act_edges, true_obs_edges = run_ic_eff(df, s_true)\n all_true_nodes.append(true_act_nodes)\n all_true_edges.append(true_act_edges)\n all_true_obs.append(true_obs_edges)\n\n # Means for nodes and activated edges\n mean_true_nodes = np.mean([len(i) for i in all_true_nodes])\n mean_true_edges = np.mean([len(i) for i in all_true_edges])\n mean_true_obs = np.mean([len(i) for i in all_true_obs])\n\n # b, M_inv - used by IMLinUCB\n b = np.zeros((num_feats, 1))\n m_inv = np.eye(num_feats, num_feats)\n\n # Returning these\n s_best = []\n reward_best = 0\n u_e_best = []\n regrets = []\n regrets_edges = []\n\n for iter_oim in tqdm(\n range(num_repeats),\n desc=f\"OIM iters {num_edges_t} edges\",\n leave=False,\n file=sys.stderr,\n ):\n # ---- Step 1 - Calculating the u_e ----\n theta = (m_inv @ b) / (sigma * sigma)\n # xMx = (df_feats.values @ m_inv @ df_feats.T.values).clip(min=0)\n\n u_e = []\n for i in range(num_edges_t):\n x_e = df_feats.loc[i].values\n xMx = x_e @ m_inv @ x_e.T # .clip(min=0)\n u_e.append(np.clip(x_e @ theta + c * np.sqrt(xMx), 0, 1))\n # u_e.append(expit(x_e @ theta + c * np.sqrt(xMx)))\n\n u_e = np.array(u_e)\n\n # ---- Step 2 - Evaluating the performance ----\n # Loss function\n df[\"probab\"] = u_e\n s_oracle = sorted(\n oracle(\n df[[\"source\", \"target\", \"probab\"]],\n num_nodes_tim,\n num_edges_t,\n num_inf,\n epsilon,\n )\n )\n\n # Observing edge-level feedback\n df[\"probab\"] = true_weights\n\n all_algo_nodes = []\n all_algo_edges = []\n all_algo_obs = []\n for k in range(num_repeats_regret_algo):\n algo_act_nodes, algo_act_edges, algo_obs_edges = run_ic_eff(df, s_oracle)\n all_algo_nodes.append(algo_act_nodes)\n all_algo_edges.append(algo_act_edges)\n all_algo_obs.append(algo_obs_edges)\n\n # Mean node counts\n mean_algo_nodes = np.mean([len(i) for i in all_algo_nodes])\n # Mean activated edge counts\n mean_algo_edges = np.mean([len(i) for i in all_algo_edges])\n mean_algo_obs = np.mean([len(i) for i in all_algo_obs])\n\n # Used for updating M and b later\n all_algo_edges = np.unique(np.concatenate(all_algo_edges))\n all_algo_obs = np.unique(np.concatenate(all_algo_obs))\n\n regrets.append(mean_true_nodes - mean_algo_nodes)\n regrets_edges.append(mean_true_edges - mean_algo_edges)\n\n logger_tlu.debug(f\"True seeds: {s_true}\")\n logger_tlu.debug(f\"Algo seeds: {s_oracle}\")\n logger_tlu.debug(\n \"Diff between true and algo seeds: \"\n f\"{len(np.setdiff1d(s_true, s_oracle))}\"\n )\n logger_tlu.debug(f\"True reward: {mean_true_nodes}\")\n logger_tlu.debug(f\"Algo reward: {mean_algo_nodes}\")\n logger_tlu.debug(f\"Best algo reward: {reward_best}\")\n logger_tlu.debug(f\"Regrets: {regrets}\")\n logger_tlu.debug(f\"Edge regrets: {regrets_edges}\")\n logger_tlu.debug(f\"Observed diff: {mean_true_obs - mean_algo_obs}\")\n logger_tlu.debug(f\"Algo weights {u_e[80:90]}\".replace(\"\\n\", \"\"))\n logger_tlu.debug(f\"Real weights {true_weights[80:90]}\".replace(\"\\n\", 
\"\"))\n\n if mean_algo_nodes > reward_best:\n reward_best = mean_algo_nodes\n s_best = s_oracle\n u_e_best = u_e\n\n if mean_algo_nodes > mean_true_nodes:\n logger_tlu.debug(\n \"The algorithm has achieved better reward than the true seed node set.\"\n )\n logger_tlu.debug(\"Stopping learning.\")\n logger_tlu.debug(f\"Best algo seed node set: {s_best}\")\n return_dict = {\n \"regrets\": regrets,\n \"regrets_edges\": regrets_edges,\n \"s_true\": s_true,\n \"s_best\": s_best,\n \"u_e_best\": u_e_best,\n \"reward_best\": reward_best,\n }\n logger_tlu.debug(f\"Returning {return_dict}\")\n return return_dict\n\n # ---- Step 3 - Calculating updates ----\n for i in all_algo_obs:\n x_e = np.array([df_feats.loc[i].values])\n m_inv -= (m_inv @ x_e.T @ x_e @ m_inv) / (\n x_e @ m_inv @ x_e.T + sigma * sigma\n )\n b += x_e.T * int(i in all_algo_edges)\n\n return_dict = {\n \"regrets\": regrets,\n \"regrets_edges\": regrets_edges,\n \"s_true\": s_true,\n \"s_best\": s_best,\n \"u_e_best\": u_e_best,\n \"reward_best\": reward_best,\n }\n logger_tlu.debug(\"The algorithm has finished running.\")\n logger_tlu.debug(f\"Returning: {return_dict}\")\n return return_dict", "def nn(model, text, vectors, query, k=5):\n\tqf = encode(model, [query])\n\tqf /= norm(qf)\n\tscores = numpy.dot(qf, vectors.T).flatten()\n\tsorted_args = numpy.argsort(scores)[::-1]\n\tsentences = [text[a] for a in sorted_args[:k]]\n\tprint ('QUERY: ' + query)\n\tprint ('NEAREST: ')\n\tfor i, s in enumerate(sentences):\n\t\tprint (s, sorted_args[i])", "def test_ngram():\n # Some examples of functions usage\n trigram_counts, bigram_counts, unigram_counts, token_count = train_ngrams(S_train)\n print \"#trigrams: \" + str(len(trigram_counts))\n print \"#bigrams: \" + str(len(bigram_counts))\n print \"#unigrams: \" + str(len(unigram_counts))\n print \"#tokens: \" + str(token_count)\n perplexity = evaluate_ngrams(S_dev, trigram_counts, bigram_counts, unigram_counts, token_count, 0.5, 0.4)\n print \"#perplexity: \" + str(perplexity)\n ### YOUR CODE HERE\n print(vocabsize)\n ### END YOUR CODE", "def get_viterbi_pairwise_potentials(self):\r\n all_labels = self.vocab.get_index_to_token_vocabulary(\"labels\")\r\n num_labels = len(all_labels)\r\n transition_matrix = torch.zeros([num_labels, num_labels])\r\n\r\n for i, previous_label in all_labels.items():\r\n for j, label in all_labels.items():\r\n # I labels can only be preceded by themselves or\r\n # their corresponding B tag.\r\n if i != j and label[0] == 'I' and not previous_label == 'B' + label[1:]:\r\n transition_matrix[i, j] = float(\"-inf\")\r\n return transition_matrix", "def gen(length):\n return itertools.product(LABELS,repeat=length)", "def optimalize(): \n start = time()\n max = 0\n maxn=2\n maxm=3\n check = [(n,m) for n in range(24,30) for m in range(3,20)]\n dict = {}\n print \"start optimalization of: bigram-features,uniqueness\"\n for n,m in check:\n score=0\n print \">lem>>n(uniqueness):\"+str(n)\n print \">lem>>m(commonness):\"+str(m)\n wrds = common_but_unique(ngrams_dict(1,authors,compactcorpus,n,False),m)\n bigrams = common_but_unique(ngrams_dict(2,authors,compactcorpus,n,False),m)\n trigrams = common_but_unique(ngrams_dict(3,authors,compactcorpus,n,False),m)\n #pos_feat = [\"wrd:\"+wrd+\">\"+str(num) for wrd in wrds for num in range(0,1)]\n pos_feat = [\"bi:(\"+str(bi[0])+\",\"+str(bi[1])+\")>\"+str(num) for bi in bigrams for num in range(0,1)] + [\"wrd:\"+wrd+\">\"+str(num) for wrd in wrds for num in range(0,1)] + 
[\"tri:(\"+str(tri[0])+\",\"+str(tri[1])+\",\"+str(tri[2])+\")>\"+str(num) for tri in trigrams for num in range(0,1)]\n\n print \"number of features AFTER selection:\" + str(len(pos_feat))\n for x in range(0,4):\n data = split_train_test_data(authors, corp,45)\n train_set = [(feat_dict(pos_feat,d), c) for (d, c) in data[\"train\"]]\n train_set = [(feat_dict(pos_feat,d), c) for (d, c) in data[\"train\"]]\n test_set = [(feat_dict(pos_feat,d), c) for (d, c) in data[\"test\"]]\n classifier1 = NaiveBayesClassifier.train(train_set)\n acc = nltk.classify.accuracy(classifier1,test_set)\n print \"accuracy:\"+str(acc)\n score +=acc\n print \"time elapsed: \"+str(time()-start)\n print \"score(\" + str(n) +\")=\"+str(score/4)\n classifier1.show_most_informative_features(8)\n dict[(n,m)]=(score/4)\n if(score/4)>max:\n max = (score/4)\n maxn =n\n maxm = m\n print \"max score=\"+str(max)\n print \"where n = \"+str(maxn)\n print \"where m = \"+str(maxm)\n print \"time:\"+str(time()-start)\n writetofile(dict,\"optimalizedict_commonwrdsandbigrams_latest_lem.pkl\")", "def l2_learning(self, msg):\n pass", "def modified_gram_schmidt_step_arnoldi(j, vals):\n vector, krylov_vectors, n, H = vals\n v = krylov_vectors[j, :]\n h = jax.numpy.vdot(v, vector)\n H = jax.ops.index_update(H, jax.ops.index[j, n], h)\n vector = vector - h * jax.numpy.reshape(v, vector.shape)\n return [vector, krylov_vectors, n, H]", "def train_naive(): # add arguments as needed\n pass", "def __init__(self, input_n, input_t, input_language, input_epochs, input_embedding_type, input_clusters_num,\n input_training_data, input_evaluation_data, input_hunits_lower, input_hunits_upper,\n input_embedding_dim_lower, input_embedding_dim_upper, input_c, input_iterations):\n self.n = input_n\n self.t = input_t\n self.language = input_language\n self.epochs = input_epochs\n self.embedding_type = input_embedding_type\n self.clusters_num = input_clusters_num\n self.training_data = input_training_data\n self.evaluation_data = input_evaluation_data\n self.hunits_lower = input_hunits_lower\n self.hunits_upper = input_hunits_upper\n self.embedding_dim_lower = input_embedding_dim_lower\n self.embedding_dim_upper = input_embedding_dim_upper\n self.c = input_c\n self.iterations = input_iterations\n\n # Setting self.lambda to the number of the parameters of the largest possible model\n word_segmenter = WordSegmenter(input_name=\"temp\", input_n=50, input_t=10000,\n input_clusters_num=self.clusters_num,\n input_embedding_dim=self.embedding_dim_upper, input_hunits=self.hunits_upper,\n input_dropout_rate=0.2, input_output_dim=4, input_epochs=1,\n input_training_data=self.training_data,\n input_evaluation_data=self.evaluation_data, input_language=self.language,\n input_embedding_type=self.embedding_type)\n word_segmenter.train_model()\n self.lam = 1/word_segmenter.model.count_params()", "def test_Bernoulli_NB_estimators():", "def main(model=None, output_dir=None, n_iter=20):\n if model is not None:\n nlp = spacy.load(model) # load existing spaCy model\n print(\"Loaded model '%s'\" % model)\n else:\n nlp = spacy.blank('en') # create blank Language class\n print(\"Created blank 'en' model\")\n\n # create the built-in pipeline components and add them to the pipeline\n # nlp.create_pipe works for built-ins that are registered with spaCy\n if 'ner' not in nlp.pipe_names:\n ner = nlp.create_pipe('ner')\n nlp.add_pipe(ner, last=True)\n # otherwise, get it so we can add labels\n else:\n ner = nlp.get_pipe('ner')\n\n # add labels\n for _, annotations in TRAIN_DATA:\n for ent 
in annotations.get('entities'):\n ner.add_label(str(ent[2]))\n\n # get names of other pipes to disable them during training\n other_pipes = [pipe for pipe in nlp.pipe_names if pipe != 'ner']\n with nlp.disable_pipes(*other_pipes): # only train NER\n optimizer = nlp.begin_training()\n for itn in range(n_iter):\n random.shuffle(TRAIN_DATA)\n losses = {}\n for text, annotations in TRAIN_DATA:\n nlp.update(\n [text], # batch of texts\n [annotations], # batch of annotations\n drop=0.5, # dropout - make it harder to memorise data\n sgd=optimizer, # callable to update weights\n losses=losses)\n print(losses)\n \n # test the trained model\n for text, _ in TRAIN_DATA:\n doc = nlp(text)\n print('Entities', [(ent.text, ent.label_) for ent in doc.ents])\n print('Tokens', [(t.text, t.ent_type_, t.ent_iob) for t in doc])\n \n # save model to output directory\n if output_dir is not None:\n print(output_dir)\n output_dir = Path(output_dir)\n if not output_dir.exists():\n output_dir.mkdir()\n nlp.to_disk(output_dir)\n print(\"Saved model to\", output_dir)\n\n # test the saved model\n print(\"Loading from\", output_dir)\n nlp2 = spacy.load(output_dir)\n for text, _ in TRAIN_DATA:\n doc = nlp2(text)\n print('Entities', [(ent.text, ent.label_) for ent in doc.ents])\n print('Tokens', [(t.text, t.ent_type_, t.ent_iob) for t in doc])", "def viterbi(self, observation):\n N=len(observation)\n tab=[[0]*self.nStates for i in range(N)]\n backtrack=[[-1]*self.nStates for i in range(N)]\n if not self.logdomain:\n self.__convert_to_log()\n\n for i in range(self.nStates):\n tab[0][i]=self.e[i][observation[0]]+self.pi[i]\n \n for i in range(1,N):\n for j in range(self.nStates):\n smax=-1\n maxval=float('-inf')\n for s in range(self.nStates):\n cs=tab[i-1][s]+self.t[s][j]\n if cs>maxval:\n smax=s\n maxval=cs\n assert(smax>-1 and smax<self.nStates)\n tab[i][j]=self.e[j][observation[i]]+maxval\n backtrack[i][j]=smax\n\n smax=-1\n llike=float('-inf')\n for s in range(self.nStates):\n if llike<tab[N-1][s]:\n llike=tab[N-1][s]\n smax=s\n\n best=[-1]*N\n best[-1]=smax\n for i in range(N-2, -1, -1):\n best[i]=backtrack[i+1][best[i+1]]\n\n return best, llike", "def main(model=None, output_dir=None, n_iter=10):\n if model is not None:\n nlp = spacy.load(model) # load existing spaCy model\n print(\"Loaded model '%s'\" % model)\n else:\n nlp = spacy.blank('en') # create blank Language class\n print(\"Created blank 'en' model\")\n\n # add the parser to the pipeline if it doesn't exist\n # nlp.create_pipe works for built-ins that are registered with spaCy\n if 'parser' not in nlp.pipe_names:\n parser = nlp.create_pipe('parser')\n nlp.add_pipe(parser, first=True)\n # otherwise, get it, so we can add labels to it\n else:\n parser = nlp.get_pipe('parser')\n\n # add labels to the parser\n for _, annotations in TRAIN_DATA:\n for dep in annotations.get('deps', []):\n parser.add_label(dep)\n\n # get names of other pipes to disable them during training\n other_pipes = [pipe for pipe in nlp.pipe_names if pipe != 'parser']\n with nlp.disable_pipes(*other_pipes): # only train parser\n optimizer = nlp.begin_training()\n for itn in range(n_iter):\n random.shuffle(TRAIN_DATA)\n losses = {}\n for text, annotations in TRAIN_DATA:\n nlp.update([text], [annotations], sgd=optimizer, losses=losses)\n print(losses)\n\n # test the trained model\n test_text = \"It was back in 2007 that hip-hop bible XXL launched its first ever Freshman Class, a list of ten up-and-coming artists poised to change the rap game for good. 
The last decade has seen more than a hundred stars spotlighted as part of the list and its accompanying annual cover feature, but this year features a history-making entry: Stefflon Don. The talented star has already built a strong reputation for herself in the UK; her unique blend of hard-hitting raps and smooth, dancehall beats has galvanized the scene, earning her critical acclaim and a series of impressive chart positions. Now, she seems ready to achieve the unthinkable: global stardom. Earlier this year, her infectious hit “Hurtin’ Me” – featuring former XXL Freshman French Montana – ascended the Billboard charts, peaking at no. 7 and confirming her US fanbase; but could she truly become the first artist to crack the US? And, more importantly, why has it taken so long for UK rappers to achieve Stateside success?\"\n doc = nlp(test_text)\n print('Dependencies', [(t.text, t.dep_, t.head.text) for t in doc])\n sentence_spans = list(doc.sents)\n displacy.serve(sentence_spans, style='dep')\n\n # save model to output directory\n if output_dir is not None:\n output_dir = Path(output_dir)\n if not output_dir.exists():\n output_dir.mkdir()\n nlp.to_disk(output_dir)\n print(\"Saved model to\", output_dir)\n\n # test the saved model\n print(\"Loading from\", output_dir)\n nlp2 = spacy.load(output_dir)\n doc = nlp2(test_text)\n print('Dependencies', [(t.text, t.dep_, t.head.text) for t in doc])", "def Create_truven(object):\n\tdef __init__(self, is_train = True, **config):\n\t\tprint(2)\n\t\tpass\n\n\t\t'''\n\t\tself.is_train = is_train\n\t\tfilename = config['train_file'] if is_train else config['test_file']\n\t\tbatch_size = config['batch_size']\n\t\tself.admis_dim = config['input_dim']\n\t\tself.max_length = config['max_length']\t\t\n\t\twith open(filename, 'r') as fin:\n\t\t\tlines = fin.readlines()\n\t\t\tf1 = lambda x:[int(i) for i in x.rstrip().split(';')[-1].split(' ')]\n\t\t\tself.label = list(map(f1, lines))\n\t\t\tf2 = lambda x:[[int(j) for j in i.split(' ')] for i in x.rstrip().split(config['separate_symbol_between_visit'])[:-1]]\n\t\t\tself.data_lst = list(map(self.line_to_visit_level, lines))\n\t\t\tadd = lambda x,y:x+y\n\t\t\tfrom functools import reduce\n\t\t\tf3 = lambda x:list(set(reduce(add,x)))\n\t\t\tself.data_decoder = list(map(f3, self.data_lst))\n\t\t\tdel lines\n\t\tself.batch_size = batch_size\n\t\tself.total_num = len(self.label)\n\t\tself.batch_num = int(np.ceil(self.total_num / self.batch_size))\n\t\tself.batch_id = 0 \n\t\tself.random_shuffle = np.arange(self.total_num) ### no shuffle at first epoch \n\t\t'''\n\t'''\n\tdef next(self):\n\t\tbgn = self.batch_id * self.batch_size\n\t\tendn = bgn + self.batch_size\n\t\tself.batch_id += 1\n\t\tif self.batch_id > self.batch_num - 1:\n\t\t\tself.batch_id = 0\n\t\treturn self.label[bgn:endn], self.data_lst[bgn:endn], self.data_decoder[bgn:endn]\n\t\t#data, label = self.data_lst[bgn:endn], self.label[bgn:endn]\n\t\t#return data, label\n\t'''", "def a_test_bbvi_mini_batch():\n model = ARIMAX(formula=\"y ~ x1\", data=data, ar=1, ma=1, family=Exponential())\n x = model.fit('BBVI',iterations=100, mini_batch=32)\n assert(len(model.latent_variables.z_list) == 4)\n lvs = np.array([i.value for i in model.latent_variables.z_list])\n assert(len(lvs[np.isnan(lvs)]) == 0)", "def get_features(self, para, label_list, tokenizer, max_seq_length):\n\t\tlabel_map = {label : i for i, label in enumerate(label_list)}\n# self.reverse_label_map = {v: k for k, v in label_map.items()}\n\t\tguid = \"%s-%s\" % (\"test\", 1)\n\t\ttext_a = 
para[\"model_answer\"]\n\t\ttext_b = para[\"candidate_answer\"]\n\t\tlabel = label_list[0]\n\t\texample = InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)\n\t\t\n\t\ttokens_a = tokenizer.tokenize(example.text_a)\n\n\t\ttokens_b = tokenizer.tokenize(example.text_b)\n\t\tif example.text_b:\n\t\t\ttokens_b = tokenizer.tokenize(example.text_b)\n\t\t\t# Modifies `tokens_a` and `tokens_b` in place so that the total\n\t\t\t# length is less than the specified length.\n\t\t\t# Account for [CLS], [SEP], [SEP] with \"- 3\"\n\t\t\tself._truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n\t\telse:\n\t\t\t# Account for [CLS] and [SEP] with \"- 2\"\n\t\t\tif len(tokens_a) > max_seq_length - 2:\n\t\t\t\ttokens_a = tokens_a[:(max_seq_length - 2)]\n\n\t\t# The convention in BERT is:\n\t\t# (a) For sequence pairs:\n\t\t# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n\t\t# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n\t\t# (b) For single sequences:\n\t\t# tokens: [CLS] the dog is hairy . [SEP]\n\t\t# type_ids: 0 0 0 0 0 0 0\n\t\t#\n\t\t# Where \"type_ids\" are used to indicate whether this is the first\n\t\t# sequence or the second sequence. The embedding vectors for `type=0` and\n\t\t# `type=1` were learned during pre-training and are added to the wordpiece\n\t\t# embedding vector (and position vector). This is not *strictly* necessary\n\t\t# since the [SEP] token unambigiously separates the sequences, but it makes\n\t\t# it easier for the model to learn the concept of sequences.\n\t\t#\n\t\t# For classification tasks, the first vector (corresponding to [CLS]) is\n\t\t# used as as the \"sentence vector\". Note that this only makes sense because\n\t\t# the entire model is fine-tuned.\n\t\ttokens = [\"[CLS]\"] + tokens_a + [\"[SEP]\"]\n\t\tsegment_ids = [0] * len(tokens)\n\n\t\tif tokens_b:\n\t\t\ttokens += tokens_b + [\"[SEP]\"]\n\t\t\tsegment_ids += [1] * (len(tokens_b) + 1)\n\n\t\tinput_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n\t\t# The mask has 1 for real tokens and 0 for padding tokens. Only real\n\t\t# tokens are attended to.\n\t\tinput_mask = [1] * len(input_ids)\n\n\t\t# Zero-pad up to the sequence length.\n\t\tpadding = [0] * (max_seq_length - len(input_ids))\n\t\tinput_ids += padding\n\t\tinput_mask += padding\n\t\tsegment_ids += padding\n\n\t\tassert len(input_ids) == max_seq_length\n\t\tassert len(input_mask) == max_seq_length\n\t\tassert len(segment_ids) == max_seq_length\n\t\tlabel_id = label_map[example.label]\n# print(\"*** Example ***\")\n# print(\"guid: %s\" % (example.guid))\n# print(\"tokens: %s\" % \" \".join(\n# [str(x) for x in tokens]))\n# print(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n# print(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n# print(\"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n\n\t\t\n\t\treturn InputFeatures(input_ids=input_ids,\n\t\t\t\t\t\t\t input_mask=input_mask,\n\t\t\t\t\t\t\t segment_ids=segment_ids,\n\t\t\t\t\t\t\t label_id=label_id)", "def train_SVM(data: np.array, labels: np.array)->None:\n print(\"SVM is not implemented yet!\")", "def smooth_tag_model(cls, heldout_data):\n\n # bi_transition_counts = defaultdict(int)\n n = 0 # count word-tags\n e = .0001 # stopping condition\n L = [.25, .25, .25, .25] # initialize lambdas uniformly\n i = 1 # iteration\n while True:\n # E Step (Step 1)\n # Iterate through all occurring trigrams\n # in the heldout.txt data (H), i.e. 
minimizing\n # log likelihood\n counts = [0, 0, 0, 0]\n ratio = [0, 0, 0, 0]\n nextL = 4 * [0] # next lambda\n\n for sent in heldout_data:\n\n # Handle beginning of sentence\n t = '<s>'\n u = '<s>'\n # bi_transition_counts[t, u] += 1\n\n # if first == '<s>' and second == '<s>': # add bigram for nulls\n\n for word, tag in sent:\n v = tag\n # tri_transitions[t, u, v] += 1\n # bi_transitions[u, v] += 1\n # # if first == '<s>' and second == '<s>': # add bigram for nulls\n # # tagger._bi_transitions[('<s>', '<s>')] += 1\n # uni_transitions[v] += 1\n # cls._emissions[word, tag] += 1\n # tags.add(v)\n # words.add(word)\n # uni_words[word] += 1\n\n\n # Calculate expected counts of lambdas\n ratio = cls.calc_tag_ratio(t, u, v, L)\n\n # M-step (Step 2)\n # Calculate expected counts of lambdas, i.e. weight, taking\n # into account the number of occurrences of each trigram (cnt)\n for j in range(len(L)):\n counts[j] += ratio[j] # weight of lambda in whole equation (count)\n\n t = u\n u = v\n # n += 1\n\n # Handle end of sentence\n # tri_transitions[t, u, '</s>'] += 1\n v = '</s>'\n ratio = cls.calc_tag_ratio(t, u, v, L)\n for j in range(len(L)):\n counts[j] += ratio[j] # weight of lambda in whole equation (count)\n\n # cls._bi_transitions[first, second] += 1\n # cls._tri_transitions[second, '</s>', '</s>'] += 1\n\n\n # Update values for parameters given current distribution\n for k in range(len(L)):\n total = np.sum(counts)\n nextL[k] = counts[k] / total # next lambda\n\n # Check if lambda values have converged\n converged = True\n for l in range(len(L)):\n if np.abs(nextL[l] - L[l]) > e: # tolerance = e\n converged = False\n L = nextL\n\n # Return values if lambdas have converged\n if converged:\n break\n\n i += 1 # increment iteration counter\n\n\n return L # copy lambdas passed by reference", "def feature_vecs_NLP(train_pos, train_neg, test_pos, test_neg):\n # English stopwords from nltk\n stopwords = set(nltk.corpus.stopwords.words('english'))\n \n # Determine a list of words that will be used as features. 
\n # This list should have the following properties:\n # (1) Contains no stop words\n # (2) Is in at least 1% of the positive texts or 1% of the negative texts\n # (3) Is in at least twice as many postive texts as negative texts, or vice-versa.\n # YOUR CODE HERE\n\n pos_unique_words = []\n neg_unique_words = []\n intermediate_vec = []\n feature_vec = []\n\n for line in train_pos:\n line = list(set(line))\n for word in line:\n if word not in stopwords:\n pos_unique_words.append(word)\n\n for line in train_neg:\n line = list(set(line))\n for word in line:\n if word not in stopwords:\n neg_unique_words.append(word)\n\n\n pos_word_dict = collections.Counter(pos_unique_words)\n neg_word_dict = collections.Counter(neg_unique_words)\n\n unique_words = list(set(pos_word_dict.keys()).intersection(set(neg_word_dict.keys())))\n\n for word in unique_words:\n if(pos_word_dict[word] >= 0.01*len(train_pos) or neg_word_dict[word] >= 0.01*len(train_neg)):\n intermediate_vec.append(word)\n\n for word in intermediate_vec:\n if (int(pos_word_dict[word]) >= 2*int(neg_word_dict[word])or neg_word_dict[word] >= 2*pos_word_dict[word]):\n feature_vec.append(word)\n\n train_pos_vec = []\n train_neg_vec = []\n test_pos_vec = []\n test_neg_vec = []\n # Using the above words as features, construct binary vectors for each text in the training and test set.\n # These should be python lists containing 0 and 1 integers.\n # YOUR CODE HERE\n for line in train_pos:\n lst = []\n for word in feature_vec:\n if word in line:\n lst.append(1)\n else:\n lst.append(0)\n train_pos_vec.append(lst)\n\n for line in train_neg:\n lst = []\n for word in feature_vec:\n if word in line:\n lst.append(1)\n else:\n lst.append(0)\n train_neg_vec.append(lst)\n\n for line in test_pos:\n lst = []\n for word in feature_vec:\n if word in line:\n lst.append(1)\n else:\n lst.append(0)\n test_pos_vec.append(lst)\n\n for line in test_neg:\n lst = []\n for word in feature_vec:\n if word in line:\n lst.append(1)\n else:\n lst.append(0)\n test_neg_vec.append(lst)\n\n # Return the four feature vectors\n return train_pos_vec, train_neg_vec, test_pos_vec, test_neg_vec", "def train(self, tagged_sentences: Iterator[Tuple[TokenSeq, PosSeq]]) -> Tuple[NDArray, NDArray]:\n #add tokens\n for sentence in tagged_sentences:\n tokens, pos_tags = sentence\n for pos in pos_tags:\n self.pos_tags.append(pos)\n pos_tags.insert(0, \"<s>\")\n pos_tags.pop(len(pos_tags) - 1)\n for i in range(0, len(tokens)):\n temp_dict = {}\n temp_dict = add_features(tokens,pos_tags[i],i, temp_dict)\n self.features.append(temp_dict)\n #print(self.features)\n feature_matrix = self.vectorizer.fit_transform(self.features)\n label_vector = self.le.fit_transform(self.pos_tags)\n for i in range(0, len(label_vector)):\n self.l[self.pos_tags[i]] = i\n \n self.feature_matrix = feature_matrix\n self.label_vector = label_vector\n self.clf.fit(self.feature_matrix, self.label_vector)\n\n return (self.feature_matrix, label_vector)", "def naiveBayes(self, vector):\n \n # Find product of all Unigram and Bigram probabilities, which are already converted to log probabilities\n value = 0 \n for word1 in vector.words.iterkeys():\n unigram, unigramExists = self.getUnigram(word1)\n if unigramExists:\n value += self.words[word1].probability\n else:\n # Use add one smoothing for unseen events, P(unseen) = 1 / (numSeenInstances + sizeVocab) \n value += math.log(1 / (self.numTokens + len(self.words)), 2)\n for word2 in vector.words[word1].bigrams.iterkeys():\n bigram, bigramExists = self.getBigram(word1, word2) \n if 
bigramExists:\n value += self.words[word1].bigrams[word2].probability\n else:\n # Use add one smoothing for unseen events, P(unseen) = 1 / (numSeenInstances + sizeVocab)\n count = 0 \n if word1 in self.words:\n count = self.words[word1].count \n else:\n count = 1 \n value += math.log(1 / (count + len(self.words)), 2)\n \n return value", "def train(feat_file,model_dir,M,ivector_dim=None,num_gselect=None):\n if num_gselect==None or ivector_dim == None:\n k=int(np.log2(M))\n if num_gselect==None:\n num_gselect=k+1\n if ivector_dim==None:\n # Read to obtain the dimension of the feature vector\n for key,mat in kaldi_io.read_mat_scp(feat_file):\n feat_dim=mat.shape[1]\n break\n ivector_dim=k*feat_dim\n os.system(\"./train_ivector_models.sh \"+str(M) +\" \"+ str(ivector_dim) + \" \" + str(num_gselect) + \" \" + feat_file + \" \" + model_dir)\n return num_gselect", "def train_ori(self, model, edges, G, chunksize=150, iter=1):\n assert model.node_embedding.dtype == np.float32\n\n log.info(\"O1 training model with %i workers on %i vocabulary and %i features and 'negative sampling'=%s\" %\n (self.workers, len(model.vocab), model.layer1_size, self.negative))\n\n if not model.vocab:\n raise RuntimeError(\"you must first build vocabulary before training the model\")\n\n edges = RepeatCorpusNTimes(edges, iter)\n total_node = edges.corpus.shape[0] * edges.corpus.shape[1] * edges.n\n log.debug('total edges: %d' % total_node)\n start, next_report, node_count = time.time(), [5.0], [0]\n\n #int(sum(v.count * v.sample_probability for v in self.vocab.values()))\n jobs = Queue(maxsize=2*self.workers) # buffer ahead only a limited number of jobs.. this is the reason we can't simply use ThreadPool :(\n lock = threading.Lock()\n\n\n def worker_train():\n \"\"\"Train the model, lifting lists of paths from the jobs queue.\"\"\"\n py_work = np.zeros(model.layer1_size, dtype=np.float32)\n\n while True:\n job = jobs.get(block=True)\n if job is None: # data finished, exit\n jobs.task_done()\n # print('thread %s break' % threading.current_thread().name)\n break\n\n lr = max(self.min_lr, self.lr * (1 - 1.0 * node_count[0]/total_node))\n #lr = self.lr \n job_words = 0\n #out_i = 0\n for edge in job:\n if edge is not None:\n weight = G[model.vocab_t[edge[0].index]][model.vocab_t[edge[1].index]]['weight']\n neg_l = []\n #负样本node选取和主node不连通的点\n min_node0, min_conn_tup = sorted(model.connected_path[model.vocab_t[edge[0].index]].items(), key=lambda x:x[1][0])[0]\n min_conn0 = min_conn_tup[0]\n min_node1, min_conn_tup = sorted(model.connected_path[model.vocab_t[edge[1].index]].items(), key=lambda x:x[1][0])[0]\n min_conn1 = min_conn_tup[0]\n for i in range(self.negative):\n nodeidx = model.table[np.random.randint(model.table_size - 1)]\n if (model.vocab_t[nodeidx] not in model.connected_path[model.vocab_t[edge[0].index]]\n or (model.connected_path[model.vocab_t[edge[0].index]][model.vocab_t[nodeidx]][0] <= max(0.1,min_conn0))) \\\n and (model.vocab_t[nodeidx] not in model.connected_path[model.vocab_t[edge[1].index]]\n or (model.connected_path[model.vocab_t[edge[1].index]][model.vocab_t[nodeidx]][1] <= max(0.1,min_conn1))):\n neg_l.append(nodeidx)\n if len(neg_l) == 0:\n neg_l.append(model.vocab[min_node0].index)\n neg_l.append(model.vocab[min_node1].index)\n neg_np = np.asarray(neg_l)\n if weight >= 0.0:\n #job_words += sum(train_o1(model.node_embedding, edge, weight, lr, self.negative, model.table,\n job_words += sum(train_o1(model.node_embedding, edge, lr, self.negative, neg_np,\n py_size=model.layer1_size, py_work=py_work) \n 
for i in range(1))\n #for i in range(int(10 * weight)))\n #job_words = sum(train_o1(model.node_embedding, edge, lr, self.negative, model.table,\n # py_size=model.layer1_size, py_work=py_work) for edge in job if edge is not None)\n jobs.task_done()\n lock.acquire(timeout=30)\n try:\n node_count[0] += job_words\n\n elapsed = time.time() - start\n if elapsed >= next_report[0]:\n log.info(\"PROGRESS: at %.2f%% \\tnode_computed %d\\talpha %.05f\\t %.0f nodes/s\" %\n (100.0 * node_count[0] / total_node, node_count[0], lr, node_count[0] / elapsed if elapsed else 0.0))\n next_report[0] = elapsed + 5.0 # don't flood the log, wait at least a second between progress reports\n finally:\n lock.release()\n \n workers = [threading.Thread(target=worker_train, name='thread_'+str(i)) for i in range(self.workers)]\n for thread in workers:\n thread.daemon = True # make interrupting the process with ctrl+c easier\n thread.start()\n\n # convert input strings to Vocab objects (eliding OOV/downsampled words), and start filling the jobs queue\n for job_no, job in enumerate(chunkize_serial(prepare_sentences(model, edges), chunksize)):\n jobs.put(job)\n\n for _ in range(self.workers):\n jobs.put(None) # give the workers heads up that they can finish -- no more work!\n\n for thread in workers:\n thread.join()\n\n elapsed = time.time() - start\n log.info(\"training on %i words took %.1fs, %.0f words/s\" %\n (node_count[0], elapsed, node_count[0]/ elapsed if elapsed else 0.0))", "def summarize(text, query, d2v_model, nn_model, limit = 250, remove_stop_words = True,with_txt_vect=False):\n if remove_stop_words : \n stopwords = stop_words()\n else :\n stopwords = []\n \n if with_txt_vect :\n text_prep = gensim.utils.simple_preprocess(text, deacc=True)\n text_vector = d2v_model.infer_vector(remove_stopwords(text_prep,stopwords))\n\n \n query_prep = gensim.utils.simple_preprocess(query, deacc=True)\n query_vector = d2v_model.infer_vector(remove_stopwords(query_prep,stopwords))\n \n summary = \"\"\n summary_vector = d2v_model.infer_vector([\"\"])\n summary_idx = []\n \n sentences = text.split('.')\n sentences = np.asarray(sentences)\n \n remaining_sentences = copy.copy(sentences)\n \n size = 0\n counter = 0\n while size < limit and len(remaining_sentences)>0 :\n counter = counter+1\n scores = []\n for sentence in remaining_sentences :\n sentence_prep = gensim.utils.simple_preprocess(sentence, deacc=True)\n sentence_vector = d2v_model.infer_vector(sentence_prep)\n if with_txt_vect :\n nn_input = np.hstack([query_vector, summary_vector, sentence_vector, text_vector])\n else:\n nn_input = np.hstack([query_vector, summary_vector, sentence_vector])\n nn_input = np.asarray([nn_input]) # weird but it is important to do it\n score = nn_model.predict(nn_input) \n scores.append(score)\n #print(scores)\n max_idx_rem = int(np.argmax(scores))\n idx_selected_sentence = np.arange(len(sentences))[sentences == remaining_sentences[max_idx_rem]]\n idx_selected_sentence = int(idx_selected_sentence[0])\n size += len(remaining_sentences[max_idx_rem].split())\n \n remaining_sentences = list(remaining_sentences)\n del remaining_sentences[max_idx_rem]\n bisect.insort_left(summary_idx,idx_selected_sentence)\n\n summary = \"\"\n\n for idx in summary_idx:\n summary = summary + \" \" + sentences[idx]\n\n summary_prep = gensim.utils.simple_preprocess(summary, deacc=True)\n summary_vector = d2v_model.infer_vector(summary_prep)\n\n return summary", "def viterbi_score(confusion_networks):\n for confusion_network in confusion_networks:\n prev, score = 
[-infinity] * len(confusion_network), [-infinity] + [0.0] * len(confusion_network)\n for t in range(0, len(confusion_network)): # t: words in the sentence (\"bfs\")\n prev, score = score, prev\n for j in range(0, len(confusion_network[t])): # Iterates deep-first in a CN position (\"dfs\")\n score[j] = max([prev[i] +\n confusion_network[i][j][2]\n for i in range(0, len(confusion_network[t]))])\n return max([score[i] for i in range(1, len(confusion_network[t]))])", "def main():\r\n x = [\r\n [ 1,1 ], [ 0,0 ], [ 1,0 ], [ 0,0 ], [ 0,0 ], [ 0,0 ],\r\n [ 0,0 ], [ 0,0 ], [ 1,1 ], [ 0,0 ], [ 0,0 ], [ 1,0 ],\r\n [ 1,0 ], [ 0,0 ], [ 1,1 ], [ 0,0 ], [ 1,0 ], [ 0,0 ]\r\n ]\r\n\r\n # Encoding of the correct classes for the training material\r\n y = [1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0]\r\n b = BinaryLogisticRegression(x, y)\r\n b.fit()\r\n b.print_result()", "def test():\n listpost,listclass = bayes.loaddataset()\n myvocablist = bayes.createlist(listpost)\n tmatrix = list()\n for doc in listpost:\n\t vec = bayes.word2vec(myvocablist,doc)\n\t tmatrix.append(vec)\n p0,p1,pa = bayes.train(tmatrix,listclass)\n testdoc1 = ['love','my','dalmation']\n testvec1 = bayes.word2vec(myvocablist,testdoc1)\n print testdoc1,'classify as :',bayes.classify(testvec1,p0,p1,pa)\n testdoc2 = ['stupid','love']\n testvec2 = bayes.word2vec(myvocablist,testdoc2)\n print testdoc2,'classify as :',bayes.classify(testvec2,p0,p1,pa)", "def run_viterbi(emission_scores, trans_scores, start_scores, end_scores):\r\n L = start_scores.shape[0]\r\n assert end_scores.shape[0] == L\r\n assert trans_scores.shape[0] == L\r\n assert trans_scores.shape[1] == L\r\n assert emission_scores.shape[1] == L\r\n N = emission_scores.shape[0]\r\n\r\n y = []\r\n dp_scores = []\r\n back_pointer = []\r\n\r\n for i in xrange(N):\r\n dp_scores.append([])\r\n back_pointer.append([])\r\n for j in xrange(L):\r\n if (i == 0):\r\n score = start_scores[j] + emission_scores[0, j]\r\n back = -1\r\n else:\r\n max = dp_scores[i-1][0] + trans_scores[0, j]\r\n back = 0\r\n for k in xrange(L):\r\n if (dp_scores[i-1][k] + trans_scores[k, j] > max):\r\n max = dp_scores[i-1][k] + trans_scores[k, j]\r\n back = k\r\n score = max + emission_scores[i, j]\r\n dp_scores[i].append(score)\r\n back_pointer[i].append(back)\r\n\r\n s = dp_scores[N-1][0] + end_scores[0]\r\n back = 0\r\n for k in xrange(L):\r\n if (dp_scores[N-1][k] + end_scores[k] > s):\r\n s = dp_scores[N-1][k] + end_scores[k]\r\n back = k\r\n\r\n y.append(back)\r\n for i in range(N-1, 0, -1):\r\n y.append(back_pointer[i][back])\r\n back = back_pointer[i][back]\r\n y.reverse()\r\n\r\n return (s, y)", "def train_sentence_dbow(model, sentence, lbls, alpha, work=None, train_words=True, train_lbls=True):\n neg_labels = []\n if model.negative:\n # precompute negative labels\n neg_labels = zeros(model.negative + 1)\n neg_labels[0] = 1.0\n\n for label in lbls:\n if label is None:\n continue # OOV word in the input sentence => skip\n for word in sentence:\n if word is None:\n continue # OOV word in the input sentence => skip\n train_sg_pair(model, word, label, alpha, neg_labels, train_words, train_lbls)\n\n return len([word for word in sentence if word is not None])", "def __init__(self):\n self.bigramCounts = collections.defaultdict(lambda : 0)\n self.trigramCounts = collections.defaultdict(lambda : 0)\n self.unigramCounts = collections.defaultdict(lambda : 1)\n self.continuationCounts = collections.defaultdict(lambda: 0)\n self.followingCounts = collections.defaultdict(lambda: 0)\n self.total = 1\n 
self.totalBigramCounts = 0\n print \"Training Language Model...\"\n self.train(brown.sents())\n print \"--Training Complete--\"", "def train(self, instance_list):\n \"\"\"Observation probabilities b_t=c(o_t=x,q_t=y)/c(q_t=y)\n Transition probabilities a_t=c(q_t-1=i,q_t=j)/c(q_t-1=i)\n Based on the empirical counts from _collect_counts, I compute probabilities for each word being emitted in given state and for each state-to-state transition, including START->state.\n <UNK> is used to account for unseen features in the training set.\n \"\"\"\n # Get labels and final V (replacing rare words with <UNK>) for the training data\n self.get_labels(instance_list)\n self.get_rare_words(instance_list)\n self.get_V(instance_list)\n\n # Get maps of label and indices:\n for i in xrange(len(self.labels)):\n self.label2index[self.labels[i]] = i\n self.index2label.append(self.labels[i])\n\n # transition probabilities: matrix labels x labels\n self.transition_matrix = numpy.zeros((len(self.labels)+1,len(self.labels))) #a\n # observation probabilities: matrix of V x labels\n self.emission_matrix = numpy.zeros((len(self.V),len(self.labels))) #b\n self.transition_count_table = numpy.zeros((len(self.labels)+1,len(self.labels)))\n self.feature_count_table = numpy.zeros((len(self.V),len(self.labels)))\n self._collect_counts(instance_list)\n #TODO: estimate the parameters from the count tables\n for instance in instance_list:\n index = 0\n for t in instance.data:\n index = instance.data.index(t)\n if t in self.V:\n self.emission_matrix[self.V.index(t)][self.labels.index(instance.label[index])] = self.feature_count_table[self.V.index(t)][self.labels.index(instance.label[index])]/self.feature_count_table[:,self.labels.index(instance.label[index])].sum()\n else:\n self.emission_matrix[self.V.index('<UNK>')][self.labels.index(instance.label[index])] = self.feature_count_table[self.V.index('<UNK>')][self.labels.index(instance.label[index])]/self.feature_count_table[:,self.labels.index(instance.label[index])].sum()\n\n if index > 0:\n self.transition_matrix[self.labels.index(instance.label[index-1])][self.labels.index(instance.label[index])] = self.transition_count_table[self.labels.index(instance.label[index-1])][self.labels.index(instance.label[index])]/self.transition_count_table[self.labels.index(instance.label[index-1]), :].sum()\n else:\n self.transition_matrix[len(self.labels)][self.labels.index(instance.label[index])] = self.transition_count_table[len(self.labels)][self.labels.index(instance.label[index])]/self.transition_count_table[len(self.labels), :].sum()" ]
[ "0.6409644", "0.6073446", "0.6022884", "0.5973923", "0.5945685", "0.58445257", "0.57420796", "0.5737962", "0.56900644", "0.56370634", "0.56175566", "0.5607194", "0.5601182", "0.5584242", "0.55672693", "0.5564353", "0.55216706", "0.5517806", "0.551739", "0.5480801", "0.5472328", "0.5467731", "0.54414564", "0.5438321", "0.5431898", "0.54246473", "0.5407684", "0.53868747", "0.53782654", "0.5361541", "0.5332205", "0.53277737", "0.53270775", "0.53246933", "0.53050387", "0.52937293", "0.52928746", "0.5292295", "0.5291893", "0.52772415", "0.52759665", "0.527462", "0.5269254", "0.52532506", "0.5250824", "0.5241305", "0.5241215", "0.52353317", "0.52321297", "0.5225839", "0.52253026", "0.5210127", "0.5198424", "0.5197703", "0.5188042", "0.5181151", "0.51793265", "0.51724905", "0.51631385", "0.51631385", "0.51628274", "0.5152925", "0.51508814", "0.51489097", "0.5146774", "0.5146658", "0.5140314", "0.5138428", "0.5138355", "0.5125642", "0.5123603", "0.5119982", "0.51104754", "0.510348", "0.5098086", "0.50964606", "0.50962466", "0.5092926", "0.5090401", "0.5085412", "0.5078379", "0.50759226", "0.5072088", "0.5071708", "0.50706613", "0.5063788", "0.5062735", "0.5043711", "0.5043361", "0.50432974", "0.50415677", "0.5034312", "0.5030327", "0.50225425", "0.502185", "0.5021763", "0.50171125", "0.5016878", "0.5009243", "0.5005493" ]
0.53388405
30
Run a single epoch
def eval_model(device, model, sampler, loss_compute, logit_modifier_fxn, token_sampler,
               print_every, max_len, user_items_df, max_name_len=15, ingr_map=None,
               base_save_dir='', pad_ingr=None, ppx_only=False, **tensor_kwargs):
    start = datetime.now()
    results_dicts = []

    # Extract into tuples and list
    tensor_names, base_tensors = zip(*tensor_kwargs.items())

    # Iterate through batches in the epoch
    model.eval()
    with torch.no_grad():
        total_tokens = 0
        total_name_tokens = 0
        total_loss = 0.0
        total_name_loss = 0.0
        print_tokens = 0

        for i, batch in enumerate(tqdm(sampler.epoch_batches(), total=sampler.n_batches), 1):
            batch_users, items = [t.to(device) for t in batch]

            # Fill out batch information
            batch_map = dict(zip(
                tensor_names,
                get_batch_information_general(items, *base_tensors)
            ))
            use_ingr_embedding = batch_map['ingr_tensor'].size(-1) != MAX_INGR * MAX_INGR_TOK
            user_prior_technique_masks = torch.stack([get_user_prior_techniques_mask(
                user_ix=uix.item(), item_ix=iix.item(), user_items_df=user_items_df,
                tech_mask_tensor=tensor_kwargs['tech_mask_tensor'], device=device, normalize=True
            ) for uix, iix in zip(batch_users, items)], dim=0)

            # Logistics
            this_batch_size = batch_map['steps_tensor'].size(0)
            this_batch_num_tokens = (batch_map['steps_tensor'] != PAD_INDEX).data.sum().item()
            this_batch_num_name_tokens = (batch_map['name_tensor'] != PAD_INDEX).data.sum().item()
            name_targets = batch_map['name_tensor'][:, :-1]

            '''
            Teacher forcing - evaluate
            '''
            # Comparing out(token[t-1]) to token[t]
            (log_probs, _), (name_log_probs, _) = model.forward(
                device=device,
                inputs=(
                    batch_map['calorie_level_tensor'],
                    batch_map['name_tensor'],
                    batch_map['ingr_tensor']
                ),
                ingr_masks=batch_map['ingr_mask_tensor'],
                user_prior_technique_masks=user_prior_technique_masks,
                targets=batch_map['steps_tensor'][:, :-1],
                max_len=max_len-1,
                start_token=START_INDEX,
                teacher_forcing=True,
                name_targets=name_targets,
                max_name_len=max_name_len-1,
                visualize=False
            )
            loss, name_loss = loss_compute(
                log_probs,
                batch_map['steps_tensor'][:, 1:],
                name_outputs=name_log_probs,
                name_targets=name_targets,
                norm=this_batch_size,
                model=model,
                clip=None
            )
            total_loss += loss
            total_name_loss += name_loss

            # Logging
            total_tokens += this_batch_num_tokens
            total_name_tokens += this_batch_num_name_tokens
            print_tokens += this_batch_num_tokens

            del log_probs, name_log_probs

            # Short-circuit if we only want to calculate test perplexity
            if ppx_only:
                if i % print_every == 0:
                    elapsed = datetime.now() - start
                    print("Epoch Step: {} LM Loss: {:.5f}; Name Loss: {:.5f}; Tok/s: {:.3f}".format(
                        i, loss / this_batch_size, name_loss / this_batch_size,
                        print_tokens / elapsed.seconds
                    ))
                    start = datetime.now()
                    print_tokens = 0
                continue

            '''
            Non-teacher-forcing - Generate!
            '''
            # Generates probabilities
            (log_probs, output_tokens, ingr_attns, prior_tech_attns), \
                (name_log_probs, name_output_tokens) = model.forward(
                    device=device,
                    inputs=(
                        batch_map['calorie_level_tensor'],
                        batch_map['name_tensor'],
                        batch_map['ingr_tensor']
                    ),
                    ingr_masks=batch_map['ingr_mask_tensor'],
                    user_prior_technique_masks=user_prior_technique_masks,
                    targets=batch_map['steps_tensor'][:, :-1],
                    max_len=max_len-1,
                    start_token=START_INDEX,
                    teacher_forcing=False,
                    logit_modifier_fxn=logit_modifier_fxn,
                    token_sampler=token_sampler,
                    visualize=True,
                    max_name_len=max_name_len-1,
                    name_targets=name_targets,
                )
            del log_probs, name_log_probs

            # Generated recipe
            calorie_levels, technique_strs, ingredient_strs, gold_strs, generated_strs, \
                prior_items, recipe_reprs = get_batch_generated_recipes(
                    batch_users=batch_users, batch_generated=output_tokens,
                    max_ingr=MAX_INGR, max_ingr_tok=MAX_INGR_TOK,
                    names_generated=name_output_tokens, ingr_map=ingr_map,
                    user_items_df=user_items_df, **batch_map
                )

            for ix in range(len(generated_strs)):
                # Create save location: test_i<item>_u<user>
                ii = items[ix].data.item()
                uu = batch_users[ix].data.item()
                sample_id = 'test_i{}_u{}'.format(ii, uu)
                trial_save_dir = os.path.join(base_save_dir, sample_id)
                if not os.path.exists(trial_save_dir):
                    os.mkdir(trial_save_dir)

                # Output tokens for heatmap axes
                out_indices = output_tokens[ix].detach().cpu().numpy().tolist()
                out_tokens = decode_ids(out_indices)
                trunc_indices = out_indices[:out_indices.index(END_INDEX)] \
                    if END_INDEX in out_indices else out_indices
                output_len = len(trunc_indices)
                output_techniques = [t for t in TECHNIQUES_LIST if t in generated_strs[ix]]
                results_dicts.append({
                    'u': uu,
                    'i': ii,
                    'generated': generated_strs[ix],
                    'n_tokens': output_len,
                    'generated_techniques': output_techniques,
                    'n_techniques': len(output_techniques)
                })

                # Save output
                with open(os.path.join(trial_save_dir, 'output.txt'), 'w+', encoding='utf-8') as wf:
                    wf.write(recipe_reprs[ix])

                # Ingredient Attention
                ingr_attentions = np.matrix([
                    a.squeeze().detach().cpu().numpy().tolist() for a in ingr_attns[ix]
                ]).T
                ingr_attn_df = pd.DataFrame(
                    ingr_attentions[:len(ingredient_strs[ix])],
                    index=ingredient_strs[ix], columns=out_tokens
                )
                ingr_attn_df = ingr_attn_df[ingr_attn_df.index != '']
                ingr_attn_df.to_pickle(
                    os.path.join(trial_save_dir, 'ingredient_attention.pkl')
                )

                # Prior Technique Attention
                prior_tech_attention = np.matrix([
                    a.squeeze().detach().cpu().numpy().tolist() for a in prior_tech_attns[ix]
                ]).T
                prior_tech_attn_df = pd.DataFrame(
                    prior_tech_attention, index=TECHNIQUES_LIST + ['PAD'], columns=out_tokens
                )
                prior_tech_attn_df = prior_tech_attn_df[(prior_tech_attn_df.T != 0.0).any()]
                prior_tech_attn_df.to_pickle(
                    os.path.join(trial_save_dir, 'prior_tech_attention.pkl')
                )

            if i % print_every == 0:
                elapsed = datetime.now() - start
                print("Epoch Step: {} LM Loss: {:.5f}; Name Loss: {:.5f}; Tok/s: {:.3f}".format(
                    i, loss / this_batch_size, name_loss / this_batch_size,
                    print_tokens / elapsed.seconds
                ))
                print('SAMPLE DECODED RECIPE:\n\n{}\n\n'.format(recipe_reprs[0]))
                start = datetime.now()
                print_tokens = 0

    # Reshuffle the sampler
    sampler.renew_indices()

    if total_name_tokens > 0:
        print('\nName Perplexity: {}'.format(
            np.exp(total_name_loss / float(total_name_tokens))
        ))

    # Store perplexity
    ppx = np.exp(total_loss / float(total_tokens))
    with open(os.path.join(base_save_dir, 'ppx.pkl'), 'wb') as wf:
        pickle.dump(ppx, wf)
    print('PERPLEXITY: {:.5f}'.format(
        ppx
    ))

    if not ppx_only:
        # Store recipe information -- generated string, # tokens (length), tech, # tech
        gen_df = pd.DataFrame(results_dicts)[[
            'u', 'i', 'generated', 'n_tokens', 'generated_techniques', 'n_techniques'
        ]]
        df_loc = os.path.join(base_save_dir, 'generated_df.pkl')
        gen_df.to_pickle(df_loc)
        print('Saved generation DF to {}'.format(
            df_loc
        ))
        print(gen_df.head(3))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train_one_epoch(self):\n raise NotImplementedError", "def run_epoch( self ):\n # --- Init Epoch ----\n total_epoch_loss = 0.0\n epoch_batches = self.dataset.dataloader( self.config.neuron.epoch_length )\n progress_bar = qqdm(enumerate(epoch_batches), total=len(epoch_batches), desc=format_str('blue', f'Epoch Progress'))\n for iteration, (inputs) in progress_bar:\n\n # ---- Forward / Backward ----\n prev_mechanism_weights = self.mechanism_weights.tolist()\n output = self.train ( batch = { 'inputs': inputs } )\n next_mechanism_weights = self.mechanism_weights.tolist()\n total_epoch_loss += output.local_target_loss.item()\n\n # ---- Logs ----\n self.epoch_logs (\n progress_bar,\n iteration = iteration,\n output = output,\n prev_mechanism_weights = prev_mechanism_weights,\n next_mechanism_weights = next_mechanism_weights\n )\n self.global_step += 1\n\n self.epoch_loss = total_epoch_loss / self.config.neuron.epoch_length\n self.epoch += 1", "def train_epoch(self):\r\n for loader in self.loaders:\r\n if self.epoch % loader.epoch_interval == 0:\r\n self.cycle_dataset(loader)\r\n\r\n self._stats_new_epoch()\r\n self._write_tensorboard()\r\n print('{}th epoch train / eval done!'.format(self.epoch))", "def start_epoch(self):\n raise NotImplementedError", "def TrainEpoch(ss):\n ss.StopNow = False\n curEpc = ss.TrainEnv.Epoch.Cur\n while True:\n ss.TrainTrial()\n if ss.StopNow or ss.TrainEnv.Epoch.Cur != curEpc:\n break\n ss.Stopped()", "def run_epoch(model, data, optimizer, epoch):\n traindata, valdata = data\n\n model.train()\n train_bpd = epoch_iter(model, traindata, optimizer, epoch)\n\n model.eval()\n val_bpd = epoch_iter(model, valdata, optimizer, epoch)\n\n return train_bpd, val_bpd", "def run_epoch(model, data, optimizer):\n traindata, valdata = data\n\n model.train()\n train_elbo = epoch_iter(model, traindata, optimizer)\n\n model.eval()\n val_elbo = epoch_iter(model, valdata, optimizer)\n\n return train_elbo, val_elbo", "def run_epoch(model, data, optimizer):\n traindata, valdata = data\n\n model.train()\n train_elbo = epoch_iter(model, traindata, optimizer)\n\n model.eval()\n val_elbo = epoch_iter(model, valdata, optimizer)\n\n return train_elbo, val_elbo", "def run_epoch(self, session):\n start_time = time.time()\n costs = 0.0\n iters = 0\n\n fetches = {\n \"cost\": self._cost,\n \"final_state\": self._final_state,\n }\n\n if(self._is_training):\n fetches[\"train_op\"] = self._train_op\n\n state = session.run(self._initial_state)\n\n for step in range(self._input.epoch_size):\n feed_dict = {}\n for i, (c, h) in enumerate(self._initial_state):\n feed_dict[c] = state[i].c\n feed_dict[h] = state[i].h\n\n vals = session.run(fetches, feed_dict)\n cost = vals[\"cost\"]\n state = vals[\"final_state\"]\n\n costs += cost\n iters += self._input.num_steps\n\n return np.exp(costs / iters)", "def _train_epoch(self, epoch):\n raise NotImplementedError", "def _train_epoch(self, epoch):\n raise NotImplementedError", "def _train_epoch(self, epoch):\n raise NotImplementedError", "def _train_epoch(self, epoch):\n raise NotImplementedError", "def set_epoch(self, epoch):\r\n pass", "def run_epoch(self, train, dev, epoch):\n # iterate over dataset\n for i, (words, labels) in enumerate(minibatches(train, self.config.batch_size)):\n fd, _ = self.get_feed_dict(words, labels, self.config.lr,\n self.config.dropout)\n\n _, train_loss= self.sess.run(\n [self.train_op, self.loss], feed_dict=fd)\n\n# =============================================================================\n# # tensorboard\n# if i % 10 == 
0:\n# self.file_writer.add_summary(summary, epoch*nbatches + i)\n# =============================================================================\n\n metrics = self.run_evaluate(dev)\n msg = \" - \".join([\"{} {:04.2f}\".format(k, v)\n for k, v in metrics.items()])\n print(msg)\n\n return metrics[\"f1\"]", "def epoch_start(self, epoch):\n self.epoch = epoch", "def run_one_epoch(self, dataset, phase, lr=None):\n epoch_loss = []\n epoch_predictions = []\n for x_input in dataset.get_batch_data():\n loss, prediction = self.model_wrapper.run_batch(x_input,\n lr,\n phase=phase)\n epoch_loss.append(loss)\n epoch_predictions.append(prediction)\n\n epoch_loss = np.array(epoch_loss)\n\n epoch_predictions = self.concat_element(epoch_predictions)\n\n if phase == RunnerPhase.PREDICT:\n epoch_predictions = dataset.get_last_inversed_pred(epoch_predictions)\n return epoch_loss, epoch_predictions\n else:\n epoch_predictions, epoch_labels = dataset.get_masked_inversed_pred_and_label(epoch_predictions)\n return epoch_loss, epoch_predictions, epoch_labels", "def run_epoch(self):\n self.model_lr_scheduler.step()\n\n print(\"Training\")\n self.set_train()\n\n for batch_idx, inputs in enumerate(self.train_loader):\n\n before_op_time = time.time()\n\n outputs, losses = self.process_batch(inputs)\n\n self.model_optimizer.zero_grad()\n losses[\"loss\"].backward()\n self.model_optimizer.step()\n\n duration = time.time() - before_op_time\n\n # log less frequently after the first 2000 steps to save time & disk space\n early_phase = self.step < 2000\n late_phase = self.step % 2000 == 0\n\n if early_phase or late_phase:\n self.log_time(batch_idx, duration, losses[\"loss\"].cpu().data)\n\n if \"depth_gt\" in inputs:\n self.compute_depth_losses(inputs, outputs, losses)\n\n self.log(\"train\", inputs, outputs, losses)\n self.val()\n\n self.step += 1", "def step(self, epoch):\n\n self.train(epoch)\n self.test(epoch)", "def on_epoch_start(self):", "def on_epoch_begin(self, epoch, logs=None):", "def on_epoch_begin(self, epoch, logs=None):", "def run_epoch(session, model, verbose=False):\n start_time = time.time()\n costs = 0.0\n iters = 0\n state = session.run(model.initial_state)\n\n fetches = {\n \"cost\" : model.cost,\n \"final_state\": util.final_state_tuples(model.final_state, model.name),\n }\n\n for step in range(model.input.epoch_size):\n feed_dict = {}\n for i, (c, h) in enumerate(util.initial_state_tuples(model.initial_state, model.name)):\n feed_dict[c] = state[i].c\n feed_dict[h] = state[i].h\n\n vals = session.run(fetches, feed_dict)\n cost = vals[\"cost\"]\n state = vals[\"final_state\"]\n\n costs += cost\n iters += model.input.num_steps\n\n if verbose and step % (model.input.epoch_size // 10) == 10:\n print(\"%.3f perplexity: %.3f speed: %.0f wps\" %\n (step * 1.0 / model.input.epoch_size, np.exp(costs / iters),\n iters * model.input.batch_size * max(1, util.FLAGS.num_gpus) /\n (time.time() - start_time)))\n\n return np.exp(costs / iters)", "def train_one_epoch(self):\n\t\tself.model.train()\n\t\ttrain_loss = 0\n\n\t\tfor batch_idx, data in enumerate(self.data_loader.train_loader):\n\t\t\tInput = data[0].float().to(self.device)\n\t\t\tOutput = data[1].float().to(self.device)\n\n\t\t\tself.optimizer.zero_grad()\n\t\t\tloss = self.loss(self.model(Input)[:,0],Output)\n\t\t\ttrain_loss += loss.item()\n\t\t\tloss.backward()\n\t\t\tself.optimizer.step()\n\t\t\tself.current_iteration += 1\n\n\t\tself.summary_writer.add_scalar('training/loss', loss.item(), self.current_epoch)", "def run_epoch(session, model, 
eval_op=None, verbose=False):\n start_time = time.time()\n costs = 0.0\n iters = 0\n state = session.run(model.initial_state)\n\n fetches = {\n \"cost\": model.cost,\n \"final_state\": model.final_state,\n }\n if eval_op is not None:\n fetches[\"eval_op\"] = eval_op\n\n for step in range(model.input.epoch_size):\n feed_dict = {}\n for i, (c, h) in enumerate(model.initial_state):\n feed_dict[c] = state[i].c\n feed_dict[h] = state[i].h\n\n vals = session.run(fetches, feed_dict)\n cost = vals[\"cost\"]\n state = vals[\"final_state\"]\n\n costs += cost\n iters += model.input.num_steps\n\n if verbose and step % (model.input.epoch_size // 10) == 10:\n print(\"%.3f perplexity: %.3f speed: %.0f wps\" %\n (step * 1.0 / model.input.epoch_size, np.exp(costs / iters),\n iters * model.input.batch_size * max(1, FLAGS.num_gpus) /\n (time.time() - start_time)))\n\n return np.exp(costs / iters)", "def train_epoch(self) -> None:\n ct = self.config.training\n total_games = self._get_total_games()\n print(f\"Total Games: {total_games}\")\n train_size = int(0.9 * total_games)\n dataset_wrapper = DatasetWrapper(self.config)\n self.agent.model.fit(\n dataset_wrapper.get_dataset(train_size),\n epochs=ct.epoch_to_checkpoint,\n validation_data=dataset_wrapper.get_dataset(train_size, is_training=False),\n )", "def run_epoch(data, config, is_train, verbose=False):\n epoch_size = ((len(data) // config.batch_size) - 1) // config.num_steps\n start_time = time.time()\n costs = 0.0\n iters = 0\n for hidden_state in hidden_states:\n hidden_state.set_value(np.zeros_like(hidden_state.get_value()))\n for step, (x, y) in enumerate(data_iterator(data, config.batch_size, config.num_steps)):\n if is_train:\n noise_x = get_noise_x(x, config.drop_x)\n cost = train(x, y, noise_x)\n else:\n cost = evaluate(x, y)\n costs += cost\n iters += config.num_steps\n if verbose and step % (epoch_size // 10) == 10:\n print(\"%.3f perplexity: %.3f speed: %.0f wps\" % (step * 1.0 / epoch_size, np.exp(costs / iters),\n iters * config.batch_size / (time.time() - start_time)))\n return np.exp(costs / iters)", "def run_epoch(model, data):\n model.eval()\n state_dict = torch.load('saved_model.pt', map_location=\"cpu\")\n model.load_state_dict(state_dict)\n total_loss = np.zeros(model.seq_len)\n steps = 0\n # LOOP THROUGH MINI BATCHES\n for step, (x, y) in enumerate(ptb_iterator(data, model.batch_size, model.seq_len)):\n steps += 1\n if args.model != 'TRANSFORMER':\n hidden = model.init_hidden()\n hidden = hidden.to(device)\n\n if args.model == 'TRANSFORMER':\n batch = Batch(torch.from_numpy(x).long().to(device))\n model.zero_grad()\n outputs = model.forward(batch.data, batch.mask).transpose(1, 0)\n # print (\"outputs.shape\", outputs.shape)\n else:\n inputs = torch.from_numpy(x.astype(np.int64)).transpose(0, 1).contiguous().to(device)#.cuda()\n model.zero_grad()\n hidden = repackage_hidden(hidden)\n outputs, hidden = model(inputs, hidden)\n\n targets = torch.from_numpy(y.astype(np.int64)).transpose(0, 1).contiguous().to(device)#.cuda()\n total_loss += np.array([loss_fn(outputs[i], targets[i]).item() for i in range(len(outputs))])\n\n total_loss /= float(steps)\n print(total_loss)", "def train(self):\n self.epoch = 0\n self.step = 0\n self.start_time = time.time()\n for self.epoch in range(self.num_epochs):\n print(\"EPOHA\")\n self.run_epoch()\n print(\"SAVE MODEL\")\n self.save_model()", "def run_epoch(data_iter, model, loss_compute, ctx):\r\n start = time.time()\r\n total_tokens = 0\r\n total_loss = 0\r\n tokens = 0\r\n for i, batch in 
enumerate(data_iter):\r\n out = model(batch.src, batch.trg, batch.src_mask, batch.trg_mask)\r\n loss = loss_compute(out, batch.trg_y, batch.ntokens)\r\n total_loss += loss\r\n total_tokens += batch.ntokens\r\n tokens += batch.ntokens\r\n if i % 50 == 1:\r\n elapsed = time.time() - start\r\n ctx.logger.info(\"Epoch Step: %d Loss: %f Tokens per Sec: %f\",\r\n i, loss / batch.ntokens, tokens / elapsed)\r\n start = time.time()\r\n tokens = 0\r\n return total_loss / total_tokens", "def _test_epoch(self):\n raise NotImplementedError", "def set_epoch(self, epoch):\n self.epoch = epoch", "def train_epoch(self):\n for it in range(self.iter_per_epoch):\n # Get batch\n xs, _ = self.mnist.train.next_batch(100)\n _, loss, summary = self.sess.run([self.train_op, self.loss, self.summary_op],\n {self.x: xs})\n self.summary_writer.add_summary(summary, it)\n if it % 1000 == 0:\n print('Iteration {}\\t loss: {}'.format(it, loss))", "def run_epoch(data_iter,\n model,\n loss_compute,\n print_every: int = 50):\n start = time.time()\n total_tokens = 0\n total_loss = 0\n print_tokens = 0\n\n for i, batch in enumerate(data_iter, 1):\n out, _, pre_output = model.forward(batch.src,\n batch.trg,\n batch.src_mask,\n batch.trg_mask,\n batch.src_lengths,\n batch.trg_lengths)\n loss = loss_compute(pre_output, batch.trg_y, batch.nseqs)\n total_loss += loss\n total_tokens += batch.ntokens\n print_tokens += batch.ntokens\n\n if model.training and i % print_every == 0:\n elapsed = time.time() - start\n\n print(f\"Epoch Step: {i} Loss: {loss/batch.nseqs}: Tokens per Sec: \"\n f\"{print_tokens/elapsed}\")\n start = time.time()\n print_tokens = 0\n\n return math.exp(total_loss / float(total_tokens))", "def epoch(args):\n p = OptionParser(__doc__)\n p.parse_args(args)\n\n fig = plt.figure(1, (6, 4))\n root = fig.add_axes([0, 0, 1, 1])\n\n # Separators\n linestyle = dict(lw=2, color=\"b\", alpha=0.2, zorder=2)\n root.plot((0, 1), (0.5, 0.5), \"--\", **linestyle)\n for i in (1.0 / 3, 2.0 / 3):\n root.plot((i, i), (0.5, 1), \"--\", **linestyle)\n for i in (1.0 / 6, 3.0 / 6, 5.0 / 6):\n root.plot((i, i), (0, 0.5), \"--\", **linestyle)\n\n # Diagrams\n plot_diagram(root, 1.0 / 6, 3.0 / 4, \"S\", \"syntenic\")\n plot_diagram(root, 3.0 / 6, 3.0 / 4, \"F\", \"missing, with both flankers\")\n plot_diagram(root, 5.0 / 6, 3.0 / 4, \"G\", \"missing, with one flanker\")\n plot_diagram(root, 2.0 / 6, 1.0 / 4, \"FB\", \"has non-coding matches\")\n plot_diagram(root, 4.0 / 6, 1.0 / 4, \"FN\", \"syntenic region has gap\")\n\n root.set_xlim(0, 1)\n root.set_ylim(0, 1)\n root.set_axis_off()\n\n figname = fname() + \".pdf\"\n savefig(figname, dpi=300)", "def epoch():\n\treturn time.time()", "def run_epoch(self, sample_batch):\n start = time.time()\n loss = super().run_epoch(sample_batch)\n end = time.time()\n\n duration = end - start\n\n self.iteration += 1\n\n self.log_loss(loss, duration, self.iteration)\n\n return loss", "def run_one_epoch(\n self,\n epoch: int,\n extra_log_info: List[Tuple[str, float, Callable[[float], str]]] = None,\n ) -> None:\n self.lr_scheduler(self.optimizer, epoch)\n\n \n # train\n train_loss, train_stat = self.train_one_epoch()\n\n # test\n test_loss, test_stat = self.test_one_epoch()\n \n\n\n # save all params that showed the best acc\n\n test_acc = test_stat[\"model_acc\"]\n if test_acc > self.best_acc:\n self.best_acc = test_acc\n filename = str(epoch) + \"_\" + f\"{self.best_acc:.2f}\".replace(\".\", \"_\")\n self.save_params(self.model_save_dir, filename, epoch)\n \n # log\n if not extra_log_info:\n extra_log_info 
= []\n lr = self.optimizer.param_groups[0][\"lr\"]\n log_info: List[Tuple[str, float, Callable[[float], str]]] = []\n log_info.append((\"train/lr\", lr, default_format))\n log_info.append((\"train/loss\", train_loss, default_format))\n log_info += [(\"train/\" + k, v, percent_format) for k, v in train_stat.items()]\n log_info.append((\"test/loss\", test_loss, default_format))\n log_info += [(\"test/\" + k, v, percent_format) for k, v in test_stat.items()]\n log_info.append((\"test/best_acc\", self.best_acc, percent_format))\n self.log_one_epoch(epoch, log_info + extra_log_info)", "def _run_epoch(sess, model, args, data, index=0, tb_summaries=None,\n id_to_word=None, train_op=None, verbose=False):\n epoch_start_time = time.time()\n # total cost and number of words evaluated in this epoch\n costs, total_words = 0.0, 0.0\n # epoch size is number of batches in each epoch\n epoch_size = (len(data[index]) - 1) // model.config['batch_size']\n state = sess.run(model.initial_state)\n\n # iterate through batches\n for step, (x, y) in enumerate(data_reader.batch_iterator(\n data[index], model.config['batch_size'])):\n # return these parameters after running TF session\n fetches = {\n 'cost': model.cost[index],\n 'final_state': model.final_state,\n 'seq_len': model.seq_len\n }\n # only train model has optimizer operation\n if train_op is not None:\n fetches['train_op'] = train_op[index]\n\n # create dict to feed input, targets, and rnn into TF session\n feed_dict = utils.create_feed_dict(model, args, x, y, state)\n # run all parameters in fetches dict\n vals = sess.run(fetches, feed_dict)\n\n costs += vals['cost']\n # number of words evaluated\n total_words += np.sum(vals['seq_len'])\n # use perplexity to evaluate language models\n perplexity = np.exp(costs / total_words)\n\n if verbose and step % (epoch_size // 2) == 1:\n # display perplexity and top word predictions for sequence\n _display_epoch_metrics(step, epoch_size, perplexity, total_words,\n epoch_start_time, args, model, sess,\n index, feed_dict, vals, id_to_word, y)\n\n # generate sample text while training to monitor progress\n if args.display_text == 'True' and model.name == 'Train':\n generate.generate_text(sess, model, id_to_word, train_ind=index)\n\n # write TensorBoard summaries for Train/Valid\n if args.save_path != '' and model.name != 'Test':\n summary = sess.run(tb_summaries.summary_op,\n {tb_summaries.ppl_summary: perplexity})\n model.file_writer.add_summary(summary, get_or_create_global_step().eval())\n\n return perplexity", "def _run_epoch(self, train_loader, valid_loader, threshold):\n # set model in train mode and run a train pass\n self.net.train()\n train_loss, train_metric = self._train_epoch(train_loader, threshold)\n\n # set model in eval mode and validate epoch\n self.net.eval()\n val_loss, val_metric = self._validate_epoch(valid_loader, threshold)\n self.epoch_counter += 1\n\n print(\"Epoch: {}\".format(self.epoch_counter))\n print(\"LOSS - Training : [{}], Validation : [{}]\".format(round(train_loss, 4), round(val_loss, 4)))\n print(\"METRIC - Training : [{}], Validation : [{}]\".format(round(train_metric, 4), round(val_metric, 4)))\n return val_loss, val_metric", "def run_epoch(self, sample_batch):\n start = time.time()\n loss = super().run_epoch(sample_batch)\n end = time.time()\n duration = end - start\n\n self.iteration += 1\n\n self.logger.info(\"Iteration: {}, Loss: {}, Time: {}\".format(self.iteration,\n loss,\n duration))\n\n return loss", "def train_an_epoch(self, sampler, epoch_id):\n assert hasattr(self, 
\"model\"), \"Please specify the exact model !\"\n self.model.train()\n total_loss = 0\n for batch_id in range(self.num_batch):\n (\n u,\n seq,\n time_seq,\n time_matrix,\n pos,\n neg,\n ) = sampler.next_batch() # tuples to ndarray\n batch_data = (\n np.array(u),\n np.array(seq),\n np.array(time_seq),\n np.array(time_matrix),\n np.array(pos),\n np.array(neg),\n )\n loss = self.train_single_batch(batch_data)\n # print(\n # \"loss in epoch {} iteration {}: {}\".format(epoch, step, loss.item())\n # ) # expected 0.4~0.6 after init few epochs\n total_loss += loss\n print(\"[Training Epoch {}], Loss {}\".format(epoch_id, total_loss))\n self.writer.add_scalar(\"model/loss\", total_loss, epoch_id)", "def run_epoch(self, train, dev, epoch):\n # progbar stuff for logging\n batch_size = self.config.batch_size\n nbatches = (len(train) + batch_size - 1) // batch_size\n prog = Progbar(target=nbatches)\n\n # iterate over dataset\n for i, (words, labels) in enumerate(minibatches(train, batch_size)):\n fd, _ = self.get_feed_dict(words, labels, self.config.lr,\n self.config.dropout)\n\n _, train_loss, summary = self.sess.run(\n [self.train_op, self.loss, self.merged], feed_dict=fd)\n\n prog.update(i + 1, [(\"train loss\", train_loss)])\n\n # tensorboard\n if i % 10 == 0:\n self.file_writer.add_summary(summary, epoch*nbatches + i)\n\n metrics = self.run_evaluate(dev)\n msg = \" - \".join([\"{} {:04.2f}\".format(k, v)\n for k, v in metrics.items()])\n self.logger.info(msg)\n\n return metrics[\"f1\"]", "def train_epoch(self, data_loader):\n raise NotImplementedError", "def __call__(self, initial_lr, step, epoch):\n\n pass", "def run_epoch(session, model, dataset,\n keep_prob=1.0, passes=1.0, verbose=False):\n num_batches = dataset.num_batches\n start_time = time.time()\n train_cost = train_accy = valid_cost = valid_accy = 0.0\n train_evals = valid_evals = 0.0\n dot_count = 0\n total_steps = int(passes*num_batches)\n prog_int = total_steps/100 # progress interval for stdout\n\n if not num_batches > 0:\n raise RuntimeError(\"batch_size*num_unrollings is larger \"\n \"than the training set size.\")\n\n dataset.rewind() # make sure we start a beggining\n\n print(\"batches: %d \"%num_batches,end=' ')\n\n for step in range(total_steps):\n batch = dataset.next_batch()\n\n (tcost, taccy, tevals,\n vcost, vaccy, vevals) = model.train_step(session, batch,\n keep_prob=keep_prob)\n\n train_cost += tcost\n train_accy += taccy\n train_evals += tevals\n valid_cost += vcost\n valid_accy += vaccy\n valid_evals += vevals\n\n if ( verbose and ((prog_int<=1) or\n (step % (int(prog_int)+1)) == 0) ):\n dot_count += 1\n print('.',end='')\n sys.stdout.flush()\n\n if verbose:\n print(\".\"*(100-dot_count),end='')\n print(\" passes: %.2f train iters: %d valid iters: %d \"\n \"speed: %.0f seconds\" % (passes,\n train_evals,\n valid_evals,\n (time.time() - start_time)) )\n sys.stdout.flush()\n\n return (train_cost/train_evals,\n 1.0 - train_accy/train_evals,\n valid_cost/valid_evals,\n 1.0 - valid_accy/valid_evals)", "def eval_one_epoch(sess, val_model, run_metadata):\n val_loss = 0\n data_len = 0\n while True:\n try:\n if NNET_PARAM.time_line:\n loss, current_batchsize = sess.run(\n [val_model.loss, val_model.batch_size],\n options=tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE),\n run_metadata=run_metadata)\n else:\n loss, current_batchsize = sess.run(\n [val_model.loss, val_model.batch_size])\n val_loss += loss\n data_len += current_batchsize\n except tf.errors.OutOfRangeError:\n break\n val_loss /= data_len\n return val_loss", 
"def run_epoch_test(session, model, verbose=False):\n # fetches = {\"ms\": model.dynamic_eval.global_ms()}\n # vals = session.run(fetches)\n # ms = vals[\"ms\"]\n # s = np.sum(np.sqrt([x for x in ms]))\n # print(s)\n\n\n\n start_time = time.time()\n losses = 0.0\n iters = 0\n\n # zeros initial state for all devices\n state = []\n for k in range(model.gpu_num):\n state.append(session.run(model.initial_state(k)))\n\n # evaluate loss and final state for all devices\n fetches = {\"loss\": model.loss}\n\n if config.dynamic_eval:\n fetches[\"update_op\"] = model.dynamic_eval.update_op()\n\n\n for k in range(model.gpu_num):\n fetches[\"final_state%d\" % k] = model.final_state(k)\n\n for step in range(model.input.epoch_size):\n # pass states between time batches\n feed_dict = {}\n for i in range(model.gpu_num):\n gpu_state = model.initial_state(i)\n for j, (c, h) in enumerate(gpu_state):\n feed_dict[c] = state[i][j].c\n feed_dict[h] = state[i][j].h\n\n vals = session.run(fetches, feed_dict)\n\n loss = vals[\"loss\"]\n\n for k in range(model.gpu_num):\n state[k] = vals[\"final_state%d\" % k]\n\n losses += loss\n iters += model.input.time_steps\n\n if verbose and step % (model.input.epoch_size // 10) == 0:\n print(\"%.3f perplexity: %.3f bits: %.3f speed: %.0f wps\" %\n (step * 1.0 / model.input.epoch_size, np.exp(losses / iters), np.log2(np.exp(losses / iters)),\n iters * model.input.batch_size / (time.time() - start_time)))\n\n return np.exp(losses / iters)", "def set_train_epoch(self, epoch: int):\n self._train_epoch = epoch", "def train_one_epoch(sess, tr_model, i_epoch, run_metadata):\n tr_loss, i = 0, 0\n stime = time.time()\n while True:\n try:\n if NNET_PARAM.time_line:\n _, loss, current_batchsize = sess.run(\n [tr_model.train_op, tr_model.loss, tr_model.batch_size],\n options=tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE),\n run_metadata=run_metadata)\n else:\n _, loss, current_batchsize = sess.run(\n [tr_model.train_op, tr_model.loss, tr_model.batch_size])\n tr_loss += loss\n if (i+1) % NNET_PARAM.minibatch_size == 0:\n if NNET_PARAM.time_line and NNET_PARAM.timeline_type == 'minibatch':\n tl = timeline.Timeline(run_metadata.step_stats)\n ctf = tl.generate_chrome_trace_format()\n with open('_timeline/%03dtimeline%04d.json' % (i_epoch, i+1), 'w') as f:\n f.write(ctf)\n lr = sess.run(tr_model.lr)\n costtime = time.time()-stime\n stime = time.time()\n print(\"MINIBATCH %05d: TRAIN AVG.LOSS %04.6f, \"\n \"(learning rate %02.6f)\" % (\n i + 1, tr_loss / (i*NNET_PARAM.batch_size+current_batchsize), lr), 'cost time: %06dS' % costtime)\n sys.stdout.flush()\n i += 1\n except tf.errors.OutOfRangeError:\n break\n tr_loss /= ((i-1)*NNET_PARAM.batch_size+current_batchsize)\n return tr_loss", "def run_epoch(data_iter, model, loss_compute, print_every=50, num_batches=0, phase=\"train\", epoch_num=0):\n\n start = time.time()\n total_tokens = 0\n total_loss = 0\n print_tokens = 0\n\n for i, batch in tqdm(enumerate(data_iter, 1)):\n out, _, pre_output = model.forward(batch.src, batch.trg,\n batch.src_mask, batch.trg_mask,\n batch.src_lengths, batch.trg_lengths)\n loss = loss_compute(pre_output, batch.trg_y, batch.nseqs)\n #print(f'epoch loss {loss}')\n total_loss += loss\n total_tokens += batch.ntokens\n print_tokens += batch.ntokens\n \n if model.training and i % print_every == 0:\n elapsed = time.time() - start\n print(\"Epoch Step: %d Loss: %f Tokens per Sec: %f\" %\n (i, loss / batch.nseqs, print_tokens / elapsed))\n start = time.time()\n print_tokens = 0\n\n if num_batches > 0 and i > 
num_batches:\n break\n\n return math.exp(total_loss / float(total_tokens)), total_loss\n #return total_loss / float(total_tokens)", "def train(self):\n self.epoch = 0\n self.step = 0\n self.start_time = time.time()\n for self.epoch in range(self.opt.num_epochs):\n self.run_epoch()\n if (self.epoch + 1) % self.opt.save_frequency == 0:\n self.save_model()", "def run_epoch(data, session, model, train_op=None, verbose=False):\n start_time = time.time()\n costs = 0.0\n num_steps = model['num_steps']\n num_batches = (data.shape[1] - 1) // num_steps\n\n # initialize RNN cell states to be all zero\n state = session.run(model['initial_state'])\n\n fetches = {\n \"cost\": model['cost'],\n \"final_state\": model['final_state'],\n }\n\n # train model\n if train_op is not None:\n fetches[\"train_op\"] = train_op\n\n for batch in range(num_batches):\n feed_dict = {\n model['user_inputs']: data[:, batch * num_steps: (batch + 1) * num_steps],\n model['targets']: data[:, batch * num_steps + 1: (batch + 1) * num_steps + 1],\n }\n for i, (c, h) in enumerate(model['initial_state']):\n feed_dict[c] = state[i].c\n feed_dict[h] = state[i].h\n\n vals = session.run(fetches, feed_dict)\n cost = vals[\"cost\"]\n state = vals[\"final_state\"]\n costs += cost\n\n if verbose and batch % (num_batches // 10) == 10:\n iters = num_steps * (batch + 1)\n print(\"%.3f perplexity: %.3f speed: %.0f wps\" %\n (batch * 1.0 / num_batches, np.exp(costs / iters),\n iters * data.shape[0] * 1 /\n (time.time() - start_time)))\n\n return np.exp(costs / (data.shape[1] - 1))", "def _train_epoch(self, model, tqdm_data,\n optimizer_disc=None, optimizer_gen=None):", "def update_epoch(self):\n raise NotImplementedError", "def epoch_begin(self, model):\n pass", "def train_an_epoch(self, train_loader, epoch_id):\n assert hasattr(self, \"model\"), \"Please specify the exact model !\"\n self.model.train()\n total_loss = 0.0\n\n for batch_data in train_loader:\n loss = self.train_single_batch(batch_data)\n total_loss += loss\n print(\"[Training Epoch {}], Loss {}\".format(epoch_id, loss))\n self.writer.add_scalar(\"model/loss\", total_loss, epoch_id)", "def run_epoch(self, sess, input_data, verbose=None):\n data_len = len(input_data)\n total_steps =data_len // self.config.batch_size\n total_loss = []\n for step, (ret_batch, ret_label, sent_num_enc, sent_num_dec, sent_len) in enumerate(\n helper.data_iter(input_data, self.config.batch_size, self.vocab, \n noize_list=self.train_data_flatten_list, noize_num=noize_num)):\n feed_dict = self.create_feed_dict(ret_batch, sent_len, sent_num_enc, ret_label, sent_num_dec)\n \n _, loss, lr = sess.run([self.train_op, self.loss, self.learning_rate], feed_dict=feed_dict)\n total_loss.append(loss)\n if verbose and step % verbose == 0:\n sys.stdout.write('\\r{} / {} : loss = {}, lr = {}'.format(\n step, total_steps, np.mean(total_loss[-verbose:]), lr))\n sys.stdout.flush()\n sys.stdout.write('\\n')\n avg_loss = np.mean(total_loss)\n return avg_loss", "def train_epoch(self):\n\n if self._train_data_set is not None and self._train_data_set is not None:\n self._model.fit_num_epochs(self._train_data_set, self._test_data_set)\n else:\n raise RuntimeError(\"[Triggerbot]: No training or test set available\")", "def on_epoch_start(self):\n self.current_epoch += 1\n self.current_lr = self.fn(self.current_epoch)\n self.model.set_learning_rate(self.current_lr)\n self.epochs += [self.current_epoch]\n self.learning_rate += [self.current_lr]", "def run_epoch(session, model, char_data, tag_data, dict_data, len_data, eval_op, 
batch_size, verbose=False):\n start_time = time.time()\n losses = 0.0\n iters = 0.0\n\n char_data, tag_data, dict_data, len_data = reader.shuffle(char_data, tag_data, dict_data, len_data)\n xArray, yArray, dArray, lArray = reader.iterator(char_data, tag_data, dict_data, len_data, batch_size)\n\n for x, y, d, l in zip(xArray, yArray, dArray, lArray):\n fetches = [model.loss, model.logits, eval_op]\n feed_dict = {}\n feed_dict[model.input_data] = x\n feed_dict[model.targets] = y\n feed_dict[model.dicts] = d\n feed_dict[model.seq_len] = l\n loss, logits, _ = session.run(fetches, feed_dict)\n losses += loss\n iters += 1\n\n if verbose and iters % 50 == 0:\n print(\"%.3f perplexity: %.3f\" %\n (iters / float(len(xArray)), np.exp(losses / iters / len(xArray))))\n\n return np.exp(losses / iters)", "def train_epoch(self, train=False):\n # init params\n config = self.config\n writer = self.writer\n train_params = self.get_train_params()\n args = self.args\n # net, net_SP = self.net, self.net_SP\n optimizer, optimizer_SP = self.optimizer, self.optimizer_SP\n\n lr = self.get_learning_rate()\n logging.info(f\"current learning rate: {lr}\")\n\n running_losses = []\n self.save_lists = [\n \"err_q\",\n \"err_t\",\n \"epi_dists\",\n \"relative_poses_cam\",\n \"relative_poses_body\",\n ]\n dict_of_lists_in_train = init_dict_of_lists(config, self.save_lists)\n dict_of_lists_in_val = init_dict_of_lists(config, self.save_lists)\n if_val_in_train_trigger = False\n\n thd_corr = 300\n writer.add_scalar(\"training-lr\", lr, self.n_iter)\n\n # Train one epoch\n for i, sample_train in tqdm(enumerate(self.train_loader)):\n # if training\n if train:\n # eval in training script\n if (\n self.n_iter != 0\n and self.n_iter % config[\"training\"][\"val_interval_in_train\"] == 0\n ):\n if_val_in_train_trigger = True\n if if_val_in_train_trigger:\n logging.info(\n \"+++[Train]+++ Collecting training batch for %s at train step %d\"\n % (args.exper_name, self.n_iter)\n )\n self.net.eval()\n else:\n self.net.train()\n\n # train one batch\n (\n loss_train_out,\n dict_of_lists_in_train,\n clamp_cum,\n ) = self.train_val_batch(\n train_params,\n sample_train,\n True,\n if_val=if_val_in_train_trigger,\n dict_of_lists=dict_of_lists_in_train,\n )\n\n if if_val_in_train_trigger:\n if (\n dict_of_lists_in_train[\"count\"]\n > config[\"training\"][\"val_batches\"]\n ):\n dict_of_lists_in_train = self.flush_dict_of_lists(\n writer, \"training\", self.n_iter, **dict_of_lists_in_train\n )\n if_val_in_train_trigger = False\n else:\n # running_losses.append(loss_train_out)\n print(self.n_iter, \"%.8f\" % loss_train_out)\n self.n_iter += 1\n\n # if testing\n if args.eval and self.n_iter % config[\"training\"][\"val_interval\"] == 0:\n logging.info(\n \"+++[Val]+++ Validating %s at train step %d\"\n % (args.exper_name, self.n_iter)\n )\n self.net.eval()\n assert self.net.training == False\n for j, sample_val in tqdm(enumerate(self.val_loader)):\n # if not self.check_num_of_matches(sample, thd=thd_corr): continue\n logging.info(\"+++[Val]+++ Validating batch %d\" % (j))\n # logging.info(f\"frame_id: {sample_val['frame_ids']}\")\n loss_val_out, dict_of_lists_in_val, _ = self.train_val_batch(\n train_params, sample_val,\n False, if_val=True, dict_of_lists=dict_of_lists_in_val,\n ) ##### check: in order to align val and training\n self.n_iter_val += 1\n if config[\"training\"][\"val_batches\"] != -1 and (\n j > config[\"training\"][\"val_batches\"]\n ): ##### check: how to limit the validation\n break\n print(dict_of_lists_in_val.keys())\n\n ## 
save valdiation result (dict)\n if len(config[\"exps\"][\"filename\"]) > 3:\n # print(f\"dict_of_lists_in_val: {dict_of_lists_in_val}\")\n def get_dict(key_layer1, key_layer2, dict_of_lists):\n dict_of_array = {}\n for k in key_layer1:\n dict_of_array[k] = np.stack(dict_of_lists[k][key_layer2])\n return dict_of_array\n\n our_name, base_name = (\n config[\"exps\"][\"our_name\"],\n config[\"exps\"][\"base_name\"],\n )\n\n print(f'save dict_of_lists_in_val to {config[\"exps\"][\"filename\"]}')\n # save our results\n dict_of_lists = get_dict(\n self.save_lists, our_name, dict_of_lists_in_val\n )\n dict_of_lists[\"epi_dists\"] = dict_of_lists[\"epi_dists\"][:, :10] ### only take part of it\n np.savez(\n f'{self.save_path[:-11]}/{our_name}_{config[\"exps\"][\"filename\"]}',\n **dict_of_lists,\n )\n # save base_name\n dict_of_lists = get_dict(\n self.save_lists, base_name, dict_of_lists_in_val\n )\n dict_of_lists[\"epi_dists\"] = dict_of_lists[\"epi_dists\"][:, :10] ### only take part of it\n np.savez(\n f'{self.save_path[:-11]}/{base_name}_{config[\"exps\"][\"filename\"]}',\n **dict_of_lists,\n )\n # output then flush\n dict_of_lists_in_val = self.flush_dict_of_lists(\n writer, \"validating\", self.n_iter, **dict_of_lists_in_val\n )\n\n # epoch_loss = np.mean(np.asarray(running_losses))\n\n # training iterations\n self.epoch += 1\n if self.n_iter > config[\"training\"][\"train_iter\"]:\n break\n return 0.0, self.clamp_cum, self.n_iter, self.n_iter_val", "def run_epoch(epoch, lr, best_val_ppl):\n epoch_start_time = time.time()\n train(model, corpus.train, epoch=epoch, lr=lr, weight_decay=args.weight_decay)\n val_ppl = evaluate(model, corpus.valid)\n logger.warning(\n '| end of epoch {:3d} | time: {:5.2f}s |'\n 'valid ppl {:8.2f}'.format(\n epoch,\n (time.time() - epoch_start_time),\n val_ppl)\n )\n torch.save(model, model_path + '.epoch_{}'.format(epoch))\n # Save the model if the validation loss is the best we've seen so far.\n if not best_val_ppl or val_ppl < best_val_ppl:\n torch.save(model, model_path)\n best_val_ppl = val_ppl\n else:\n # Anneal the learning rate if no improvement has been seen in the\n # validation dataset.\n lr /= args.lr_decay\n return lr, best_val_ppl", "def run_epoch(session, model, input_, is_training, global_step, summary_writer, config, epoch_rem=0, COUNT=0):\n loss_acum = 0.0\n acc_acum = 0.0\n if FLAGS.only_val:\n matrices = np.zeros([config.num_actions, config.num_actions], dtype=np.int32)\n n_mistakes = np.zeros([1, config.num_actions], dtype=np.int32)\n mistake_labs = np.empty([0, 4], dtype=np.int32)\n\n fetches = {\n \"loss\": model.loss,\n # \"final_state\": model.final_state,\n \"accuracy\": model.accuracy\n }\n if is_training:\n fetches.update({\n \"train_op\": model.train_op,\n \"global_step\": global_step,\n \"learning_rate\": model.lr\n })\n if FLAGS.only_val:\n fetches.update({\n \"confusion_matrix\": model.confusion_matrix,\n \"mistakes_per_class\": model.mistakes_per_class,\n \"mistake_labs\": model.mistake_labs,\n \"logits\": model.logits,\n \"labels\": model.labels,\n \"idxs\": model.idxs\n })\n of = open('/tmp/%02d.txt' % COUNT, 'w')\n\n epoch_size = epoch_rem if epoch_rem > 0 else (input_.train_epoch_size if is_training else input_.val_epoch_size)\n t = trange(epoch_size, disable=not FLAGS.verbose, dynamic_ncols=True)\n for batch in t:\n options = None\n run_metadata = None\n if is_training and (batch % epoch_size == epoch_size - 1) and FLAGS.run_metadata: # Record execution stats\n options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)\n 
run_metadata = tf.RunMetadata()\n\n vals = session.run(fetches=fetches, options=options, run_metadata=run_metadata)\n loss = vals[\"loss\"]\n # state = vals[\"final_state\"]\n accuracy = vals[\"accuracy\"]\n\n if is_training:\n learning_rate = vals[\"learning_rate\"]\n global_step_val = vals[\"global_step\"]\n\n loss_acum += loss\n acc_acum += accuracy\n if FLAGS.only_val:\n matrices += np.squeeze(vals[\"confusion_matrix\"], axis=(0,3))\n n_mistakes += np.squeeze(vals[\"mistakes_per_class\"], axis=(0,3))\n mistake_labs = np.concatenate((mistake_labs,vals[\"mistake_labs\"]), axis=0)\n for logits, label, idx in zip(vals[\"logits\"], vals[\"labels\"], vals[\"idxs\"]):\n out = \", \".join(str(l) for l in logits)\n out += \", \" + str(label)\n out += \", \" + str(idx)\n print(out, file=of)\n\n postfix = {\n 'loss': \"%.3f\" % (loss_acum / (batch + 1)),\n 'acc': \"%.3f\" % (acc_acum / (batch + 1))\n }\n if is_training:\n postfix.update({'l_rate': \"%.1E\" % learning_rate})\n if run_metadata is not None:\n summary_writer.add_run_metadata(run_metadata, 'S:%d' % global_step_val)\n\n t.set_postfix(postfix)\n\n if FLAGS.only_val:\n import matplotlib\n matplotlib.use('Agg')\n from matplotlib import pyplot as plt\n plt.imshow(matrices/epoch_size, interpolation='nearest')\n plt.savefig(FLAGS.save_path+'/confusion_matrix.png')\n\n plt.imshow(n_mistakes/epoch_size, interpolation='nearest')\n plt.savefig(FLAGS.save_path+'/mistakes_per_class.png')\n\n np.savetxt(FLAGS.save_path+'/mistake_labs.txt', mistake_labs, fmt='%i,%i,%i,%i')\n print(np.histogram(mistake_labs[:,1], bins=np.arange(config.num_subjects)))\n print(np.histogram(mistake_labs[:,2], bins=np.arange(config.num_actions)))\n print(np.mean(mistake_labs[:,3]))\n of.close()\n\n return loss_acum / epoch_size, acc_acum / epoch_size", "def run_epoch(session, model, eval_op=None, verbose=False):\n\n # Clean initialization\n start_time = time.time()\n costs = 0.0\n iters = 0\n output = None\n state = session.run(model.initial_state)\n\n # Values to extract from running the graph (cost, final state, and may be eval_op)\n fetches = {'cost': model.cost,\n 'final_state': model.final_state}\n if eval_op is not None:\n fetches[\"eval_op\"] = eval_op\n if not model.is_training:\n fetches['output'] = model.seq_outputs\n\n # Run the model epoch_size times\n for step in range(model.input.epoch_size):\n feed_dict = {}\n # feed zero values to a the RNN's hidden state\n # the below might be confusing without some background on LSTM's state implementation\n # For more info see: http://r2rt.com/recurrent-neural-networks-in-tensorflow-ii.html\n try:\n for i, (c, h) in enumerate(model.initial_state):\n feed_dict[c] = state[i].c\n feed_dict[h] = state[i].h\n except TypeError:\n feed_dict[model.initial_state] = state\n\n # Evaluate fetches by running the graph\n # THIS IS WHERE THE ACTION OCCURS\n vals = session.run(fetches, feed_dict)\n cost = vals[\"cost\"]\n state = vals[\"final_state\"]\n if 'output' in vals.keys():\n output = vals['output']\n\n\n costs += cost\n iters += model.input.num_steps\n\n if verbose and step % (model.input.epoch_size // 10) == 10:\n print(\"%.3f perplexity: %.3f speed: %.0f wps\" %\n (step * 1.0 / model.input.epoch_size, np.exp(costs / iters),\n iters * model.input.batch_size / (time.time() - start_time)))\n\n return np.exp(costs / iters), output", "def run_epoch(self, sess, inputs, labels):\n n_minibatches, total_loss = 0, 0\n for input_batch, labels_batch in get_minibatches([inputs, labels], self.config.batch_size):\n n_minibatches += 1\n 
total_loss += self.train_on_batch(sess, input_batch, labels_batch)\n return total_loss / n_minibatches", "def train_epoch(self):\n for batch, targets in self.training_dataloader:\n self.training_step(batch, targets)\n self.calculate_training_loss()\n self.epochs_trained += 1\n LOGGER.info(\n \"Training loss after {} epochs: {}\".format(str(self.epochs_trained), str(self.training_average_loss))\n )", "def _run_training_loop(self, m, curr_epoch):\n start_time = time.time()\n while True:\n try:\n with self._new_session(m):\n train_accuracy = helper_utils.run_epoch_training(\n self.session, m, self.data_loader, curr_epoch)\n tf.logging.info('Saving model after epoch')\n self.save_model(step=curr_epoch)\n break\n except (tf.errors.AbortedError, tf.errors.UnavailableError) as e:\n tf.logging.info('Retryable error caught: %s. Retrying.', e)\n tf.logging.info('Finished epoch: {}'.format(curr_epoch))\n tf.logging.info('Epoch time(min): {}'.format(\n (time.time() - start_time) / 60.0))\n return train_accuracy", "def run(num_epochs, encoded_dim):\n # for patient_ in get_patient_ids():\n for patient_ in ['16']:\n print(\"Starting on index: \" + str(patient_))\n training_ae(num_epochs, encoded_dim, patient_, True)\n print(\"Completed \" + str(patient_) + \" reconstruction and encoding, saved test data to assess performance\")", "def train_one_epoch(self):\n prog_bar = tqdm(enumerate(self.train_data), total=len(self.train_data))\n self.model.train()\n with autocast():\n for idx, inputs in prog_bar:\n ids = inputs['inputs'].to(self.device, dtype=torch.long)\n mask = inputs['attention_mask'].to(self.device, dtype=torch.long)\n targets = inputs['targets'].to(self.device, dtype=torch.float)\n\n outputs = self.model(input_ids=ids, attention_mask=mask) \n\n loss = self.loss_fn(outputs.squeeze(1), targets)\n prog_bar.set_description('loss: {:.2f}'.format(loss.item()))\n\n Config.scaler.scale(loss).backward()\n Config.scaler.step(self.optimizer)\n Config.scaler.update()\n self.optimizer.zero_grad()\n self.scheduler.step()", "def run_epoch(session,\n model,\n dataset,\n is_train=False,\n plot_attention_weights=False):\n assert dataset is not None\n n_words = len([word for sample in dataset for word in sample if word > 0])\n epoch_size = int(math.ceil(len(dataset) / model.batch_size))\n # producer = lm_data_producer(dataset, model.batch_size, model.num_steps)\n\n fetches = {\"step_cost\": model.batch_loss, \"niters\": model.nwords}\n if is_train:\n fetches[\"eval_op\"] = model.train_op\n if plot_attention_weights:\n fetches[\"weights\"] = model.attention_weights\n\n costs = 0.0\n iters = 0\n start_time = time.time()\n # for step, (x, y) in enumerate(producer):\n for step in range(epoch_size):\n step_time = time.time()\n vals = session.run(fetches, {})\n step_cost = vals[\"step_cost\"]\n costs += step_cost\n # iters += np.sum(x > 0)\n iters += vals[\"niters\"]\n\n # print information regarding the current training process\n if is_train:\n if step % (epoch_size // 20) == 10:\n print(\"{:.3f} - aprox. loss {:.8f} - approx. speed: {:.0f} wps\".format(\n step * 1.0 / epoch_size, costs / (step + 1),\n iters / (time.time() - start_time)))\n # print information regarding the current training process\n else:\n if step % (epoch_size // 10) == 5:\n print(\"{:.3f} - approx. 
speed: {:.0f} wps\".format(\n step * 1.0 / epoch_size, iters / (time.time() - start_time)))\n\n return np.exp(costs / n_words)", "def epoch():\n return datetime2epoch(datetime.now())", "def step(self, epoch):\n # pylint: disable=unused-argument\n self.scheduler.step()", "def run(self):\n for _ in range(self.epoch, conf.FX_MAX_EPOCHS):\n self.train()\n\n with torch.no_grad():\n self.test()\n\n self.epoch += 1\n self.save_ck()\n\n self.show_completion_msg()", "def iter_epoch(self):\n\n # set to train mode\n self._set_train()\n\n # start epoch\n for i, (source, target) in enumerate(self.train_dataset):\n self._batch_iter(source, target, i)\n\n if self.info:\n print(f\"\\rEpoch: { self.epoch } | Average loss: { self.epoch_loss.avg }\")\n\n # update epoch and reset the epoch_loss\n self.epoch_loss.reset()\n self.epoch += 1", "def run_epoch(self, sess, epoch_num, validate=True):\n total_loss = 0\n accuracies = []\n for i in range(self.batches_per_epoch):\n batch = self.loader.get_batch()\n if self.config.print_every and i % self.config.print_every == 0:\n if validate:\n val_accuracy = self.eval_validation_accuracy()\n print(\"step {}, validation accuracy {:.3f}\".format(i, val_accuracy))\n accuracies.append((i + epoch_num * self.batches_per_epoch, val_accuracy))\n else:\n if self.include_coverage and self.include_entropy:\n train_accuracy = self.eval_accuracy_on_batch(batch[0], batch[1], batch[2], batch[3])\n elif self.include_coverage:\n train_accuracy = self.eval_accuracy_on_batch(batch[0], batch[1], batch[2])\n elif self.include_entropy:\n train_accuracy = self.eval_accuracy_on_batch(batch[0], batch[1], batch[2])\n else:\n train_accuracy = self.eval_accuracy_on_batch(batch[0], batch[1])\n print(\"step {}, training accuracy {:.3f}\".format(i, train_accuracy))\n \n if self.include_coverage and self.include_entropy:\n _, loss_val = sess.run([self.train_op, self.loss],\n feed_dict={self.x: batch[0], self.c: batch[1], self.e: batch[2], self.y_: batch[2], \n self.keep_prob: 1-self.config.dropout_prob})\n elif self.include_coverage:\n _, loss_val = sess.run([self.train_op, self.loss],\n feed_dict={self.x: batch[0], self.c: batch[1], self.y_: batch[2], \n self.keep_prob: 1-self.config.dropout_prob})\n elif self.include_entropy:\n _, loss_val = sess.run([self.train_op, self.loss],\n feed_dict={self.x: batch[0], self.e: batch[1], self.y_: batch[2], \n self.keep_prob: 1-self.config.dropout_prob})\n else:\n attention, _, loss_val = sess.run([self.attention, self.train_op, self.loss],\n feed_dict={self.x: batch[0], self.y_: batch[1],\n self.keep_prob: 1-self.config.dropout_prob})\n\t\tpdb.set_trace()\n\t\tnp.savetxt(\"a.csv\", attention[0], delimiter=\",\")\n total_loss += loss_val\n\n return total_loss / self.batches_per_epoch, accuracies", "def set_train_epoch(self, epoch: int):\n if hasattr(self, 'cls_head'):\n self.cls_head.set_train_epoch(epoch)", "def begin_epoch(self, epoch, model):\n super().begin_epoch(epoch, model)\n if hasattr(self.criterion, \"set_epoch\"):\n self.criterion.set_epoch(epoch)", "def run_epoch(self, dataloader, train=True):\n losses = []\n accs = []\n for imgs, targets in dataloader:\n imgs, targets = imgs.to(self.device), targets.to(self.device)\n\n # calc. 
the losses\n output = self.resnet(imgs)\n loss = self.criterion(output, targets)\n\n if train:\n # update the parameters\n self.optimizer.zero_grad() # initialize gradients\n loss.backward()\n self.optimizer.step()\n\n # save training results\n if self.total_steps % 10 == 0:\n accuracy = self.calc_batch_accuracy(output, targets)\n accs.append(accuracy.item())\n losses.append(loss.item())\n self.log_performance(self.summary_writer,\n {'loss': loss.item(), 'acc': accuracy.item()},\n self.epoch,\n self.total_steps)\n\n if self.total_steps % 100 == 0:\n self.save_module_summary(\n self.summary_writer, self.resnet.module, self.total_steps)\n\n self.total_steps += 1\n else: # no training - validation\n accuracy = self.calc_batch_accuracy(output, targets)\n accs.append(accuracy.item())\n losses.append(loss.item())\n\n avg_loss = sum(losses) / len(losses)\n avg_acc = sum(accs) / len(accs)\n return avg_loss, avg_acc", "def run_epoch(self):\n print(\"Training\")\n self.set_train()\n\n for batch_idx in range(0, self.num_total_batch):\n\n before_op_time = time.time()\n # Choosing the dataloader for training model\n if self.choosing_dataset_to_train_with(batch_idx):\n # Synthetic dataset\n self.syn_or_real = 'syn'\n try:\n inputs = self.syn_train_iter.__next__()\n except StopIteration:\n print('Stopped as the iteration has reached to the END, and reloading the synthetic dataloader')\n self.syn_train_iter = iter(self.syn_train_loader)\n inputs = self.syn_train_iter.__next__()\n else:\n # Real dataset\n self.syn_or_real = 'real'\n try:\n inputs = self.real_train_iter.__next__()\n except StopIteration:\n print('Stopped as the iteration has reached to the END, and reloading the real dataloader')\n self.real_train_iter = iter(self.real_train_loader)\n inputs = self.real_train_iter.__next__()\n\n # Move all available tensors to GPU memory\n for key, ipt in inputs.items():\n if type(key) == tuple or key == \"depth_gt\":\n inputs[key] = ipt.to(self.device)\n\n # log less frequently after the first 2000 steps to save time & disk space\n self.step += 1\n self.early_phase = batch_idx % self.opt.log_frequency == 0\n self.mid_phase = False and self.step % self.opt.save_frequency == 0\n self.late_phase = self.num_total_batch - 1 == batch_idx\n\n outputs, losses = {}, {}\n # Depth estimation\n outputs_d, losses_d = self.process_batch(inputs)\n outputs.update(outputs_d)\n losses.update(losses_d)\n\n # No more if else conditions, just combine all losses based on availability of gradients\n final_loss = torch.tensor(0.).to(self.device)\n for k, v in losses.items():\n if ('d_' not in k) and v.requires_grad and ('/' not in k):\n final_loss += v\n final_loss.backward()\n losses[\"loss\"] = final_loss\n\n if (batch_idx + 1) % 2 == 0:\n self.model_optimizer.step()\n self.model_optimizer.zero_grad()\n self.zero_grad()\n\n duration = time.time() - before_op_time\n self.log_time(batch_idx, duration, losses[\"loss\"].cpu().data)\n\n if \"depth_gt\" in inputs:\n self.compute_depth_losses(inputs, outputs, losses)\n\n if self.early_phase or self.mid_phase or self.late_phase:\n self.log(\"train\", inputs, outputs, losses)\n self.val(\"real\")\n self.val(\"syn\")\n\n if (batch_idx + 1) % 2 == 0:\n current_lr = self.update_learning_rate(self.model_optimizer, self.opt.learning_rate)", "def run_epoch(session, m, x_data, y_data, writer=None, run_options=None, run_metadata=None, verbose=False,\n category=False):\n epoch_size = ((len(x_data) // m.batch_size) - 1) // m.num_steps\n start_time = time.time()\n costs = 0.0\n iters = 0\n epsilon 
= 1e-8\n delta_cost = 0.5\n prev_cost = 0.0\n\n losses = []\n\n merged = tf.merge_all_summaries()\n for step, (x, y, e, a) in enumerate(semeval_itterator(x_data,\n y_data,\n m.batch_size,\n m.num_steps, category=category)):\n # if delta_cost < epsilon:\n # print(\"delta: \", delta_cost, \" epsilon: \", epsilon)\n # break\n\n if writer:\n cost, loss, summary, _ = session.run([m.cost, m.loss, merged, m.optimizer],\n {m.input_data: x,\n m.targets: y},\n options=run_options,\n run_metadata=run_metadata)\n else:\n cost, loss, _ = session.run([m.cost, m.loss, m.optimizer],\n {m.input_data: x,\n m.targets: y})\n\n # writer.add_run_metadata(run_metadata, 'step%03d' % step)\n if writer:\n writer.add_summary(summary, step)\n\n delta_cost = abs(cost - prev_cost)\n prev_cost = cost\n costs += cost\n iters += m.num_steps\n losses.append(loss)\n\n # print(\"iterations: %d cost %.4f loss %.6f\" % (iters, cost, loss))\n # print(\"updating?\", w)\n\n #if verbose and iters % (m.batch_size * 5) == 0:\n # print(\"step %.3f loss : %.6f speed: %.0f wps\" %\n # (step * 1.0 / epoch_size, loss, iters * m.batch_size / (time.time() - start_time)))\n\n return np.exp(abs(costs) / iters), loss, losses", "def next_epoch(self, state):\n return self.reset(state)", "def run(self):\n self.log.overall('Starting run')\n run_start = time()\n for epoch in xrange(self.n_epochs):\n self.agent.reset()\n self.n_epoch = epoch\n self._run_epoch()\n self.log.overall('End of run ({:.2f} s)'.format(time() - run_start))", "def train_one_epoch(self):\n self.model.train()\n for batch_idx, (imgs, labels) in enumerate(self.tr_loader):\n imgs, labels = imgs.to(self.device), labels.to(self.device)\n self.optimizer.zero_grad()\n\n outputs, aux_outputs = self.model(imgs).values()\n loss1 = self.criterion(outputs, labels)\n loss2 = self.criterion(aux_outputs, labels)\n self.loss = loss1 + 0.3*loss2\n\n _, preds = torch.max(outputs, 1)\n acc = preds.eq(labels.view_as(preds)).sum().item() / self.cfg.bs\n\n self.loss.backward()\n self.optimizer.step()\n \n self.summary_writer.add_scalars('scalar_group', \n { 'loss_end' : loss1.item(),\n 'loss_aux' : loss2.item(),\n 'loss_total' : self.loss.item(),\n 'accuracy' : acc},\n self.current_iteration)\n\n if batch_idx % self.cfg.log_interval == 0:\n info_1 = 'Epochs {} [{}/{} ({:.0f}%)] | Loss: {:.6f}' .format(\n self.current_epoch, \n batch_idx * len(imgs), \n len(self.tr_loader.dataset), \n 100. 
* batch_idx / len(self.tr_loader),\n self.loss.item())\n info_2 = 'Batch Accuracy : {:.2f}'.format(acc)\n self.logger.info('{} | {}'.format(info_1, info_2))\n self.save_checkpoint('{}_epoch{}_iter{}.pt'.format(\n self.cfg.exp_name,\n self.current_epoch, \n self.current_iteration)\n )\n self.current_iteration += 1", "def on_epoch_end(self, epoch, logs=None):", "def on_epoch_end(self, epoch, logs=None):", "def test(self, curr_epoch):\n if not self.config.full_test_flag and (curr_epoch % self.config.test_step == 0 or\n curr_epoch == 0 or\n curr_epoch == self.config.epochs - 1):\n self.evaluator.test(curr_epoch)\n else:\n if curr_epoch == self.config.epochs - 1:\n self.evaluator.test(curr_epoch)", "def run_epoch(session, model, eval_op=None, verbose=False, epoch_size=1):\n start_time = time.time()\n all_words = 0\n costs = 0.0\n predicts = []\n\n fetches = {\n \"cost\": model.cost,\n \"mask\": model.mask,\n \"predict\": model.predicts,\n \"seqlen\": model.seq_len,\n \"loss\": model.loss,\n \"label\": model.label,\n \"label_flat\": model.label_flat,\n \"not_space\": model.not_space\n }\n if eval_op is not None:\n fetches[\"eval_op\"] = eval_op\n # if debug:\n # fetches[\"inputs\"] = model.Dinputs\n # fetches[\"states\"] = model.Dstates\n # fetches[\"outputs\"] = model.Doutput\n\n logging.info(\"Epoch size: %d\" % epoch_size) \n print_idx = 0\n for step in range(epoch_size):\n vals = session.run(fetches)\n cost = vals[\"cost\"]\n mask = vals[\"mask\"]\n predict = vals[\"predict\"]\n label = vals[\"label\"]\n np.set_printoptions(threshold=np.nan)\n \n if eval_op is None:\n \n # if step > 497:\n # #for i in range(len(mask)):\n # # print(mask[i])\n # print(np.sum(mask, axis=1))\n # print(vals[\"seqlen\"])\n mask = np.array(np.round(mask), dtype=np.int32)\n shape = mask.shape\n # if step > 10 and step < 20:\n # print(predict)\n # print(np.argmax(predict, 1))\n predict = np.reshape(np.argmax(predict, 1), shape).tolist()\n mask = np.sum(mask, axis=1).tolist()\n for i in range(shape[0]):\n predicts.append(predict[i][:mask[i]])\n\n costs += cost\n words = np.sum(mask)\n all_words += words\n\n if epoch_size < 100:\n verbose = False\n\n if (step * 10 / epoch_size) > print_idx and eval_op is not None:\n print_idx = step * 10 / epoch_size + 1\n logging.info(\"%.3f perplexity: %.3f speed: %.0f wps\" %\n (step * 1.0 / epoch_size, np.exp(costs / step),\n num_gpus * all_words / (time.time() - start_time)))\n predict = np.argmax(predict, 1)\n label_flat = np.reshape(label, [-1])\n all_label_equal = np.equal(predict, label_flat)\n not_space_label = np.not_equal(label_flat, np.zeros(np.shape(label_flat)))\n not_space_equal = all_label_equal * not_space_label\n not_space_label_count = np.sum(not_space_label)\n not_space_equal_count = np.sum(not_space_equal)\n none_space_accuracy = not_space_equal_count / not_space_label_count\n logging.info(\"not space label: %d\" % not_space_label_count)\n logging.info(\"not space correct: %d\" % not_space_equal_count)\n logging.info(\"not space accuracy: %.3f\" % none_space_accuracy)\n logging.info(\"cost: %.3f\" % (costs / step))\n if np.isnan(np.exp(costs / step)):\n print(\"perplexity is nan\")\n print(\"cost: %f step: %d\" % (costs, step))\n return np.exp(costs / step)\n\n if eval_op is None:\n predict = np.reshape(predict, [-1])\n label_flat = np.reshape(label, [-1])\n all_label_equal = np.equal(predict, label_flat)\n not_space_label = np.not_equal(label_flat, np.zeros(np.shape(label_flat)))\n not_space_equal = all_label_equal * not_space_label\n not_space_label_count = 
np.sum(not_space_label)\n not_space_equal_count = np.sum(not_space_equal)\n none_space_accuracy = not_space_equal_count / not_space_label_count\n logging.info(\"not space label: %d\" % not_space_label_count)\n logging.info(\"not space correct: %d\" % not_space_equal_count)\n logging.info(\"not space accuracy: %.3f\" % none_space_accuracy)\n logging.info(\"cost: %.3f\" % (costs / step))\n return np.exp(costs / epoch_size), predicts\n # elif get_post:\n # # Keep in mind, when get_post, num_steps=1, batch_size=1\n # return np.exp(costs / iters), posteriors\n else:\n return np.exp(costs / epoch_size)", "def test_mnist():\n env = os.environ.copy()\n if not \"CUDA_VISIBLE_DEVICES\" in env:\n env[\"CUDA_VISIBLE_DEVICES\"] = \"\"\n subprocess.run(\n \"edflow -b template_tfe/config.yaml -t --max_batcher_per_epoch --num_epochs 1\",\n shell=True,\n check=True,\n env=env,\n )", "def test_faster_rcnn_train_one_epoch(config, dataset):\n writer = MagicMock()\n\n # XXX This is just a hot fix to prevent a mysterious folder such as:\n # <MagicMock name='mock.logdir' id='140420520377936'> showed up after\n # running this test.\n writer.logdir = tmp_name\n\n kfp_writer = MagicMock()\n checkpointer = MagicMock()\n estimator = FasterRCNN(\n config=config,\n writer=writer,\n checkpointer=checkpointer,\n kfp_writer=kfp_writer,\n logdir=\"/tmp\",\n no_cuda=True,\n )\n estimator.writer = writer\n estimator.kfp_writer = kfp_writer\n estimator.checkpointer = checkpointer\n estimator.device = torch.device(\"cpu\")\n train_dataset = dataset\n is_distributed = False\n train_sampler = FasterRCNN.create_sampler(\n is_distributed=is_distributed, dataset=train_dataset, is_train=True\n )\n train_loader = dataloader_creator(\n config, train_dataset, train_sampler, TRAIN, is_distributed\n )\n params = [p for p in estimator.model.parameters() if p.requires_grad]\n optimizer, lr_scheduler = FasterRCNN.create_optimizer_lrs(config, params)\n accumulation_steps = config.train.get(\"accumulation_steps\", 1)\n epoch = 1\n estimator.train_one_epoch(\n optimizer=optimizer,\n data_loader=train_loader,\n epoch=epoch,\n lr_scheduler=lr_scheduler,\n accumulation_steps=accumulation_steps,\n )\n writer.add_scalar.assert_called_with(\n \"training/lr\", config.optimizer.args.get(\"lr\"), epoch\n )", "def __new_epoch(self):\n self.epoch += 1\n indices = np.arange(self.data.shape[0])\n np.random.shuffle(indices)\n self.q = list(indices)", "def fit_one_epoch(self):\n preds, labels = [], []\n for batch_idx, data in tqdm(enumerate(self.primary_dataloader)):\n losses_report, train_preds, train_labels = self.forward_one_batch(\n data)\n preds.append(train_preds)\n labels.append(train_labels)\n\n self._optimize(losses_report)\n self._update_losses(losses_report, train=True)\n\n self.iter += 1\n\n # log/check point\n with torch.no_grad():\n if self.iter % self.log_iter == 0:\n # TODO: track train\n preds = np.concatenate(preds, axis=0)\n labels = np.concatenate(labels, axis=0)\n if IS_REG:\n preds = disc(preds)\n\n metrics_report = self.evaluate_metrics(preds, labels)\n self._update_metrics(metrics_report, train=True)\n preds, labels = [], []\n\n if self.valid_dataloader:\n self.validate()\n\n self.log_meters()\n self.save_checkpoint()\n self.reset_meters()", "def on_epoch_begin(self, epoch, logs={}):\n if epoch > 0 and epoch % self.eval_frequency == 0:\n # Unhappy hack to work around h5py not being able to write to GCS.\n # Force snapshots and saves to local filesystem, then copy them over to GCS.\n model_path_glob = 'checkpoint.*'\n if not 
self.job_dir.startswith('gs://'):\n model_path_glob = os.path.join(self.job_dir, model_path_glob)\n checkpoints = glob.glob(model_path_glob)\n if len(checkpoints) > 0:\n checkpoints.sort()\n census_model = load_model(checkpoints[-1])\n census_model = model.compile_model(census_model, self.learning_rate)\n loss, acc = census_model.evaluate_generator(\n model.generator_input(self.eval_files, chunk_size=CHUNK_SIZE),\n steps=self.steps)\n print('\\nEvaluation epoch[{}] metrics[{:.2f}, {:.2f}] {}'.format(\n epoch, loss, acc, census_model.metrics_names))\n if self.job_dir.startswith('gs://'):\n copy_file_to_gcs(self.job_dir, checkpoints[-1])\n else:\n print('\\nEvaluation epoch[{}] (no checkpoints found)'.format(epoch))", "def _valid_epoch(self, epoch=0, tb=True):\n\t\tstart = time.time()\n\t\tself.model.eval()\n\n\t\tvalid_loss = 0.0\n\t\tcorrect = 0.0\n\n\t\tall_predictions = []\n\t\tall_targets = []\n\t\tfor images, labels in self.valid_loader:\n\n\t\t\timages, labels = images.to(self.config.DEVICE), labels.to(self.config.DEVICE)\n\n\t\t\toutputs = self.model(images)\n\t\t\tloss = self.criterion(outputs, labels)\n\n\t\t\tvalid_loss += loss.item() * images.size(0)\n\t\t\t_, preds = outputs.max(1)\n\t\t\tcorrect += preds.eq(labels).sum()\n\n\t\t\tall_predictions.extend(preds.cpu().tolist())\n\t\t\tall_targets.extend(labels.cpu().tolist())\n\n\t\tfinish = time.time()\n\t\tmatrix = confusion_matrix(all_targets, all_predictions)\n\n\t\tfig = plot_confusion_matrix(matrix, self.config.CLASS_NAMES, normalize=True)\n\t\tfig.savefig(os.path.join(self.logger_setup['plots_dir'],'confusion_matrix_epoch_'+str(epoch)+'.png'), bbox_inches='tight')\n\t\tprint(\"all targets\",all_targets)\n\t\tprint(\"all predictions\", all_predictions)\n\t\tprint(\"set(targets) - set(predictions)\", set(all_targets)-set(all_predictions))\n\t\tsave_report(all_targets, all_predictions, self.config.CLASS_NAMES, self.logger_setup['reports_dir'], epoch)\n\t\tweighted_kappa = quadratic_weighted_kappa(matrix)\n\t\tself.logger_setup['writer'].add_figure('Test/Confusion Matrix', fig, epoch)\n\n\t\tprint('Evaluating Network.....')\n\t\tprint('Validation set: Epoch: {}, Average loss: {:.4f}, Accuracy: {:.4f}, Time consumed:{:.2f}s'.format(\n\t\t\tepoch,\n\t\t\tvalid_loss / self.total_valid_samples,\n\t\t\tcorrect.float() / self.total_valid_samples,\n\t\t\tfinish - start))\n\t\tprint()\n\t\tif tb:\n\t\t\tself.logger_setup['writer'].add_scalar('Test/Average loss', valid_loss / self.total_valid_samples, epoch)\n\t\t\tself.logger_setup['writer'].add_scalar('Test/Accuracy', correct.float() / self.total_valid_samples, epoch)\n\t\t\tself.logger_setup['writer'].add_scalar('Test/Quadratic weighted kappa', weighted_kappa, epoch)\n\n\t\treturn (correct.float() / self.total_valid_samples, weighted_kappa)", "def new_epoch(self):\n self._curr_batch = 0\n if self.shuffle_order:\n self.shuffle()", "def on_epoch_start(self, epoch, logs: Optional[Dict] = None):\n pass", "def train_epoch(model,\n\t \ttrain_generator,\n\t \toptimizer,\n\t \tcallback=None):\n model.train()\n for it, (batch_of_x, batch_of_y) in enumerate(train_generator):\n train_on_batch(model, batch_of_x, batch_of_y, optimizer)\n\n if callback is not None:\n callback(model)\n return", "def run_epoch(sess, m, data, eval_op, verbose=False):\n epoch_size = ((len(data) // m.batch_size) - 1) // m.num_steps\n start_time = time.time()\n costs = 0.0\n iters = 0\n state = m.initial_state.eval()\n # print(len(data), m.batch_size, m.num_steps) # 929589 20 20\n\n # for step, (x, y) in 
enumerate(reader.ptb_iterator(data, m.batch_size,\n # m.num_steps)):\n for step, (x, y) in enumerate(ptb_iterator(data, m.batch_size,\n m.num_steps)):\n cost, state, _ = sess.run([m.cost, m.final_state, eval_op],\n {m.input_data: x,\n m.targets: y,\n m.initial_state: state})\n costs += cost\n iters += m.num_steps\n\n if verbose and step % (epoch_size // 10) == 10:\n print(\" %.3f perplexity: %.3f speed: %.0f wps\" %\n (step * 1.0 / epoch_size, np.exp(costs / iters),\n iters * m.batch_size / (time.time() - start_time)))\n\n return np.exp(costs / iters)", "def train_epoch(self, epoch, evaluator, optimizer, perf_path, perf_trace, state_fpath, writer_tensorboard):\n\n # Train an epoch\n self.model.train()\n print('Start epoch', epoch)\n train_itr = iter(self.loader_train)\n total_err = 0\n total_acc = 0\n\n for index, (data_pixel, data_labels) in enumerate(train_itr):\n\n # compute\n input_data = data_pixel.float().cuda()\n data_labels = data_labels.cuda()\n\n # Use the model the produce the classification\n grapheme_logits, vowel_logits, consonant_logits = self.model(input_data)\n\n # produce evaluator results\n eval_result = evaluator(grapheme_logits, vowel_logits, consonant_logits, data_labels)\n\n # set optimizer to zero.\n optimizer.zero_grad()\n\n # back propogate the evaluation results.\n eval_result['loss'].backward()\n\n # optimizer take step forward.\n optimizer.step()\n\n # tabulate the steps from the evaluation\n eval_result = {k: eval_result[k].item() for k in eval_result}\n\n # update every hundreds' of\n if index % 100 == 0:\n print(index, eval_result['loss'], eval_result['acc'])\n train_result = evaluator.evalulate_on_cache()\n train_total_err = train_result['loss']\n writer_tensorboard.add_scalar('Loss/Train', train_total_err, global_step=epoch)\n # log_metric('loss', train_total_err)\n train_total_acc = train_result['acc']\n writer_tensorboard.add_scalar('Accuracy/Train', train_total_acc, global_step=epoch)\n # log_metric('acc', train_total_acc)\n train_kaggle_score = train_result['kaggle_score']\n writer_tensorboard.add_scalar('Kaggle_Score/Train', train_kaggle_score, global_step=epoch)\n # log_metric('kaggle_score', train_kaggle_score)\n dict_metrics_train = {\n 'Loss/Train': train_total_err,\n 'Accuracy/Train': train_total_acc,\n 'Kaggle_Score/Train': train_kaggle_score,\n }\n log_metrics(dict_metrics_train, step=epoch)\n print(f\"Epoch {epoch} Training, Loss {train_total_err}, Acc {train_total_acc}\")\n evaluator.clear_cache()\n # compute validation error\n self.model.eval()\n val_itr = iter(self.loader_val)\n with torch.no_grad():\n for index, (data_pixel, data_labels) in enumerate(val_itr):\n input_data = data_pixel.float().cuda()\n data_labels = data_labels.cuda()\n grapheme_logits, vowel_logits, consonant_logits = self.model(input_data)\n eval_result = evaluator(grapheme_logits, vowel_logits, consonant_logits, data_labels)\n eval_result = {k: eval_result[k].item() for k in eval_result}\n total_err += eval_result['loss']\n total_acc += eval_result['acc']\n # print(total_err / (1 + input_index), total_acc / (1 + input_index))\n val_result = evaluator.evalulate_on_cache()\n val_total_err = val_result['loss']\n writer_tensorboard.add_scalar('Loss/Val', val_total_err, global_step=epoch)\n val_total_acc = val_result['acc']\n writer_tensorboard.add_scalar('Accuracy/Val', val_total_acc, global_step=epoch)\n val_kaggle_score = val_result['kaggle_score']\n writer_tensorboard.add_scalar('Kaggle_Score/Val', val_kaggle_score, global_step=epoch)\n dict_metrics_val = {\n 
'Loss/Validation': val_total_err,\n 'Accuracy/Validation': val_total_acc,\n 'Kaggle_Score/Validation': val_kaggle_score,\n }\n log_metrics(dict_metrics_val, step=epoch)\n # Write to disk.\n writer_tensorboard.flush()\n print(f\"Epoch {epoch} Eval, Loss {val_total_err}, Acc {val_total_acc}\")\n evaluator.clear_cache()\n print(\"Saving the model (epoch %d)\" % epoch)\n torch.save({\n \"epoch\": epoch + 1,\n \"model_state\": self.model.state_dict(),\n \"optimizer_state\": optimizer.state_dict(),\n }, state_fpath)\n print(f\"Making a backup (step {epoch})\")\n backup_fpath = os.path.join(self.backup_dir, f\"model_bak_{epoch}.pt\")\n torch.save({\n \"epoch\": epoch + 1,\n \"model_state\": self.model.state_dict(),\n \"optimizer_state\": optimizer.state_dict(),\n }, backup_fpath)\n # Dump the traces\n perf_trace.append(\n {\n 'epoch': epoch,\n 'train_err': train_total_err,\n 'train_acc': train_total_acc,\n 'train_kaggle_score': train_kaggle_score,\n 'val_err': val_total_err,\n 'val_acc': val_total_acc,\n 'val_kaggle_score': val_kaggle_score\n }\n )\n pickle.dump(perf_trace, open(perf_path, 'wb'))\n # store epoch full result separately\n epoch_result = {\n 'epoch': epoch,\n 'train_result': train_result,\n 'val_result': val_result\n }\n pickle.dump(epoch_result, open(os.path.join(self.results_dir, 'result_epoch_{0}.p'.format(epoch)), 'wb'))", "def train_epoch(self, epoch_info: EpochInfo, interactive=True) -> None:\n epoch_info.on_epoch_begin()\n\n if interactive:\n iterator = tqdm.trange(epoch_info.batches_per_epoch, file=sys.stdout, desc=\"Training\", unit=\"batch\")\n else:\n iterator = range(epoch_info.batches_per_epoch)\n\n for batch_idx in iterator:\n batch_info = BatchInfo(epoch_info, batch_idx)\n\n batch_info.on_batch_begin()\n self.train_batch(batch_info)\n batch_info.on_batch_end()\n\n epoch_info.result_accumulator.freeze_results()\n epoch_info.on_epoch_end()", "def _evaluate_during_fit(self, test_loader, epoch):" ]
[ "0.7596425", "0.7297351", "0.71258926", "0.71189123", "0.7038193", "0.70254976", "0.69980335", "0.69980335", "0.6921019", "0.69073707", "0.69073707", "0.69073707", "0.69073707", "0.6906141", "0.6859562", "0.6859317", "0.6823435", "0.6796138", "0.67864937", "0.67839265", "0.6775237", "0.6775237", "0.6770645", "0.6770529", "0.6680905", "0.6647951", "0.66363484", "0.66274977", "0.6615157", "0.6593437", "0.65886694", "0.65546405", "0.65473616", "0.6543509", "0.6526242", "0.6525452", "0.6504917", "0.6497753", "0.6493223", "0.6486272", "0.648187", "0.6479165", "0.6468471", "0.646626", "0.64553374", "0.6447646", "0.6430612", "0.64271826", "0.6417328", "0.641731", "0.6412232", "0.6410404", "0.6403878", "0.63945174", "0.638679", "0.63794994", "0.6378148", "0.6371226", "0.6362714", "0.6362081", "0.6359412", "0.6357165", "0.6355901", "0.6336884", "0.6329348", "0.6326258", "0.63020635", "0.6300544", "0.6269148", "0.6254543", "0.62494785", "0.62431514", "0.6233451", "0.6231668", "0.6217373", "0.6217134", "0.6192369", "0.6167296", "0.6164353", "0.6163567", "0.61321443", "0.6117203", "0.6113406", "0.61082304", "0.6091991", "0.6091991", "0.6089786", "0.6085783", "0.6085036", "0.60626334", "0.6056147", "0.60500485", "0.6047036", "0.6044068", "0.6042031", "0.60394853", "0.6014582", "0.6014392", "0.6008359", "0.6007879", "0.60044575" ]
0.0
-1
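The negatives in the record above are all variants of the same pattern: an epoch-level loop that accumulates loss and accuracy over batches and returns their averages. A minimal self-contained sketch of that shared pattern is shown below; the model, loader, criterion, and optimizer names are hypothetical stand-ins and are not taken from any of the snippets.

```python
# Generic train/eval epoch loop (illustrative sketch, PyTorch).
import torch


def run_epoch(model, loader, criterion, optimizer=None, device="cpu"):
    """Run one epoch; trains if an optimizer is given, otherwise evaluates."""
    is_train = optimizer is not None
    if is_train:
        model.train()
    else:
        model.eval()
    total_loss, total_correct, total_seen = 0.0, 0, 0
    with torch.set_grad_enabled(is_train):
        for inputs, targets in loader:
            inputs, targets = inputs.to(device), targets.to(device)
            outputs = model(inputs)
            loss = criterion(outputs, targets)
            if is_train:
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
            total_loss += loss.item() * inputs.size(0)
            total_correct += (outputs.argmax(dim=1) == targets).sum().item()
            total_seen += inputs.size(0)
    return total_loss / total_seen, total_correct / total_seen
```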
Convert a text to a format ROUGE understands. The text is assumed to contain one sentence per line.
def convert_text_to_rouge_format(text, title="dummy title"):
    sentences = text.split("\n")

    sent_elems = [
        "<a name=\"{i}\">[{i}]</a> <a href=\"#{i}\" id={i}>"
        "{text}</a>".format(i=i, text=sent)
        for i, sent in enumerate(sentences, start=1)
        if sent != '']

    html = """<html>
<head>
<title>{title}</title>
</head>
<body bgcolor="white">
{elems}
</body>
</html>""".format(title=title, elems="\n".join(sent_elems))

    return html
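A short usage sketch for convert_text_to_rouge_format above: the function and its output layout come from the record itself, while the sample summary text and output filename are illustrative assumptions. Each non-empty input line becomes one numbered anchor element in the generated HTML, the per-sentence layout commonly fed to the classic ROUGE-1.5.5 toolkit.

```python
# Assumes convert_text_to_rouge_format from the record above is in scope.
summary = "The cat sat on the mat.\nIt then fell asleep in the sun."
html = convert_text_to_rouge_format(summary, title="system summary 001")
# First generated sentence element:
#   <a name="1">[1]</a> <a href="#1" id=1>The cat sat on the mat.</a>
with open("system.001.html", "w", encoding="utf-8") as f:
    f.write(html)
```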
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
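The metadata above declares a (query, document, negatives) triplet objective. A minimal sketch of how one such record could feed a contrastive loss follows; the encode function is a hypothetical text encoder returning a fixed-size vector and is not defined by this dataset.

```python
# Illustrative contrastive scoring of one record (PyTorch).
import torch
import torch.nn.functional as F


def contrastive_loss(record, encode, temperature=0.05):
    q = encode(record["query"])                     # shape (d,)
    docs = [record["document"]] + list(record["negatives"])
    d = torch.stack([encode(t) for t in docs])      # shape (1 + n_neg, d)
    sims = F.cosine_similarity(q.unsqueeze(0), d) / temperature
    target = torch.tensor(0)                        # index 0 is the positive document
    return F.cross_entropy(sims.unsqueeze(0), target.unsqueeze(0))
```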
[ "def convert(text):\n return NewDocument.from_rst(text).format()", "def preprocess(self, text):\r\n return text", "def preprocess(self, text):\n if self.model_name == \"bert-base-arabert\":\n return self._old_preprocess(\n text,\n do_farasa_tokenization=True,\n )\n\n if self.model_name == \"bert-base-arabertv01\":\n return self._old_preprocess(text, do_farasa_tokenization=False)\n\n text = str(text)\n text = html.unescape(text)\n if self.strip_tashkeel:\n text = araby.strip_tashkeel(text)\n if self.strip_tatweel:\n text = araby.strip_tatweel(text)\n\n if self.replace_urls_emails_mentions:\n # replace all possible URLs\n for reg in url_regexes:\n text = re.sub(reg, \" [رابط] \", text)\n # REplace Emails with [بريد]\n for reg in email_regexes:\n text = re.sub(reg, \" [بريد] \", text)\n # replace mentions with [مستخدم]\n text = re.sub(user_mention_regex, \" [مستخدم] \", text)\n\n if self.remove_html_markup:\n # remove html line breaks\n text = re.sub(\"<br />\", \" \", text)\n # remove html markup\n text = re.sub(\"</?[^>]+>\", \" \", text)\n\n # remove repeated characters >2\n if self.remove_elongation:\n text = self._remove_elongation(text)\n\n # insert whitespace before and after all non Arabic digits or English Digits and Alphabet and the 2 brackets\n if self.insert_white_spaces:\n text = re.sub(\n \"([^0-9\\u0621-\\u063A\\u0641-\\u064A\\u0660-\\u0669a-zA-Z\\[\\]])\",\n r\" \\1 \",\n text,\n )\n\n # insert whitespace between words and numbers or numbers and words\n text = re.sub(\n \"(\\d+)([\\u0621-\\u063A\\u0641-\\u064A\\u0660-\\u066C]+)\", r\" \\1 \\2 \", text\n )\n text = re.sub(\n \"([\\u0621-\\u063A\\u0641-\\u064A\\u0660-\\u066C]+)(\\d+)\", r\" \\1 \\2 \", text\n )\n\n # remove unwanted characters\n if self.keep_emojis:\n emoji_regex = \"\".join(list(self.emoji.UNICODE_EMOJI[\"en\"].keys()))\n rejected_chars_regex2 = \"[^%s%s]\" % (chars_regex, emoji_regex)\n text = re.sub(rejected_chars_regex2, \" \", text)\n else:\n text = re.sub(rejected_chars_regex, \" \", text)\n\n # remove extra spaces\n text = \" \".join(text.replace(\"\\uFE0F\", \"\").split())\n\n if (\n self.model_name == \"bert-base-arabertv2\"\n or self.model_name == \"bert-large-arabertv2\"\n ):\n if self.keep_emojis:\n new_text = []\n for word in text.split():\n if word in list(self.emoji.UNICODE_EMOJI[\"en\"].keys()):\n new_text.append(word)\n else:\n new_text.append(self.farasa_segmenter.segment(word))\n text = \" \".join(new_text)\n else:\n text = self.farasa_segmenter.segment(text)\n return self._farasa_segment(text)\n\n # ALl the other models dont require Farasa Segmentation\n return text", "def nltk_text(self, text):\n text = nltk.Text(word_tokenize(text))\n return text", "def processText(text):\n print(type(text))\n for line in text:\n print(line)\n return text", "def convert_txt_to_data():\n pass", "def process_text(model_name: str, text: str) -> spacy.tokens.Doc:\r\n nlp = load_model(model_name)\r\n return nlp(text)", "def normalize(self, text: str) -> str:", "def format_ocr_text(self, page):\n \n #read out of the text file that tesseract made\n ocr_text = open(self.ocr_text, 'r')\n \n # write into this file\n djvu_text = open( self.djvu_text, 'w' )\n \n text = \"(page 0 0 1 1\\n\"\n \n self.out_text.write('\\n## Page %d ###\\n\\n' % page )\n \n for line in ocr_text:\n \n #write to the human readable file\n self.out_text.write(line)\n \n # add each line of text\n # escaping \" to \\\" as we go\n text += '(line 0 0 1 1 \"%s\")\\n' % line.replace('\"', r'\\\"').strip()\n \n text += \")\\n\"\n \n 
djvu_text.write( text )\n \n ocr_text.close()\n djvu_text.close()", "def format_text(text: TTextType) -> typing.Iterator[TViewLine]:\n for line in text.splitlines():\n yield [(\"text\", line)]", "def process_text(self, text, language):", "def process_text(text):\n text = text.strip()\n textList = text.split('\\n')\n newText = ''\n addNewline = True\n for line in textList:\n # Remove duplicate white space\n temp = ' '.join(line.split())\n # Trim any beginning non-alphabet letters\n temp = trim(temp)\n # Remove overly short lines, but keep ends of sentences\n # Add a newline if gap detected\n if len(temp) < 40 and not '.' in temp:\n if addNewline:\n newText += '\\n'\n addNewline = False\n continue\n # Add line to growing string\n newText += temp + ' '\n addNewline = True\n return newText", "def format_text(text):\n\n\ttext = ' '.join(text).lower()\n\ttext = re.sub(r\"[^a-zA-Z.?!]\", \" \", text)\n\ttext = re.sub(r' +', ' ', text)\n\ttext = word_tokenize(text)\n\ttext = pos_tag(text)\n\n\treturn text", "def normalize_text(text):\n\n text = text.lower().strip().replace(\"\\n\", \" \").replace(\"\\r\", \"\")\n\n text = replace_money_token(text)\n text = replace_urls_token(text)\n text = fix_unicode_quotes(text)\n text = format_large_numbers(text)\n text = pad_punctuation(text)\n return text.strip()", "def convert_all(text):\r\n\tpig_tokens = ''\r\n\r\n\t#tokenizes the text\r\n\ttokens = word_tokenize(text)\r\n\r\n\t#regex for non-alphabetical characters\r\n\tpattern = re.compile(r'[^a-zA-Z]')\r\n\r\n\t#converts the words to pig latin and appends them to the sentence.\r\n\tfor token in tokens:\r\n\t\tif not re.findall(pattern, token):\r\n\t\t\tword = word_to_pig_latin(token)\r\n\r\n\t\t\tif re.findall(r'[A-Z]', word):\r\n\t\t\t\tword = word.lower()\r\n\t\t\t\tword = word.capitalize()\r\n\t\t\tpig_tokens += ' ' + word\r\n\t\telse:\r\n\t\t\tpig_tokens += token\r\n\r\n\tpig_text = ''.join(pig_tokens)\r\n\r\n\treturn pig_text", "def proc_text(self, text):\n\n lemmas = []\n tokens = []\n doc = self(text)\n for tokObj in doc:\n if self._remove_punct and tokObj.is_punct:\n continue\n lemma = tokObj.lemma_\n text = tokObj.text\n if self._keep_only_alpha_num and not is_alpha_num(text):\n continue\n tok1 = text.lower()\n tok2 = lemma.lower()\n if tok1 in self._stopwords or tok2 in self._stopwords:\n continue\n\n if self._lower_case:\n text = text.lower()\n lemma = lemma.lower()\n\n lemmas.append(lemma)\n tokens.append(text)\n\n return ' '.join(lemmas), ' '.join(tokens)", "def norm_text(self, text):\n\n # encode to apply utf-8 and decode to remove initial 'b'\n text = str(text.encode('utf-8').decode('utf-8'))\n text = text.lower()\n\n # Clean the text\n text = re.sub(r\"[^A-Za-z0-9^,!.\\/'+-=]\", \" \", text)\n text = re.sub(r\"what's\", \"what is \", text)\n text = re.sub(r\"\\'s\", \" \", text)\n text = re.sub(r\"\\'ve\", \" have \", text)\n text = re.sub(r\"can't\", \"cannot \", text)\n text = re.sub(r\"n't\", \" not \", text)\n text = re.sub(r\"i'm\", \"i am \", text)\n text = re.sub(r\"\\'re\", \" are \", text)\n text = re.sub(r\"\\'d\", \" would \", text)\n text = re.sub(r\"\\'ll\", \" will \", text)\n text = re.sub(r\"\\.\", \" \", text)\n text = re.sub(r\"!\", \" ! 
\", text)\n text = re.sub(r\"\\/\", \" \", text)\n text = re.sub(r\"\\^\", \" ^ \", text)\n text = re.sub(r\"\\+\", \" + \", text)\n text = re.sub(r\"\\-\", \" - \", text)\n text = re.sub(r\"\\=\", \" = \", text)\n text = re.sub(r\"'\", \" \", text)\n text = re.sub(r\"(\\d+)(k)\", r\"\\g<1>000\", text)\n text = re.sub(r\":\", \" : \", text)\n text = re.sub(r\" e g \", \" eg \", text)\n text = re.sub(r\" b g \", \" bg \", text)\n text = re.sub(r\" u s \", \" american \", text)\n text = re.sub(r\"\\0s\", \"0\", text)\n text = re.sub(r\" 9 11 \", \"911\", text)\n text = re.sub(r\"e - mail\", \"email\", text)\n text = re.sub(r\"j k\", \"jk\", text)\n text = re.sub(r\"\\s{2,}\", \" \", text)\n return text", "def rich(text):\n return full(text, False)", "def process_text(text):\n no_split_dict = {'u . s': 'u.s', 'u . n': 'u.n', 'u . k': 'u.k', 'l . a': 'l.a', 'j . k': 'j.k', 'a . m': 'a.m',\n 'p . m': 'p.m', 'd . j': 'd.j', 'd . a': 'd.a'}\n\n text = re.sub(\".*--\", \"\", text, count=1) # Removing cnn from start of text\n if text.startswith('(CNN)'): # Remove cnn from articles that starts with only cnn\n text = re.sub('\\(CNN\\)', '', text, count=1)\n text = re.sub(r'(?<=[^?!.0-9])(?=[.,!?])', ' ', text) # 4\n text = re.sub(r'(?![0-9])(?<=[.,])(?=[^\\s])', r' ', text) # 4\n text = text.lower() # 2\n text = re.sub('[^A-Za-z0-9 .!?,øæå]+', '', text) # 3\n text = re.sub(r'((?<=[a-z])(?=[.]))|((?=[a-z])(?<=[.]))(?=[^\\s])', r' ', text) # space a-z.a-z\n text = re.sub(r'((?=[0-9])(?<=[a-z]))|((?=[a-z])(?<=[0-9]))(?=[^\\s])', r' ', text) # space 0-9a-z\n for key in no_split_dict:\n text = text.replace(key, no_split_dict[key]) # Fixing word splits\n text = re.sub('[0-9]', '#', text) # 8\n text = \" \".join(text.split()) # 5, 6, 7 - i think\n return text", "def text_level_normalizer(self, sentence: str, *args: Any, **kwargs: Any) -> str:\n text = sentence\n return text", "def convert_pattern_format(text):\n parsed_text = []\n # parse text via Pattern's parser\n pattern_parsed_text = Text(parse(text, relations=True, lemmata=True))\n for sentence in pattern_parsed_text:\n s = Sentence()\n s.string = remove_blanks(sentence.string)\n for word in sentence:\n # Patterns tags for each word in the sentence are stored in a new Word-object\n w = Word()\n w.string = word.string\n w.lemma = word.lemma\n w.index = word.index\n w.tag = word.type\n w.entity = \"\"\n # each word is appended to a Sentence-object\n s.words.append(w)\n # each Sentence-object is appended to an array\n parsed_text.append(s)\n return parsed_text", "def serialize_text(text):\n return serialize_plaintext(text)", "def convert_srt_to_txt(text, join=False):\n lines = text.split('\\n')\n result = []\n for line in lines:\n if not line.strip(): # Skipping empty lines\n continue\n elif line.strip().isdigit(): # Skip lines containing only numbers\n continue\n elif (line.startswith(\"WEBVTT\") or\n line.startswith(\"Kind: captions\") or\n line.startswith(\"Language: en\")): # Skipping lines containing service information\n continue\n # We skip lines with the format \"00:00:00,000 --> 00:00:03,090\"\n elif re.match(r\"\\d{2}:\\d{2}:\\d{2}.\\d{3} --> \\d{2}:\\d{2}:\\d{2}.\\d{3}\", line.strip()):\n continue\n else:\n result.append(line.strip())\n if join:\n out = join_lines(result) # Combining strings into sentences\n else:\n out = \"\\n\".join(result) # Combining strings without parsing into sentences\n return out", "def parse_text(self, text):\n self._text_paragraph = text.split(\"\\n\")\n self._render()", "def preprocess_text(text: str) -> 
Tuple[List[str], Dict]:\n raise NotImplementedError", "def edit_google_vision_text(self,text):\n s1=text\n try:\n log_info(\"Correcting google vision text to remove extra spacing\",MODULE_CONTEXT)\n i=0\n while(i<len(text)):\n s1=text\n if text[i] in [\"/\",\"।\",'।' ,':','|',\",\" ,'०',\"]\",\"-\",\")\",\"}\"] and text[i-1]==\" \": \n text=text[:i-1]+text[i:]\n if i > 0 :\n if text[i-1] in [\"-\",\"[\",\"{\",\"/\",\"(\"] and text[i]==\" \":\n text=text[:i]+text[i+1:]\n elif text[i] in [\"-\",\"[\",\"{\",\"/\",\"(\"] and text[i+1]==\" \":\n text=text[:i+1]+text[i+2:]\n i=i+1\n except Exception as e:\n log_exception(\"Exception while correcting google vision text\", MODULE_CONTEXT, e)\n return s1\n return text", "def text_prepare(txt):\n print(txt)\n txt = re.sub(r\"[^\\w\\s]\", \" \", str(txt).lower().strip())\n txt = txt.split()\n nltk.corpus.stopwords.words(\"english\")\n txt = [word for word in txt if word not in nltk.corpus.stopwords.words(\"english\")]\n lem = nltk.stem.wordnet.WordNetLemmatizer()\n txt = [lem.lemmatize(word) for word in txt]\n txt = \" \".join(txt)\n return txt", "def preprocess(text):\n text = remove_space(text)\n text = clean_special_punctuations(text)\n text = handle_emojis(text)\n text = clean_number(text)\n text = spacing_punctuation(text)\n text = clean_repeat_words(text)\n text = remove_space(text)\n #text = stop(text)# if changing this, then chnage the dims \n #(not to be done yet as its effecting the embeddings..,we might be\n #loosing words)...\n return text", "def parse_text(self, text):\r\n MAXLEN = 100\r\n sentences = []\r\n punct = [\",\",\":\",\";\",\".\",\"–\",\"?\",\"!\",\"(\",\")\"] # Interpunctuation marks\r\n text = text.replace(\"\\r\", \" \").replace(\"\\t\", \" \") # Remove CR and tabs\r\n words = text.split(\" \") if len(text) > MAXLEN else []\r\n sentence = \"\" if len(text) > MAXLEN else text\r\n\r\n # Preprocess list for silence markers\r\n if conf.SilenceMarker in text:\r\n words_new = []\r\n if not words and sentence: # Was too short to be cut initially\r\n words = text.split(\" \")\r\n sentence = \"\"\r\n for w in filter(None, words):\r\n if conf.SilenceMarker not in w.lower():\r\n words_new.append(w)\r\n else:\r\n text_chunks = w.lower().split(conf.SilenceMarker)\r\n for i, part in enumerate(text_chunks):\r\n if part:\r\n words_new.append(part)\r\n if i < len(text_chunks) - 1:\r\n words_new.append(conf.SilenceMarker)\r\n else:\r\n if words_new and conf.SilenceMarker in words_new[-1]:\r\n words_new[-1] += conf.SilenceMarker\r\n else:\r\n words_new.append(conf.SilenceMarker)\r\n words = words_new\r\n\r\n for w in words:\r\n if conf.SilenceMarker in w:\r\n if sentence:\r\n sentences.append(sentence.strip())\r\n sentences.append(w)\r\n sentence = \"\"\r\n elif w[-1] in punct or w[0] in punct: # Encountered punctuation\r\n if w[-1] in punct and (len(sentence) + len(w) + 1 < MAXLEN):\r\n # Word ends with punct and sentence can still be added to\r\n sentences.append(sentence.strip() + \" \" + w.strip())\r\n sentence = \"\" # Save sentence and word, start new sentence\r\n elif w[0] in punct and w[-1] not in punct:\r\n # Word starts with punctuation, like '('\r\n sentences.append(sentence.strip()) # Save current sentence\r\n sentence = w # Start a new sentence with punct and word\r\n else: # word ends with punct and sentence already long enough\r\n sentences.extend([sentence.strip(), w.strip()])\r\n sentence = \"\" \r\n else:\r\n if (len(sentence) + len(w) + 1 < MAXLEN): # Sentence still\r\n sentence += \" \" + w # short enough\r\n else: # 
Sentence too long\r\n sentences.append(sentence.strip())\r\n sentence = w # Start a new sentence with the word\r\n if sentence:\r\n sentences.append(sentence.strip())\r\n return sentences", "def process_text(self):\n prp1 = preprocessor.Preprocess()\n processed_text = prp1.clean_data(self.text)\n self.vec1 = self.vec.transform(pd.Series(processed_text))", "def graphtextdetextor(image_path):\n img=cv2.imread(image_path)\n\n #img=image_filter.rotate_anticlockwise(img)\n\n\n custom_config_number=r'--oem 3 --psm 6 outputbase digits'\n custom_config=r'--oem 3 --psm 6'\n\n custom_config1=r'--oem 3 --psm 1'\n\n custom_config2=r'--oem 3 --psm 4'\n\n text=pytesseract.image_to_string(img,config=custom_config)\n text2=pytesseract.image_to_string(img,config=custom_config1)\n text3=pytesseract.image_to_string(img,config=custom_config2)\n\n\n\n d=pytesseract.image_to_data(img,config=custom_config,output_type=Output.DICT)\n\n #print(text3)\n return [text,text2,text3]", "def reformat_text(self, text):\n xml = BeautifulSoup(text)\n self.remove_header_and_footer(xml)\n self.process_superscripts(xml)\n self.remove_footnotes(xml)\n text = xml.get_text() # Strip XML tags.\n text = self.join_hyphenated_words(text)\n text = self.remove_linebreaks(text)\n return text", "def parse_text(text):\n return str(str(text).encode(\"ascii\", \"ignore\")).replace(\"\\\\n\",\"\\n\").replace(\"b'\",\"\")", "def process_text(infile, lower,\n longs, ocr, # lemmatization,\n umlauts, punctuation,\n numbers, stopwords):\n # read file\n text = infile.read() # TODO: read via iterator? ('for line in file') (no nltk.sent_tokenize)\n\n processed_sentences = []\n\n # delete linebreaks\n text = text.replace('¬\\n', '').strip() # remove seperators and merge parts of word\n text = text.replace('-\\n', '')\n text = text.replace('\\n', ' ') # remove linebreak for sentence recognition\n\n # load stopwords\n if not umlauts:\n # take original stopwords\n stop_words = nltk.corpus.stopwords.words('german')\n else:\n # convert umlauts in stopwords to digraphs\n stop_words = [replace_umlauts(word) for word in nltk.corpus.stopwords.words('german')]\n\n \"\"\"\n if lemmatization:\n nlp = spacy.load('de_core_news_sm') # for lemmatization\n else:\n nlp = None\n \"\"\"\n\n # get sentences from text\n sentences = nltk.sent_tokenize(text, language='german')\n\n # process each sentence\n for sentence in sentences:\n if lower:\n sentence = sentence.lower()\n if ocr:\n sentence = replace_ocr_mistakes(sentence)\n if longs:\n sentence = replace_long_s(sentence)\n \"\"\"\n if lemmatization:\n sentence = nlp(sentence)\n sentence = ' '.join([word.lemma_ for word in sentence]) # rechenintensiv!\n \"\"\"\n if umlauts:\n sentence = replace_umlauts(sentence)\n if punctuation:\n sentence = sentence.translate(str.maketrans('', '', string.punctuation))\n sentence = sentence.replace('“', '') # not in string.punctuation\n sentence = sentence.replace('„', '') # not in string.punctuation\n sentence = sentence.replace('—', '') # not in string.punctuation\n if numbers:\n sentence = sentence.translate(str.maketrans('', '', string.digits))\n # TODO: How to handle ²,³, ⁴, ⁵,⁶,⁷,⁸?\n if stopwords:\n words = nltk.word_tokenize(sentence)\n words = [x for x in words if x not in stop_words]\n sentence = ' '.join(words)\n if len(sentence) > 1:\n processed_sentences.append(sentence)\n return processed_sentences", "def preprocess_training_text(text, accented_chars=True, \n convert_num=False, extra_whitespace=True, \n lemmatization=True, lowercase=True, punctuations=True,\n 
remove_html=True, remove_num=True, special_chars=True, \n stop_words=True):\n \n\n \"\"\"preprocess text with default option set to true for all steps\"\"\"\n if remove_html == True: #remove html tags\n text = strip_html_tags(text)\n if extra_whitespace == True: #remove extra whitespaces\n text = remove_whitespace(text)\n if accented_chars == True: #remove accented characters\n text = remove_accented_chars(text)\n if lowercase == True: #convert all characters to lowercase\n text = text.lower()\n \n \n doc = nlp(text) #tokenise text\n\n\n clean_text = []\n for token in doc:\n flag = True\n edit = token.text\n # print(\"Word: \", edit, \" Type: \", token.pos_)\n # remove stop words\n if stop_words == True and token.is_stop and token.pos_ != 'NUM': \n flag = False\n # remove punctuations\n if punctuations == True and (token.pos_ == 'PUNCT') and flag == True: \n flag = False\n \n # remove 'X' characters:\n if token.pos_ == 'X':\n flag = False\n # remove special characters\n if special_chars == True and token.pos_ == 'SYM' and flag == True: \n flag = False\n # remove numbers\n if remove_num == True and (token.pos_ == 'NUM' or token.text.isnumeric()) \\\n and flag == True:\n flag = False\n # convert number words to numeric numbers\n if convert_num == True and token.pos_ == 'NUM' and flag == True:\n edit = w2n.word_to_num(token.text)\n # convert tokens to base form\n elif lemmatization == True and token.lemma_ != \"-PRON-\" and flag == True:\n edit = token.lemma_\n # append tokens edited and not removed to list \n if edit != \"\" and flag == True:\n clean_text.append(edit)\n \n # Convert back to string:\n new_text = ' '.join(clean_text)\n regex = re.compile('[^a-zA-Z]')\n new_text = regex.sub(' ', new_text)\n words = re.findall(r'\\w+.', new_text)\n return ' '.join(words)", "def convert_chn_text(detail=True):\n p = {\n \"data_path\": \"../data/data_literature\",\n \"output_dir\": \"../data/converted_data\"\n }\n if detail:\n gen_params_info(p)\n\n os.system(\"rm -rf %s\" % p[\"output_dir\"])\n os.system(\"mkdir -p %s\" % p[\"output_dir\"])\n files = os.listdir(p[\"data_path\"])\n for file_name in files:\n if detail:\n print(\"to process %s\" % file_name)\n file_path = \"%s/%s\" % (p[\"data_path\"], file_name)\n out_file_path = \"%s/%s\" % (p[\"output_dir\"], file_name)\n fh_in = codecs.open(filename=file_path, mode=\"r\", encoding='utf8')\n fh_out = codecs.open(filename=out_file_path, mode=\"w\", encoding='utf8')\n line_idx = 1\n verb = \"\"\n for line in fh_in:\n line = line.lstrip()\n if line.find(\"\\t\") < 0:\n print(\"Please check in file %s, line: %s\\nsentence :%s\\n\"\\\n \"The above sentence has NO TAB and has been skiped!\" \\\n % (file_name, line_idx, line))\n continue\n items = line.split(\"\\t\")\n if len(items) != 4:\n print(\"Please check in file %s, line: %s\\nsentence :%s\\n\"\\\n \"The above sentence has NO 4 TAB and has been skiped!\" \\\n % (file_name, line_idx, line))\n continue\n frame_id = items[0]\n if frame_id.find(\".\") >= 0:\n frame_id = frame_id.split(\".\")[0]\n verb = items[2].strip()\n left_sent = items[1].strip()\n right_sent = items[3].strip()\n out_line = \"%s\\t%s\\t%s\\t%s\"\\\n % (frame_id, left_sent, verb, right_sent)\n print(out_line, file=fh_out)\n\n line_idx += 1\n\n fh_in.close()\n fh_out.close()", "async def ascii(self, ctx, *, text):\n text = text.replace(' ', '\\n')\n \n if not text:\n await ctx.send(f\"{ctx.tick(False)} You need to specify the text you want to convert!\")\n \n _fig = figlet_format(text.replace(' ', '\\n'))\n \n if len(_fig) > 1300:\n 
await ctx.send(f\"{ctx.tick(False)} That message is too long!\")\n await ctx.send(f\"{ctx.tick(True)} Done!\")\n await ctx.send(f\"```{_fig}```\")", "def convert_image_to_text(self,path):\r\n pytesseract.pytesseract.tesseract_cmd = 'C:\\\\Users\\\\Kyle\\\\Documents\\\\Tesseract-OCR\\\\tesseract'\r\n\r\n im = Image.open(path)\r\n basewidth = 2200\r\n wpercent = (basewidth/float(im.size[0]))\r\n hsize = int((float(im.size[1])*float(wpercent)))\r\n im = im.resize((basewidth,hsize), Image.ANTIALIAS)\r\n text = pytesseract.image_to_string(im, lang = \"eng\")\r\n\r\n\r\n return text.split()", "def preprocess_text(text: str, normalize=True, lowercase=True, stopwords=True, contractions=True, vulgar_words=True,\n emails=True, punctuation=True, ngrams='uni', ngrams_model_func: Callable = None, lemmatize=True,\n stem=False, apostrophes=True, chars=True) -> str:\n\n if normalize:\n # TODO: Problem: Here we can have 'USA,' and the 'USA' in the .txt file doesn't match that.\n text = normalize_words(text)\n if lowercase:\n text = to_lowercase(text)\n if stopwords:\n text = remove_stopwords(text)\n if contractions:\n text = expand_contractions(text)\n if vulgar_words:\n text = substitute_vulgar_words(text)\n if emails:\n text = remove_emails(text)\n if punctuation:\n text = substitute_punctuation(text)\n if stopwords:\n text = remove_stopwords(text)\n if ngrams == 'bi' or ngrams == 'tri':\n text = ' '.join(ngrams_model_func(text.split()))\n if lemmatize:\n text = lemmatize_words(text)\n elif stem:\n text = stem_words(text)\n if apostrophes: # TODO: Move below substitute_punctuation?\n text = remove_apostrophes(text)\n if chars:\n text = remove_single_chars(text)\n\n return text", "def handleFormatText(paragraphContent):\n # We tokenize and remove the stop word\n words = tokenizeWord(paragraphContent) \n \n stemWords = []\n # We loop on each word.\n for word in words:\n stemWord = STEMMER.stem(word)\n \n # Selection on a part of string.\n stemWord = re.sub(\"[*\\'\\.+:,\\`:/]\", '', stemWord)\n if stemWord.isdigit() or len(stemWord) < 2:\n continue\n \n stemWords.append(stemWord)\n my_r_string = stemWords.pop(0)\n for word in stemWords:\n my_r_string += \" \"+str(word)\n return my_r_string", "def convert_text(text):\n global indic_char_map\n\n text_length = len(text)\n roman_tokens = []\n phonetic_tokens = []\n\n if indic_char_map is None:\n indic_char_map = load_mapping_tables()\n\n for i in range(text_length):\n current_char = text[i]\n\n if current_char == ' ':\n roman_tokens.append(' ')\n phonetic_tokens.append(' ')\n continue\n try:\n current_char_map = indic_char_map[current_char]\n except KeyError:\n # Unknown indic character. Default to printing it out as\n # it is. Assume it can't be pronounced.\n roman_tokens.append(current_char)\n continue\n\n current_char_type = current_char_map['char_type']\n current_char_roman_form = current_char_map['roman_form']\n current_char_phonetic_form = current_char_map['phonetic_form']\n\n if current_char_type in ('i', 'x'):\n # Ignore\n continue\n\n elif current_char_type == 'p':\n # Punctuation\n roman_tokens.append(current_char_roman_form)\n\n elif current_char_type in ('fv', 'v', 'n', 'd'):\n # Simple mapping\n roman_tokens.append(current_char_roman_form)\n phonetic_tokens.append(current_char_phonetic_form)\n\n elif current_char_type == 'ag':\n # Vowel lengthener\n\n # If previous character was a vowel (but not full vowel),\n # repeat it in phonetic form, not in romanized\n # form. 
Otherwise ignore this char\n\n if i > 0:\n prev_char = text[i - 1]\n try:\n prev_char_map = indic_char_map[prev_char]\n except KeyError:\n # Ignore error\n continue\n prev_char_type = prev_char_map['char_type']\n prev_char_phonetic_form = prev_char_map['phonetic_form']\n if prev_char_type == 'v':\n phonetic_tokens.append(prev_char_phonetic_form)\n\n elif current_char_type == 'gn':\n # Context dependent nasal\n if i == text_length - 1:\n # current char is last char\n roman_tokens.append('m')\n phonetic_tokens.append('m')\n else:\n next_char = text[i + 1]\n try:\n next_char_map = indic_char_map[next_char]\n except KeyError:\n roman_tokens.append('m')\n phonetic_tokens.append('m')\n continue\n next_char_roman_form = next_char_map['roman_form']\n next_char_roman_beginning = next_char_roman_form[0]\n if next_char_roman_beginning in \"kg\":\n roman_tokens.append('n')\n phonetic_tokens.append('ng')\n elif next_char_roman_beginning in \"cjtdn\":\n roman_tokens.append('n')\n phonetic_tokens.append('n')\n else:\n roman_tokens.append('m')\n phonetic_tokens.append('m')\n\n elif current_char_type == 'c':\n try:\n next_char = text[i + 1]\n except IndexError:\n # We are already at last character\n roman_tokens.append(current_char_roman_form)\n phonetic_tokens.append(current_char_phonetic_form)\n\n end_v, end_p = determine_inherent_ending_vowel(\n current_char_type,\n current_char)\n\n if end_v:\n roman_tokens.extend(end_v)\n if end_p:\n phonetic_tokens.extend(end_p)\n\n continue\n\n try:\n next_char_map = indic_char_map[next_char]\n except KeyError:\n roman_tokens.append(current_char_roman_form)\n phonetic_tokens.append(current_char_phonetic_form)\n\n end_v, end_p = determine_inherent_ending_vowel(\n current_char_type,\n current_char)\n\n if end_v:\n roman_tokens.extend(end_v)\n if end_p:\n phonetic_tokens.extend(end_p)\n continue\n\n next_char_type = next_char_map['char_type']\n if next_char_type in ('v', 'x', 'p', 'i') or next_char in \" .,\":\n roman_tokens.append(current_char_roman_form)\n phonetic_tokens.append(current_char_phonetic_form)\n else:\n # No vowel coming up next, so add one\n roman_tokens.extend([current_char_roman_form, 'a'])\n phonetic_tokens.extend([current_char_phonetic_form, 'ah0'])\n else:\n print(\"Unknown char type: %s\" % current_char_type, file=sys.stderr)\n sys.exit(1)\n\n roman_text = ''.join(roman_tokens)\n phonetic_text = ' '.join(phonetic_tokens)\n\n return {'roman_form': roman_text,\n 'phonetic_form': phonetic_text}", "def treat_new_line(self,text):\n text=text.replace('.\\n','. 
')\n text=re.sub(r'(\\n\\s*)+\\n+', '\\n\\n',text )\n \n lw=text.split('\\n\\n')\n lw=[c for c in lw if c.replace(' ','')!='']\n \n for i in range(1,len(lw)):\n try:\n\n el=lw[i]\n if len(el)>=1:\n try:\n first_w=el.split()[0]\n except:\n first_w=el\n first_l=first_w[0]\n if first_l.isupper() :\n if len(lw[i-1])>0 and lw[i-1].replace(' ','') !='':\n if lw[i-1].replace(' ','')[-1] not in [\":\",'.',\"-\",'/',\"'\",\";\"]:\n prec=lw[i-1].split(\".\")[-1]\n merge=(prec+' '+lw[i]).split()\n dic=dict(nltk.tag.pos_tag(merge))\n proper_noun=dic[first_w]=='NNP'\n if not proper_noun:\n if not \".\" in lw[i-1]:\n lw[i-1]=lw[i-1]+\".\\n\\n \"\n else:\n lw[i-1]=lw[i-1][:-1]+\".\\n\\n \"\n else:\n lw[i-1]+=' '\n\n\n elif first_l.islower():\n if len(lw[i-1])>0 and lw[i-1][-1].replace(' ','')!='':\n\n if lw[i-1][-1].replace(' ','')[-1]!='-':\n lw[i-1]+=\"\"\n else:\n\n ltemp_prev=lw[i-1].split(' ')\n ltemp_next=lw[i].split(' ')\n motprev=ltemp_prev[-1][:-1]\n motnext=lw[i].split(' ')[0]\n if len((motprev+' '+motnext).split())==2:\n\n if self.english_voc.check(motprev) and self.english_voc.check(motnext) and not self.english_voc.check(\"\".join([motprev,motnext])) :\n newmot=\" \".join([motprev,motnext])\n else:\n newmot=\"\".join([motprev,motnext])\n ltemp_prev[-1]=newmot\n ltemp_next[0]=\"\"\n lw[i-1]=\" \".join(ltemp_prev)\n lw[i]=\" \".join(ltemp_next)\n else:\n lw[i-1]+=\"\\n\\n\"\n \n except:\n print('Error occurs, the reader may not be suitable for your pdf files')\n \n \n text=\"\".join(lw)\n \n lw=text.split('\\n')\n lw=[c for c in lw if c.replace(' ','')!='']\n for i in range(1,len(lw)):\n try:\n el=lw[i]\n if len(el)>=1:\n try:\n first_w=el.split()[0]\n except:\n first_w=el\n first_l=first_w[0]\n if first_l.isupper() :\n if len(lw[i-1])>0 and lw[i-1].replace(' ','')!='':\n if lw[i-1].replace(' ','')[-1] not in [\":\",'.',\"-\",'/',\"'\",\";\"]:\n prec=lw[i-1].split(\".\")[-1]\n merge=(prec+' '+lw[i]).split()\n dic=dict(nltk.tag.pos_tag(merge))\n proper_noun=dic[first_w]=='NNP'\n if not proper_noun:\n if not \".\" in lw[i-1]:\n lw[i-1]=lw[i-1]+\".\\n\\n \"\n else:\n lw[i-1]=lw[i-1][:-1]+\".\\n\\n \"\n else:\n lw[i-1]+=' '\n elif first_l.islower():\n if len(lw[i-1])>0 and lw[i-1].replace(' ','')!='':\n if lw[i-1].replace(' ','')[-1]==\"-\":\n ltemp_prev=lw[i-1].split(' ')\n ltemp_next=lw[i].split(' ')\n motprev=ltemp_prev[-1][:-1]\n motnext=lw[i].split(' ')[0]\n if len((motprev+' '+motnext).split())==2:\n if self.english_voc.check(motprev) and self.english_voc.check(motnext) and not self.english_voc.check(\"\".join([motprev,motnext])) :\n newmot=\" \".join([motprev,motnext])\n else:\n newmot=\"\".join([motprev,motnext])\n ltemp_prev[-1]=newmot\n ltemp_next[0]=\"\"\n lw[i-1]=\" \".join(ltemp_prev)\n lw[i]=\" \".join(ltemp_next)\n\n\n\n else:\n lw[i-1]+=\" \"\n else:\n lw[i-1]+=\" \"\n \n except:\n print('Error occurs, the reader may not be suitable for your pdf files')\n \n text=\"\".join(lw)\n return text", "def convert_to_markdown(self, text: str) -> str:", "def process_text(txt):\n\n # Make text all lowercase, remove line breaks and tabs\n txt = txt.lower()\n txt = sub(\"\\n\", \" \", txt)\n txt = sub(\"\\t\", \" \", txt)\n txt = sub(\"/\", \" \", txt)\n txt = sub(\"’\", \"\", txt)\n\n # Convert numbers, urls, email addresses, and dollar signs\n txt = sub(\"[0-9]+\", \"number\", txt)\n txt = sub(\"(http|https)://[^\\s]*\", \"httpaddr\", txt)\n txt = sub(\"[^\\s]+@[^\\s]+\", \"emailaddr\", txt)\n txt = sub(\"[$]+\", \"dollar\", txt)\n\n # Remove additional punctuation\n table = 
str.maketrans({key: None for key in punctuation})\n txt = txt.translate(table)\n\n return txt", "def process_text(text):\n return [token.text for token in nlp(text) if not token.is_stop]", "def process_text(document):\n return preprocess_string(document,\n filters=[strip_tags, strip_punctuation,\n strip_multiple_whitespaces,\n strip_numeric, remove_stopwords,\n strip_short]\n )", "def lemmatize_text_rus(text):\n text_lemm, text_sent = lemmatize_texts_rus([text])\n text_lemm, text_sent = text_lemm[0], text_sent[0]\n return text_lemm, text_sent", "def process_text(text):\n text = re.sub(r'<@>\\s+|<s>\\s+|</s>\\s+|<p>\\s+|</p>\\s+|\\s+\\,|\\'s|\\'|\\;|\\(|\\)|\\-\\-\\s+|\\s+\\.', '', text)\n text = re.sub(r'\\.\\,', '. ,', text)\n text = re.sub(r'\\,', '', text)\n text = re.sub(r'\\$', '$ ', text)\n text = re.sub(r'\\%', ' %', text)\n text = re.sub(r'\\s\\\"\\s', ' ', text)\n text = re.sub(r'\\.\\s+', '. ', text)\n text = text.lower()\n return text", "def parselite(text):\n p = BaseParser()\n return p.parse(text)", "def analyse(self):\n logging.info(\"transferring text to CorpusCook...\")\n\n paragraphs = self.text.split('\\n\\n')\n print(\"mean length of splitted lines\", (mean([len(p) for p in paragraphs])))\n\n # If TIKA resolved '\\n'\n if (mean([len(p) for p in paragraphs])) > 80:\n paragraphs = [re.sub(r\"- *\\n\", '', p) for p in paragraphs]\n paragraphs = [p.replace('\\n', \" \") for p in paragraphs]\n paragraphs = [p.replace(';', \" \") for p in paragraphs]\n joiner = \" \"\n else:\n # If TIKA did not\n joiner = \" \"\n\n processed_text = joiner.join([p\n for p in paragraphs\n if\n p and\n ks_2samp(self.normal_data, list(p)).pvalue > self.threshold\n ]\n )\n\n return processed_text.strip()[:self.length_limit]", "def entity_recognition(text: str) -> spacy:\n nlp = spacy.load('en_core_web_sm')\n document = nlp(text)\n return document", "def entities_text(text):\n if len(text) == 0:\n return None\n\n client = language.LanguageServiceClient()\n\n if isinstance(text, six.binary_type):\n text = text.decode('utf-8')\n\n # Instantiates a plain text document.\n document = types.Document(\n content=text,\n type=enums.Document.Type.PLAIN_TEXT)\n\n # Detects entities in the document. You can also analyze HTML with:\n # document.type == enums.Document.Type.HTML\n entities = client.analyze_entities(document).entities\n\n # entity types from enums.Entity.Type\n entity_type = ('UNKNOWN', 'PERSON', 'LOCATION', 'ORGANIZATION',\n 'EVENT', 'WORK_OF_ART', 'CONSUMER_GOOD', 'OTHER')\n\n for entity in entities:\n print(u'{:<16}\\t{:8}\\t{}'.format(entity.salience, entity.name, entity_type[entity.type]))\n \"\"\"print('=' * 20)\n print(u'{:<16}: {}'.format('name', entity.name))\n print(u'{:<16}: {}'.format('type', entity_type[entity.type]))\n print(u'{:<16}: {}'.format('metadata', entity.metadata))\n print(u'{:<16}: {}'.format('salience', entity.salience))\n print(u'{:<16}: {}'.format('wikipedia_url',\n entity.metadata.get('wikipedia_url', '-')))\"\"\"\n\n return entities", "def encode_text(self, text):\n text = clip.tokenize(text).to(self.device)\n text_features = self.clip_model.encode_text(text)\n return text_features.cpu().detach().numpy()", "def scene_to_text(scenes):\n scene_text_dict = []\n scene_text_list = []\n for i, scene in enumerate(scenes):\n if len(scene['frame_data']) == 0:\n break\n scene_image = Image.fromarray(scene['frame_data'])\n str_text = pytesseract.image_to_string(scene_image)\n #list_text = list(filter(('').__ne__, re.split(\" |\\n|, |. 
|:|.\\n|\\x0c\", str_text)))\n list_text = list(filter(('').__ne__, re.split(\" |\\n\", str_text)))\n bag_of_word = collections.Counter(list_text)\n scene_text_dict.append(\n {'start': scene['start'], \n 'end': scene['end'], \n 'bag_of_word': dict(bag_of_word)\n })\n scene_text_list.append(list_text)\n return scene_text_dict, scene_text_list", "def normalize(self, text: str) -> str:\n\n raise NotImplementedError()", "def text_to_corpus(text, accented_chars=True,\n convert_num=True, extra_whitespace=True, \n lemmatization=True, lowercase=True, punctuations=True,\n remove_html=True, remove_num=True, special_chars=True, \n stop_words=True): \n \n\n \"\"\"preprocess text with default option set to true for all steps\"\"\"\n if remove_html == True: #remove html tags\n text = strip_html_tags(text)\n if extra_whitespace == True: #remove extra whitespaces\n text = remove_whitespace(text)\n if accented_chars == True: #remove accented characters\n text = remove_accented_chars(text)\n if lowercase == True: #convert all characters to lowercase\n text = text.lower()\n \n # add a period to the end of the text:\n if len(text) > 0 and text[-1] != '.':\n text += '.'\n \n doc = nlp(text) #tokenise text \n clean_text = []\n \n for token in doc:\n \n flag = True\n edit = token.text\n # print(\"Word: \", edit, \" Type: \", token.pos_)\n \n # remove stop words\n if stop_words == True and token.is_stop and token.pos_ != 'NUM': \n flag = False\n \n # remove punctuations\n if punctuations == True and (token.pos_ == 'PUNCT' and not token.tag_ == '.') and flag == True: \n flag = False\n \n # remove 'X' characters:\n if token.pos_ == 'X':\n flag = False\n \n # remove special characters\n if special_chars == True and token.pos_ == 'SYM' and flag == True: \n flag = False\n \n # remove numbers\n if remove_num == True and (token.pos_ == 'NUM' or token.text.isnumeric()) \\\n and flag == True:\n flag = False\n \n # convert number words to numeric numbers\n if convert_num == True and token.pos_ == 'NUM' and flag == True:\n edit = w2n.word_to_num(token.text)\n \n # convert tokens to base form\n elif lemmatization == True and token.lemma_ != \"-PRON-\" and flag == True:\n edit = token.lemma_\n \n # convert all closing punctuation ('.', '!', '?', '...' to periods)\n if token.tag_ == '.' 
and flag == True:\n clean_text.append('.')\n \n # add text lemmas to the clean text:\n elif edit != \"\" and flag == True:\n clean_text.append(edit)\n \n return ' '.join(clean_text)", "def __call__(self, text):\n\n return self._nlp(SpacyTextParser._basic_clean(text))", "def normalized_text(self, text: str):\n self.__normalized_text = self.normalize_text(text)\n self.__generate_default_token_counter()", "def lemmatize_text(text):\n text = nlp(text)\n text = ' '.join([word.lemma_ if word.lemma_ != '-PRON-' else word.text for word in text])\n return text", "def post_process_text(self, text):\n\t\treturn text", "def analyse_text(custom_text, classifier, Resource, threshold, language='en'):\n return [(bytes(custom_text, 'utf-8'),\n _minimal_analysis(bytes(custom_text, 'utf-8'), classifier, Resource, threshold, language))]", "def classify_text(text):\n\tclient = language.LanguageServiceClient()\n\n\tif isinstance(text, six.binary_type):\n\t\ttext = text.decode('utf-8')\n\n\tdocument = types.Document(\n\t\tcontent=text.encode('utf-8'),\n\t\ttype=enums.Document.Type.PLAIN_TEXT)\n\n\tcategories = client.classify_text(document).categories\n\n\tfor category in categories:\n\t\tprint(u'=' * 20)\n\t\tprint(u'{:<16}: {}'.format('name', category.name))\n\t\tprint(u'{:<16}: {}'.format('confidence', category.confidence))\n\n\tprint('\\n\\n')\n\n\treturn categories", "def frontend(text):\n text = pyopenjtalk.g2p(text, kana=False)\n print(f\"Cleaned text: {text}\")\n charseq = text.split(\" \")\n idseq = []\n for c in charseq:\n if c.isspace():\n idseq += [char_to_id[\"<space>\"]]\n elif c not in char_to_id.keys():\n idseq += [char_to_id[\"<unk>\"]]\n else:\n idseq += [char_to_id[c]]\n idseq += [idim - 1] # <eos>\n return torch.LongTensor(idseq).view(-1).to(device)", "def get_text(self):\n txt = self.lang.tool.image_to_string(\n self.image,\n lang=self.lang,\n builder=pyocr.builders.TextBuilder()\n )\n return txt", "def normalize_text(text, skip_weather=True, defluff=False):\n\n if only_numbers(text) <4:\n return \"dud_drop_me\"\n\n if test_hindi(text):\n return \"dud_drop_me\"\n\n if skip_weather:\n if \"weather\" in text or 'rain' in text:\n return \"dud_drop_me\"\n\n if 'http' in text:\n return \"dud_drop_me\"\n\n text = manual_corrections(text)\n text = remove_special_characters(text)\n text = remove_stopwords(text)\n #text = \" \".join([spell.correction(word) for word in text.split()]) # LV based spell correction\n text = lemmatize_text(text)\n text = stem_text(text)\n\n\n if defluff==True:\n text = remove_fluff(text)\n\n text = text.replace(\" \",\" \") # clean up spaces\n text = tokenizer.tokenize(text) #word_tokenize(text)\n if np.random.uniform(0,1) <0.000001: # print random snippets as progress\n print(\"sample:\", text)\n return text", "def load_simplified_conversation_text(filename, conv_number):\n pass", "def preprocess_text(text: str):\n # remove trailing/leading whitespace\n text = text.strip()\n\n # .lower() depends on model so doing this in collate function\n\n # TODO other preprocessing - punctuation/ascii etc.\n text = text.replace(\"\\\\n\", \" \")\n # text = text.replace(\"\\\\'\", \"\\'\")\n # text = text.replace('\\\\\"', \"\\'\")\n text = text.encode('ascii', 'ignore').decode()\n\n return text", "def txt(input):\n output=atpic.cleaner_alex.txtclean(input)\n return output", "def convert(self, text):\r\n # Main function. The order in which other subs are called here is\r\n # essential. 
Link and image substitutions need to happen before\r\n # _EscapeSpecialChars(), so that any *'s or _'s in the <a>\r\n # and <img> tags get encoded.\r\n\r\n # Clear the global hashes. If we don't clear these, you get conflicts\r\n # from other articles when generating a page which contains more than\r\n # one article (e.g. an index page that shows the N most recent\r\n # articles):\r\n self.reset()\r\n\r\n if not isinstance(text, unicode):\r\n #TODO: perhaps shouldn't presume UTF-8 for string input?\r\n text = unicode(text, 'utf-8')\r\n\r\n if self.use_file_vars:\r\n # Look for emacs-style file variable hints.\r\n emacs_vars = self._get_emacs_vars(text)\r\n if \"markdown-extras\" in emacs_vars:\r\n splitter = re.compile(\"[ ,]+\")\r\n for e in splitter.split(emacs_vars[\"markdown-extras\"]):\r\n if '=' in e:\r\n ename, earg = e.split('=', 1)\r\n try:\r\n earg = int(earg)\r\n except ValueError:\r\n pass\r\n else:\r\n ename, earg = e, None\r\n self.extras[ename] = earg\r\n\r\n # Standardize line endings:\r\n text = re.sub(\"\\r\\n|\\r\", \"\\n\", text)\r\n\r\n # Make sure $text ends with a couple of newlines:\r\n text += \"\\n\\n\"\r\n\r\n # Convert all tabs to spaces.\r\n text = self._detab(text)\r\n\r\n # Strip any lines consisting only of spaces and tabs.\r\n # This makes subsequent regexen easier to write, because we can\r\n # match consecutive blank lines with /\\n+/ instead of something\r\n # contorted like /[ \\t]*\\n+/ .\r\n text = self._ws_only_line_re.sub(\"\", text)\r\n\r\n # strip metadata from head and extract\r\n if \"metadata\" in self.extras:\r\n text = self._extract_metadata(text)\r\n\r\n text = self.preprocess(text)\r\n\r\n if self.safe_mode:\r\n text = self._hash_html_spans(text)\r\n\r\n # Turn block-level HTML blocks into hash entries\r\n text = self._hash_html_blocks(text, raw=True)\r\n\r\n # Strip link definitions, store in hashes.\r\n if \"footnotes\" in self.extras:\r\n # Must do footnotes first because an unlucky footnote defn\r\n # looks like a link defn:\r\n # [^4]: this \"looks like a link defn\"\r\n text = self._strip_footnote_definitions(text)\r\n text = self._strip_link_definitions(text)\r\n\r\n text = self._run_block_gamut(text)\r\n\r\n if \"footnotes\" in self.extras:\r\n text = self._add_footnotes(text)\r\n\r\n text = self.postprocess(text)\r\n\r\n text = self._unescape_special_chars(text)\r\n\r\n if self.safe_mode:\r\n text = self._unhash_html_spans(text)\r\n\r\n text += \"\\n\"\r\n\r\n rv = UnicodeWithAttrs(text)\r\n if \"toc\" in self.extras:\r\n rv._toc = self._toc\r\n if \"metadata\" in self.extras:\r\n rv.metadata = self.metadata\r\n return rv", "def normalize_with_audio(self, text: str, verbose: bool = False) -> str:\n text = text.strip()\n if not text:\n if verbose:\n print(text)\n return text\n text = pynini.escape(text)\n\n def get_tagged_texts(text):\n tagged_lattice = self.find_tags(text)\n tagged_texts = self.select_all_semiotic_tags(tagged_lattice)\n return tagged_texts\n\n tagged_texts = set(get_tagged_texts(text))\n normalized_texts = []\n\n for tagged_text in tagged_texts:\n self.parser(tagged_text)\n tokens = self.parser.parse()\n tags_reordered = self.generate_permutations(tokens)\n for tagged_text_reordered in tags_reordered:\n tagged_text_reordered = pynini.escape(tagged_text_reordered)\n\n verbalizer_lattice = self.find_verbalizer(tagged_text_reordered)\n if verbalizer_lattice.num_states() == 0:\n continue\n\n verbalized = self.get_all_verbalizers(verbalizer_lattice)\n for verbalized_option in verbalized:\n 
normalized_texts.append(verbalized_option)\n\n if len(normalized_texts) == 0:\n raise ValueError()\n\n normalized_texts = [post_process(t) for t in normalized_texts]\n normalized_texts = set(normalized_texts)\n return normalized_texts", "def lines_to_blocks(text):\n n_sep = text.count('\\n\\n')\n n_lines = text.count('\\n')\n #approximate ratio of double newlines vs single newline: 40\n if int(n_sep/n_lines*100) > 40:\n text = re.sub('\\n\\n', '\\n',text)\n #try to split it up with topic indicators such as numbers or bullet points\n text = re.sub(r'[0-9]+[.]', '\\n',text)\n text = re.sub('•', '\\n',text)\n return text", "def text_standardization(text_in):\n stand_text = text_in.strip()\n stand_text = ' '.join(stand_text.split())\n stand_text = stand_text.replace(u'(', u'(')\n stand_text = stand_text.replace(u')', u')')\n stand_text = stand_text.replace(u':', u':')\n return stand_text", "def parse_sentence(self, text):\n l = []\n tokens = word_tokenize(text)\n print(tokens)\n skip = 0\n i = -1 # index of token in tokens list\n for token in tokens:\n i += 1\n if skip:\n skip -= 1\n # CORONA TERMS:\n elif token.lower() in corona_words:\n l.append('covid')\n elif is_flag_emoji(token):\n try:\n l.append(flag.ISO3166[flag.dflagize(token)[1:3]])\n except:\n continue\n # HASHTAGS:\n elif token == '#' and i+1 < len(tokens):\n parse_hashtage(tokens[i+1], l, tokens)\n skip += 1\n # TAGS:\n elif token == '@' and i+1 < len(tokens):\n parst_tag(tokens[i+1], l)\n skip = True\n # Size AS A WORD:\n elif token.lower() in sizes.keys():\n l.append(parse_number('1', token))\n elif check_if_term_is_fraction(token):\n if i < len(tokens)-1 and tokens[i+1].lower() in percent:\n l.append(token + '%')\n skip += 1\n else:\n l.append(token)\n # NUMBERS:\n elif isNumber(token):\n token = clean_number(token)\n if (i < len(tokens) - 2) and (tokens[i+1].lower() in sizes.keys()) and (tokens[i+2].lower() in percent):\n l.append(parse_number(token, tokens[i+1]) + '%')\n skip += 2\n elif (i < len(tokens) - 1) and tokens[i+1].lower() in percent:\n l.append(parse_number(token) + '%')\n skip += 1\n elif (i < len(tokens) - 1) and tokens[i+1].lower() in sizes.keys():\n l.append(parse_number(token, tokens[i+1]))\n skip += 1\n elif (i < len(tokens) - 1) and check_if_term_is_fraction(tokens[i+1]):\n l.append(token +' '+ tokens[i+1])\n skip += 1\n else:\n l.append(parse_number(token))\n elif isNumber(token[0:len(token) - 1]) and token[len(token)-1].lower() in sizes:\n tokens.append(token[0:len(token) - 1])\n tokens.append(token[len(token)-1])\n # OTHER TOKENS:\n else:\n cleaning(token, tokens, l)\n\n text_tokens_without_stopwords = [w for w in l if w.lower() not in stop_words]\n print(text_tokens_without_stopwords)\n return text_tokens_without_stopwords", "def basic(text):\n lines = text.split(\"\\n\")\n result = []\n\n for line in lines:\n result.append(_inline(line))\n\n return \"\\n\".join(result)", "def ocr_core(img):\n return pytesseract.image_to_string(img, lang='eng')", "def create_return_english_general_sentiment_encoded_form(self,\r\n text):\r\n # The base uri for api requests\r\n query_builder = Configuration.BASE_URI\r\n \r\n # Prepare query string for API call\r\n query_builder += \"/sentiment\"\r\n\r\n # Validate and preprocess url\r\n query_url = APIHelper.clean_url(query_builder)\r\n\r\n # Prepare headers\r\n headers = {\r\n\r\n \"X-Mashape-Key\": self.__x_mashape_key, \"user-agent\": \"APIMATIC 2.0\",\r\n \"accept\": \"application/json\" \"X-Mashape-Key\": self.__x_mashape_key\r\n }\r\n\r\n # Prepare parameters\r\n 
parameters = {\r\n \"text\": text\r\n }\r\n # The body will be multipart data, so set the header\r\n headers['Content-Type'] = 'multipart/form-data'\r\n\r\n # Prepare and invoke the API call request to fetch the response\r\n response = unirest.post(query_url, headers=headers, params=parameters)\r\n\r\n # Error handling using HTTP status codes\r\n if response.code < 200 or response.code > 206: # 200 = HTTP OK\r\n raise APIException(\"HTTP Response Not OK\", response.code, response.body) \r\n \r\n return response.body", "def split_text(text: str) -> List[Dict[str, str]]:\n # split into paragraphs\n lines = text.splitlines()\n groups = common.group_list(lines, lambda a, _: a.strip() == '')\n paras = ['\\n'.join(item) for empty_line, item in groups if not empty_line]\n\n def _fallback(p, type):\n logging.warn(f'Wrong {type} format:\\n' + p)\n cells.append({'type': 'text', 'source': p})\n\n cells = []\n for p in paras:\n lines = p.splitlines() + ['']\n p += '\\n'\n if p.startswith('#'):\n # parse title\n if not _is_mark(lines[1:]):\n _fallback(p, 'title')\n else:\n m = re.match(r'#+ *', lines[0])\n cells.append({\n 'type': 'title',\n 'prefix': m[0],\n 'source': lines[0][m.span()[1]:],\n 'mark': '\\n'.join(lines[1:])})\n elif p.startswith('$$'):\n # parse equations\n m = re.findall(r'\\$\\$', p)\n if len(m) != 2:\n _fallback(p, 'equation')\n else:\n cells.append({'type': 'equation', 'source': p})\n elif p.startswith('!['):\n # parse images\n if not lines[0].strip().endswith(')') or not _is_mark(lines[1:]):\n _fallback(p, 'image')\n else:\n cells.append({'type': 'image', 'source': p})\n elif p.startswith('|'):\n # parse table\n for i, l in enumerate(lines):\n if not l.startswith('|'):\n break\n if not _is_mark(lines[i:]):\n _fallback(p, 'equation')\n else:\n cells.append({'type': 'table', 'source': p})\n else:\n groups = common.group_list(lines, _list)\n for prefix, item in groups:\n if len(prefix.split('__')) == 2:\n prefix = prefix.split('__')[0]\n source = '\\n'.join(item)[len(prefix):]\n if prefix == '':\n cells.append({'type': 'text', 'source': source})\n else:\n cells.append({\n 'type': 'list',\n 'prefix': prefix,\n 'source': source})\n return cells", "def text_to_images(text):\n top_keywords = text_to_keywords(text)\n images = keyword_to_images(top_keywords)\n return images", "def normalize_text(text):\n text = re.sub(r'[ \\t]+', ' ', text)\n text = re.sub(r'\\r', '', text)\n\n # Remove whitespace in the middle of text.\n text = re.sub(r'[ \\t]+\\n', '\\n', text)\n # Remove whitespace at the end of the text.\n text = text.rstrip()\n\n return text", "def reach_process_text():\n response = request.body.read().decode('utf-8')\n body = json.loads(response)\n text = body.get('text')\n rp = reach.process_text(text)\n if rp and rp.statements:\n stmts = stmts_to_json(rp.statements)\n res = {'statements': stmts}\n return res\n else:\n res = {'statements': []}\n return res", "def get_text_summarization_gensim(text, summary_ratio=0.4):\n sentences = extract_sentences(text)\n text = ' '.join(sentences)\n summary = summarize(text, split=True, ratio=summary_ratio)\n return summary", "def preprocess_text(text, tokenize=False, ner=False, stem=False, stopw=False, all_lower=False, strip_punct=True):\n\n # Clean the text\n text = re.sub(r\"what's\", \"what is \", text)\n text = re.sub(r\"\\'s\", \" \", text)\n text = re.sub(r\"\\'ve\", \" have \", text)\n text = re.sub(r\"can't\", \"cannot \", text)\n text = re.sub(r\"n't\", \" not \", text)\n text = re.sub(r\"i'm\", \"i am \", text)\n text = 
re.sub(r\"\\'re\", \" are \", text)\n text = re.sub(r\"\\'d\", \" would \", text)\n text = re.sub(r\"\\'ll\", \" will \", text)\n text = re.sub(r\"i\\.e\\.\", \"\", text)\n text = re.sub(r\"\\.\", \" . \", text)\n text = re.sub(r\"!\", \" ! \", text)\n text = re.sub(r\"\\/\", \" \", text)\n text = re.sub(r\"\\^\", \" ^ \", text)\n text = re.sub(r\"\\+\", \" + \", text)\n text = re.sub(r\"\\-\", \" - \", text)\n text = re.sub(r\"\\=\", \" = \", text)\n text = re.sub(r\"'\", \" \", text)\n text = re.sub(r'\"', \" \", text)\n text = re.sub(r\"(\\d+)(k)\", r\"\\g<1>000\", text)\n text = re.sub(r\":\", \" : \", text)\n text = re.sub(r\" e g \", \" eg \", text)\n text = re.sub(r\"^e g \", \" eg \", text)\n text = re.sub(r\" b g \", \" bg \", text)\n text = re.sub(r\"^b g \", \" bg \", text)\n text = re.sub(r\" u s \", \" american \", text)\n text = re.sub(r\"^u s \", \" american \", text)\n text = re.sub(r\"\\0s\", \"0\", text)\n text = re.sub(r\" 9 11 \", \"911\", text)\n text = re.sub(r\"e - mail\", \"email\", text)\n text = re.sub(r\"j k\", \"jk\", text)\n text = re.sub(r\"\\s{2,}\", \" \", text)\n text = re.sub(r\"\\b[a-zA-Z]\\b\", \"\", text)\n\n if ner:\n tokenized_text = word_tokenize(text)\n tagged_text = pos_tag(tokenized_text)\n chunked_text = ne_chunk(tagged_text, binary=True)\n\n named_entities = extract_entity_names(chunked_text)\n for named_entity in named_entities:\n entity = named_entity.replace(\".\", \"\")\n entity = re.sub(r'\\s+', \"_\", entity)\n text = text.replace(named_entity, entity)\n\n if all_lower:\n text = text.lower()\n\n if stopw:\n global stops\n if stops is None:\n try:\n stops = set(stopwords.words(\"english\"))\n except Exception as e:\n print(\"%s - Please download english stopwords from NLTK\" % e)\n exit()\n text = [word.strip() for word in text.split() if word not in stops]\n text = \" \".join(text)\n\n if tokenize:\n text = word_tokenize(text)\n text = \" \".join(text)\n\n # shorten words to their stems\n if stem:\n text = text.split()\n stemmer = SnowballStemmer('english')\n stemmed_words = [stemmer.stem(word) for word in text]\n text = \" \".join(stemmed_words)\n\n if strip_punct:\n text = text.translate(str.maketrans('', '', string.punctuation))\n\n text = text.strip()\n\n # Empty string\n if text == '':\n return EMPTY_TOKEN\n\n return text", "def from_text(text):\n\n return _from_text(text, _by_text)", "def pegasus_eval(text, params):\n model, tokenizer, torch_device = params\n batch = tokenizer.prepare_seq2seq_batch([text], truncation=True, padding='longest', return_tensors=\"pt\").to(torch_device)\n translated = model.generate(**batch)\n output = tokenizer.batch_decode(translated, skip_special_tokens=True)[0]\n output = output.replace('<n>', ' ')\n return output", "def parsingconvtext(retrievedtext,customtextlist):\r\n if not retrievedtext: #in case empty text \r\n retrievedtext=changenonetostr(retrievedtext)\r\n newtext=BeautifulSoup(retrievedtext).get_text() \r\n #newtext=changenonetostr(retrievedtext)\r\n #newtext=BeautifulSoup(newtext).get_text() \r\n #remove http links\r\n newtext=re.sub(r'http\\S+', '', newtext)\r\n newtext=re.sub(r'\\r\\r\\r\\n', ' ', newtext)\r\n #remove LL specific text\r\n if customtextlist:\r\n for i in customtextlist:\r\n newtext=re.sub(i, '', newtext)\r\n return newtext", "def preprocess_training_text_with_stops(text, convert=False):\n return preprocess_training_text(text, accented_chars=True,\n convert_num=False, extra_whitespace=True, \n lemmatization=True, lowercase=True, punctuations=True,\n remove_html=True, 
remove_num=True, special_chars=True, \n stop_words=False)", "def process_text(text, stemmer=SnowballStemmer(\"english\"), min_length=3):\n text = text.lower()\n text = re.sub('dictated.*', '', text, flags=re.MULTILINE|re.DOTALL)\n text = re.sub('.*:\\s+', '', text)\n text = re.sub('\\n', ' ', text)\n text = re.sub('\\[.*?\\]', '', text)\n text = re.sub('\\s\\s+', ' ', text)\n text = re.sub('[,.]', '', text)\n text = re.sub('[/-]', ' ', text)\n tokens = word_tokenize(text)\n return \" \".join([stemmer.stem(t) for t in tokens if t not in stop_words\n and len(t) >= min_length])", "def from_text(text):\n return parse(text)", "def trans(monitext):\n result = ''\n last_line = 'empty'\n\n while monitext:\n # newline character or empty line(s)\n matched = re.match(r'\\n+', monitext, re.M)\n\n if matched:\n result += matched.group()\n if len(matched.group()) > 1:\n last_line = 'empty'\n elif last_line == 'title':\n result += '\\n'\n last_line = 'empty'\n monitext = monitext[matched.end():]\n continue\n\n # code block\n matched = re.match(r'{{{.*?\\n((\\n|.)*?)\\n}}}', monitext, re.M)\n\n if matched:\n body = matched.groups()[0]\n result += '\\n\\t' + '\\n\\t'.join(body.split('\\n'))\n monitext = monitext[matched.end():]\n last_line = 'code'\n continue\n\n # header\n matched = re.match(r'^(=+) (.+) (=+)', monitext)\n\n if matched:\n title = matched.groups()[1]\n level = len(matched.groups()[0])\n\n if last_line != 'empty':\n result += '\\n'\n\n if level < 4:\n underscore = {2 : '=', 3 : '-'}[level] * mbstrlen(title)\n result += title + os.linesep + underscore\n else:\n result += ('#' * level) + \" \" + title\n monitext = monitext[matched.end():]\n\n last_line = 'title'\n\n continue\n\n # link\n matched = re.match(r'(.*)\\[([^\\s]+[ \\t]+)?(.+)\\]', monitext)\n\n if matched:\n pre = matched.groups()[0]\n url = matched.groups()[1]\n if url:\n url = url.strip()\n name = matched.groups()[2]\n\n if url:\n replaced = \"%s[%s](%s)\" % (pre, name, url)\n else:\n replaced = \"%s[%s](%s)\" % (pre, name, name)\n\n monitext = monitext[:matched.start()] + replaced\\\n + monitext[matched.end():]\n\n # important\n monitext = re.sub(r'\\'\\'\\'(.*?)\\'\\'\\'', r'**\\1**', monitext)\n\n # italic\n monitext = re.sub(r'\\'\\'(.*?)\\'\\'', r'_\\1_', monitext)\n\n # list\n matched = re.match(r'^(\\s*)\\* (.*)', monitext)\n\n if matched:\n depth = len(matched.groups()[0])\n body = matched.groups()[1]\n result += (depth - 1) * '\\t' + '* ' + body\n monitext = monitext[matched.end():]\n\n last_line = 'others'\n\n try:\n # Go to the next line\n index = monitext.index('\\n')\n result += monitext[:index]\n monitext = monitext[index:]\n except ValueError:\n result += monitext\n break\n\n return result", "def process_text(text):\n doc = spacy_model(text.lower())\n result = []\n for token in doc:\n if token.text in spacy_model.Defaults.stop_words:\n continue\n if token.is_punct:\n continue\n if token.lemma_ == '-PRON-':\n continue\n result.append(token.lemma_)\n return \" \".join(result)", "def _from_text_to_crf(self, message, entities=None):\n crf_format = []\n tokens = self.nlp(message[\"text\"]) \n for i, token in enumerate(tokens):\n pattern = {}\n entity = entities[i] if entities else \"N/A\"\n tag = None\n custom_ner_features = None\n crf_format.append((token.text, tag, entity, pattern, custom_ner_features))\n return crf_format", "def PigToEnglish(str):\r\n\r\n # TODO: Your code here\r\n\r\n\r\n # Change the return to return the converted string\r\n return(\"\")", "def _get_raw_annotations_for_text(text, ontologies='MESH', 
semantic_types=None):\n\n if semantic_types is None:\n semantic_types = ()\n\n params = {}\n params['text'] = text\n params['ontologies'] = ontologies\n params['semantic_types'] = ','.join(semantic_types)\n response = _make_api_call('http://data.bioontology.org/annotator', params)\n raw_annotations = response.json()\n return raw_annotations", "def parse(self, text):\n return self.dict.txt2vec(text)", "def _convert_speech_to_text(self, storage_uri):\n\n responses = self._convert_speech_to_text_by_api(storage_uri)\n texts = [result.alternatives[0].transcript for result in responses.results]\n self._texts = '\\n\\n'.join(texts)", "def preprocessing_a1(self, text):\n # clean description\n cleaned_text = self.text_cleaning(text)\n # preprocess description\n preprocessed_text = self.text_preprocessing_a1(cleaned_text)\n\n return preprocessed_text", "def detect_text(img):\n \n with io.open(img, 'rb') as image_file:\n content = image_file.read()\n\n image = vision.types.Image(content=content)\n response = client.text_detection(image=image) # returns TextAnnotation\n df = pd.DataFrame(columns=['description'])\n texts = response.text_annotations\n for text in texts:\n df = df.append(\n dict(\n \n description= clean_text (text.description)\n ),\n ignore_index=True\n )\n \n porter = PorterStemmer()\n\n try:\n text= (df['description'][0])\n text = porter.stem(text)\n except IndexError:\n text = 'i am neutral'\n # print (analyze(text))\n \n \n # print(df['description'])\n print(text)\n if len (text.split())<3:\n text = 'i am neutral'\n\n sentiment_dict= analyze2(text) \n if sentiment_dict >= 0.008: \n Category.append('Positive') \n return('Positive') \n\n elif (sentiment_dict > - 0.008) & (sentiment_dict < 0.008): \n Category.append('Random')\n return('Random')\n\n elif (sentiment_dict <= -0.008):\n Category.append('Negative')\n return('Negative')", "def _format_text(self, text) :\n text_width = self.width - self.current_indent\n indent = \" \"*self.current_indent\n output_text = []\n paragraphs = text.split('\\n')\n for p in paragraphs :\n output_text.append(textwrap.fill(p,\n text_width,\n initial_indent=indent,\n subsequent_indent=indent))\n return '\\n'.join(output_text)", "def save_annotated_text_to_txt(self):\n #initialise file to write the output\n outfile = open(('annotated_text_' + self.lang + '_' + self.method +\n '.txt'), 'w')\n #counter for the sentences\n counter_sentence = 0\n #counter for the paragrafhs\n counter_paragraph = 0\n #open txt file\n with open(self.lang + '.txt') as file:\n for paragraph in file:\n sentences = tokenize.sent_tokenize(paragraph)\n for sentence in sentences:\n #build lists with the ends of the tokens with NE and the NEs\n end_list = [0]\n end_list += [i[2] for i in \n self.named_entity_list_total[counter_sentence]]\n ne_list = [i[3] for i in \n self.named_entity_list_total[counter_sentence]]\n counter_sentence += 1\n #build new string\n new_string = ''\n for i in range(len(end_list)-1):\n new_string += (sentence[end_list[i]:end_list[i+1]]+\n '<annotation class=\"'+ne_list[i]+'\">')\n new_string += sentence[end_list[-1]:len(sentence)]\n #add new_string to outfile\n outfile.write(new_string + '\\n')\n #add additional space after abstract\n if counter_paragraph == 2:\n outfile.write('\\n') \n counter_paragraph += 1\n outfile.close()\n return", "def textoutput(text):\n\n lines = text.split(\"\\n\")\n result = []\n\n protocols = \"https?|ftp|sftp|file|afs|nfs\"\n savane_tags = \"verbatim|nomarkup\"\n\n for line in lines:\n # Handle named hyperlink.\n line = re.sub(\n 
# find the opening brace '['\n\t\t '\\['\n # followed by the protocol, either http:// or https://\n\t\t + '((' + protocols + '):\\/\\/'\n # match any character except whitespace or the closing\n # brace ']' for the actual link\n\t\t + '[^\\s\\]]+)'\n # followed by at least one whitespace\n\t\t + '\\s+'\n # followed by any character (non-greedy) and the\n # next closing brace ']'\n\t\t + '(.+?)\\]', '\\\\3 <\\\\1>', line)\n \n # Remove savane-specific tags\n line = re.sub('\\+(' + savane_tags + ')\\+', '', line)\n line = re.sub('-(' + savane_tags + ')-', '', line)\n result.append(line)\n\n return \"\\n\".join(result)" ]
[ "0.66363716", "0.61474895", "0.6118832", "0.6097702", "0.609217", "0.60894656", "0.6057062", "0.60051125", "0.6003964", "0.59820795", "0.59751576", "0.5925597", "0.5902075", "0.5848546", "0.5844513", "0.5837306", "0.58300614", "0.58283144", "0.5815858", "0.5792594", "0.5778242", "0.57480145", "0.57458895", "0.5734289", "0.569558", "0.56917804", "0.5674859", "0.5668577", "0.5655869", "0.56327575", "0.5625962", "0.56073576", "0.5605461", "0.56039375", "0.5598402", "0.55878896", "0.5587465", "0.5586224", "0.5583719", "0.55676633", "0.5565393", "0.5540405", "0.5539813", "0.5529377", "0.5525011", "0.5519682", "0.5515444", "0.55116284", "0.54856193", "0.5476458", "0.5467546", "0.5463749", "0.54634273", "0.5457838", "0.5454935", "0.5454135", "0.54462796", "0.5443667", "0.53999984", "0.5387119", "0.53853834", "0.53594375", "0.53575885", "0.53568", "0.5352479", "0.53499365", "0.53488964", "0.53447264", "0.534245", "0.53357273", "0.53298485", "0.5319866", "0.53089476", "0.5304378", "0.5303721", "0.53023404", "0.5299362", "0.52991325", "0.52976", "0.529214", "0.5287563", "0.5285161", "0.5278127", "0.5275741", "0.5275618", "0.5273768", "0.5272649", "0.5270465", "0.5268924", "0.5266941", "0.52648497", "0.5258628", "0.5250754", "0.5246488", "0.52442825", "0.5236278", "0.5233339", "0.5229646", "0.5227695", "0.52225095" ]
0.6489249
1
Bin calculation for x and y. Calculates the bin edges for the given data arrays x and y.
def get_2D_bins(x, y, bins, same_bins=False):
    # precalculated bins [np.ndarray, np.ndarray]: do nothing and return the same bins
    if isinstance(bins, list):
        if isinstance(bins[0], np.ndarray) and isinstance(bins[1], np.ndarray):
            pass
        elif 'uniform_counts' in bins:
            try:
                n = int(bins[1])
                bins_x = np.fromiter(
                    (np.nanpercentile(x, (i / n) * 100) for i in range(1, n + 1)),
                    dtype=float)
                bins_y = np.fromiter(
                    (np.nanpercentile(y, (i / n) * 100) for i in range(1, n + 1)),
                    dtype=float)
                bins = [bins_x, bins_y]
            except:
                raise ValueError(f"Please define number of bins for binning method uniform_counts: bins = ['uniform_bins', n_bins]")
    else:
        # calculate bins with np.histogram_bin_edges(), even_width option == int
        if bins in ['fd', 'doane', 'scott', 'stone', 'rice', 'sturges', 'sqrt'] or isinstance(bins, int):
            if same_bins:
                bins_xy = np.histogram_bin_edges([x, y], bins)
                bins = [bins_xy, bins_xy]
            else:
                bins_x = np.histogram_bin_edges(x, bins)
                bins_y = np.histogram_bin_edges(y, bins)
                bins = [bins_x, bins_y]
        elif bins == 'uniform_counts':
            raise ValueError(f"Please define number of bins for binning method uniform_bins: bins = ['uniform_bins', n_bins]")
        elif bins == 'unique_values':
            if same_bins:
                bins_xy = np.unique([x, y])
                bins = [bins_xy, bins_xy]
            else:
                bins_x = np.unique(x)
                bins_y = np.unique(y)
                bins = [bins_x, bins_y]
        else:
            raise ValueError(f"Binning option {bins} not know.")

    # always return bins as bin edges: [np.ndarray, np.ndarray]
    return bins
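A minimal usage sketch for get_2D_bins as reconstructed above (not part of the source row): it assumes NumPy is imported as np and uses made-up sample arrays; the names x, y, bx, by below are illustrative only.

import numpy as np

x = np.random.normal(size=500)
y = np.random.normal(size=500)

# 20 equal-width bins shared by both arrays (bins passed as an int)
bx, by = get_2D_bins(x, y, bins=20, same_bins=True)

# automatic bin widths per array via the Freedman-Diaconis rule
bx, by = get_2D_bins(x, y, bins='fd')

# edges chosen at equal-count percentiles (the 'uniform_counts' option, 10 bins per array)
bx, by = get_2D_bins(x, y, bins=['uniform_counts', 10])

Each call returns a two-element list of bin-edge arrays, one per input array.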
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get2DBins(x, y, binSizeX, binSizeY):\n\n result = []\n xlength = len(x)\n ylength = len(y)\n\n i = 0\n xcount = 0\n for i1 in range(0, xlength, binSizeX):\n i2 = i1 + binSizeX\n if i2 >= xlength:\n i2 = xlength - 1\n xcount += 1\n ycount = 0\n for j1 in range(0, ylength, binSizeY):\n j2 = j1 + binSizeY\n if j2 >= ylength:\n j2 = ylength - 1\n result.append((i1, i2, j1, j2))\n ycount += 1\n return result, xcount, ycount", "def bin_binarise(self):\n pass", "def histogram2d(x, y, bins_x, bins_y):\n # x-range\n x_max, x_min = x.max(), x.min()\n delta_x = 1 / ((x_max - x_min) / bins_x)\n # y-range\n y_max, y_min = y.max(), y.min()\n delta_y = 1 / ((y_max - y_min) / bins_y)\n # compute histogram 2d\n xy_bin = np.zeros((np.int64(bins_x), np.int64(bins_y)), dtype=np.int64)\n for t in range(len(x)):\n i = (x[t] - x_min) * delta_x\n j = (y[t] - y_min) * delta_y\n if 0 <= i < bins_x and 0 <= j < bins_y:\n xy_bin[int(i), int(j)] += 1\n return xy_bin", "def d2_bin(self, x, y):\n \n KD = KernelDensity(bandwidth=self.bandwidth,kernel=self.kernel)\n KD.fit(np.column_stack((x,y)))\n grid1 = np.linspace(np.min(x),np.max(x),self.bins)\n grid2 = np.linspace(np.min(y),np.max(y),self.bins)\n mesh = np.meshgrid(grid1,grid2)\n data = np.column_stack((mesh[0].reshape(-1,1),mesh[1].reshape(-1,1)))\n samp = KD.score_samples(data)\n samp = samp.reshape(self.bins,self.bins)\n p = np.exp(samp)/np.sum(np.exp(samp))\n\n return p", "def binvec(x):\n edge1 = x[0] - (x[1]-x[0])/2\n edge2 = x[-1] + (x[-1]-x[-2])/2\n return np.linspace(edge1, edge2, len(x)+1)", "def _get_bin_edges(a, bins, range):\n # parse the overloaded bins argument\n n_equal_bins = None\n bin_edges = None\n\n if isinstance(bins, str):\n raise NotImplementedError(\n 'only integer and array bins are implemented')\n elif isinstance(bins, cupy.ndarray) or numpy.ndim(bins) == 1:\n # TODO(okuta): After #3060 is merged, `if cupy.ndim(bins) == 1:`.\n if isinstance(bins, cupy.ndarray):\n bin_edges = bins\n else:\n bin_edges = numpy.asarray(bins)\n\n if (bin_edges[:-1] > bin_edges[1:]).any(): # synchronize! when CuPy\n raise ValueError(\n '`bins` must increase monotonically, when an array')\n if isinstance(bin_edges, numpy.ndarray):\n bin_edges = cupy.asarray(bin_edges)\n elif numpy.ndim(bins) == 0:\n try:\n n_equal_bins = operator.index(bins)\n except TypeError:\n raise TypeError(\n '`bins` must be an integer, a string, or an array')\n if n_equal_bins < 1:\n raise ValueError('`bins` must be positive, when an integer')\n\n first_edge, last_edge = _get_outer_edges(a, range)\n else:\n raise ValueError('`bins` must be 1d, when an array')\n\n if n_equal_bins is not None:\n # numpy's gh-10322 means that type resolution rules are dependent on\n # array shapes. 
To avoid this causing problems, we pick a type now and\n # stick with it throughout.\n bin_type = cupy.result_type(first_edge, last_edge, a)\n if cupy.issubdtype(bin_type, cupy.integer):\n bin_type = cupy.result_type(bin_type, float)\n\n # bin edges must be computed\n bin_edges = cupy.linspace(\n first_edge, last_edge, n_equal_bins + 1,\n endpoint=True, dtype=bin_type)\n return bin_edges", "def hbinavg(x,y,bins):\n\n binx = bins[:-1] + (bins[1:] - bins[:-1])/2.\n bsum = ( np.histogram(x,bins=bins,weights=y) )[0]\n bn = ( np.histogram(x,bins=bins) )[0]\n biny = bsum/bn\n\n return binx,biny", "def bws(x, y, **kwargs):\n\tx.sort()\n\ty.sort()\n\tnpx = np.array(x)\n\tnpy = np.array(y)\n\n\txs = np.unique(npx)\n\tys = np.unique(npy)\n\txys = set(xs).union(set(ys))\n\taxy = np.array(list(xys))\n\taxy.sort()\n\n\tG = np.array([len(axy[np.where(axy <= xi)]) for xi in xs])\n\tH = np.array([len(axy[np.where(axy <= yi)]) for yi in ys])\n\n\tn = len(G)\n\tm = len(H)\n\tfn = float(n)\n\tfm = float(m)\n\n\tN = np.linspace(1,n,num=n)\n\tM = np.linspace(1,m,num=m)\n\n\txt1 = np.power(G - N*(fm + fn)/fn, 2.0)\n\txtt = N/(fn+1.0)\n\txt2 = xtt*(1 - xtt)*(fm * (fm+fn)/fn)\n\tBx = np.sum(xt1/xt2)/fn\n\t\n\tyt1 = np.power(H - M*(fm + fn)/fm, 2.0)\n\tytt = M/(fm+1.0)\n\tyt2 = ytt*(1 - ytt)*(fn * (fm+fn)/fm)\n\tBy = np.sum(yt1/yt2)/fm\n\n\tB = (Bx+By)/2.0\n\n\tprint \"B = \", B\n\t\n\tJ = 3\n\tif \"j\" in kwargs:\n\t\tJ = kwargs[\"j\"]\n\t\n\treturn compute_xi(B, J)", "def bapply(x,y,bins,func):\n \n assert bins[0] <= min(x),'range'\n assert bins[-1] > max(x),'range'\n\n bid = np.digitize(x,bins) \n nbins = bins.size-1\n yapply = np.zeros(nbins)\n\n for id in range(1,nbins):\n yb = y[bid==id]\n yapply[id-1] = func(yb)\n\n return yapply", "def find_bin(self, x):\n return (x - self.bin_edges[0]) // self.bin_width", "def find_bin_edges(bin_centres):\n\n if not isinstance(bin_centres, np.ndarray):\n bin_centres = np.asarray(bin_centres)\n\n edges = bin_centres[:-1] + 0.5 * (bin_centres[1:] - bin_centres[:-1])\n bins = np.concatenate(([2 * bin_centres[0] - edges[0]], edges,\n [2 * bin_centres[-1] - edges[-1]]))\n\n return bins", "def binning():\n def r(x):\n return 1 << (x & 7)\n\n def w(x):\n return 0x11 * (x >> 1)\n return r, w", "def rebin2D(h, binsx, binsy):\n new_h = TH2D(\"%s_rebin_\"%(h.GetName()), \"%s_rebin_\"%(h.GetName()),\n len(binsx)-1, array.array('d', binsx), len(binsy)-1, array.array('d', binsy))\n new_h.Sumw2()\n for i in xrange(h.GetNbinsX()):\n bin_to_fill_x = new_h.GetXaxis().FindBin(h.GetXaxis().GetBinCenter(i+1))\n for j in xrange(h.GetNbinsY()):\n bin_to_fill_y = new_h.GetYaxis().FindBin(h.GetYaxis().GetBinCenter(j+1))\n new_h.SetBinContent(bin_to_fill_x, bin_to_fill_y, h.GetBinContent(i+1,j+1) + new_h.GetBinContent(bin_to_fill_x, bin_to_fill_y))\n new_h.SetBinError(bin_to_fill_x, bin_to_fill_y,\n TMath.Sqrt(h.GetBinError(i+1, j+1)*h.GetBinError(i+1,j+1) + new_h.GetBinError(bin_to_fill_x, bin_to_fill_y)*new_h.GetBinError(bin_to_fill_x, bin_to_fill_y) ) )\n return new_h", "def test_bin_edges(self):\n with Pandas() as pd:\n if pd is None:\n return\n with Numpy() as np:\n if numpy is None:\n return\n sys.stderr.write(\"\\n\")\n\n df1 = pd.DataFrame({'A': [0, 1, 2, 3, 4, 3, 2, 1, 1, 1]})\n df2 = pd.DataFrame({'A': [2, 3, 4, 5, 7, 4, 6, 5, 7, 8]})\n\n # building test histograms\n hist2 = hg.SparselyBin(origin=0.0, binWidth=1.0, quantity=unit('A'))\n hist3 = hg.SparselyBin(origin=0.0, binWidth=1.0, quantity=unit('A'))\n hist4 = hg.Bin(num=10, low=0.0, high=10., quantity=unit('A'))\n hist5 = 
hg.Bin(num=10, low=0.0, high=10., quantity=unit('A'))\n hist6 = hg.Bin(num=201, low=0.0, high=1.005)\n\n # fill them\n hist2.fill.numpy(df1)\n hist3.fill.numpy(df2)\n hist4.fill.numpy(df1)\n hist5.fill.numpy(df2)\n\n import numpy as np\n np.testing.assert_array_equal(hist2.bin_edges(), [0., 1., 2., 3., 4., 5.])\n np.testing.assert_array_equal(hist3.bin_edges(), [2., 3., 4., 5., 6., 7., 8., 9.])\n np.testing.assert_array_equal(hist4.bin_edges(), [0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10.])\n np.testing.assert_array_equal(hist5.bin_edges(), [0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10.])\n\n np.testing.assert_array_equal(hist2.bin_edges(low=2.1, high=11.9), [\n 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.])\n np.testing.assert_array_equal(hist3.bin_edges(low=1.1, high=6), [1., 2., 3., 4., 5., 6.])\n np.testing.assert_array_equal(hist4.bin_edges(low=2.1, high=11.9), [\n 2., 3., 4., 5., 6., 7., 8., 9., 10.])\n np.testing.assert_array_equal(hist5.bin_edges(low=1.1, high=5.4), [1., 2., 3., 4., 5., 6.])\n\n assert len(hist6.bin_edges()) == 202\n assert len(hist6.bin_edges(low=0.2089, high=0.9333)) == 147\n assert len(hist6.bin_edges(low=0.205, high=0.935)) == 147", "def felix_binning(xs, ys, delta=1):\n \n #bins = np.arange(start, end, delta)\n #occurance = np.zeros(start, end, delta)\n BIN_STEP = delta\n BIN_START = xs.min()\n BIN_STOP = xs.max()\n\n indices = xs.argsort()\n datax = xs[indices]\n datay = ys[indices]\n\n print(\"In total we have: \", len(datax), ' data points.')\n #do the binning of the data\n bins = np.arange(BIN_START, BIN_STOP, BIN_STEP)\n print(\"Binning starts: \", BIN_START, ' with step: ', BIN_STEP, ' ENDS: ', BIN_STOP)\n\n bin_i = np.digitize(datax, bins)\n bin_a = np.zeros(len(bins)+1)\n bin_occ = np.zeros(len(bins)+1)\n\n for i in range(datay.size):\n bin_a[bin_i[i]] += datay[i]\n bin_occ[bin_i[i]] += 1\n\n binsx, data_binned = [], []\n for i in range(bin_occ.size-1):\n if bin_occ[i] > 0:\n binsx.append(bins[i]-BIN_STEP/2)\n data_binned.append(bin_a[i]/bin_occ[i])\n\n #non_zero_i = bin_occ > 0\n #binsx = bins[non_zero_i] - BIN_STEP/2\n #data_binned = bin_a[non_zero_i]/bin_occ[non_zero_i]\n\n return binsx, data_binned", "def extEucBin(x, y):\n g = 1\n while isEven(x) and isEven(y):\n x, y, g = x >> 1, y >> 1, g << 1\n u, v, A, B, C, D = x, y, 1, 0, 0, 1\n while True:\n while isEven(u):\n u >>= 1\n if isEven(A) and isEven(B):\n A, B = A >> 1, B >> 1\n else:\n A, B = (A + y) >> 1, (B - x) >> 1\n while isEven(v):\n v >>= 1\n if isEven(C) and isEven(D):\n C, D = C >> 1, D >> 1\n else:\n C, D = (C + y) >> 1, (D - x) >> 1\n if u >= v:\n u, A, B = u - v, A - C, B - D\n else:\n v, C, D = v - u, C - A, D - B\n if u == 0:\n return (C, D, g * v)", "def _make_bins(start, stop, step):\n bin_edges = np.arange(start, stop + step, step)\n\n return bin_edges", "def create_bin_boundaries(config, epoch_df, data_type, obs_per_bin, verbose=False):\n \n edges = create_edges_set(config, epoch_df, data_type)\n \n boundaries = []\n for edge in edges:\n start, end, freq = edge\n bin_size = freq * obs_per_bin\n boundaries.append(np.arange(start, end, bin_size))\n boundaries = np.concatenate(boundaries)\n \n return boundaries", "def bin_stats(x,y,xbins,stat='average'):\n nbins=len(xbins)\n if stat=='average' or stat=='mean': func=mean\n elif stat=='median': func=median\n elif stat=='rms' or stat=='std' : func=std\n elif stat=='std_robust' or stat=='rms_robust': func=std_robust\n elif stat=='mean_robust': func=mean_robust\n elif stat=='median_robust': func=median_robust\n elif stat=='sum': 
func=sum\n results=[]\n for i in range(nbins):\n if i<nbins-1:\n good=(greater_equal(x,xbins[i])\n *less(x,xbins[i+1]))\n else: good=(greater_equal(x,xbins[-1]))\n if sum(good)>1.: results.append(func(compress(good,y)))\n else:\n results.append(0.)\n print('Bin starting at xbins[%i] has %i points' % (i,sum(good)))\n return array(results)", "def crazy_histogram2d(x, y, bins=10, weights=None, reduce_w=None, NULL=None, reinterp=None):\n # define the bins (do anything you want here but needs edges and sizes of the 2d bins)\n try:\n nx, ny = bins\n except TypeError:\n nx = ny = bins\n\n # values you want to be reported\n if weights is None:\n weights = np.ones(x.size)\n\n if reduce_w is None:\n reduce_w = np.sum\n else:\n if not hasattr(reduce_w, '__call__'):\n raise TypeError('reduce function is not callable')\n\n # culling nans\n finite_inds = (np.isfinite(x) & np.isfinite(y) & np.isfinite(weights))\n _x = np.asarray(x)[finite_inds]\n _y = np.asarray(y)[finite_inds]\n _w = np.asarray(weights)[finite_inds]\n\n if not (len(_x) == len(_y)) & (len(_y) == len(_w)):\n raise ValueError('Shape mismatch between x, y, and weights: {}, {}, {}'.format(_x.shape, _y.shape, _w.shape))\n\n xmin, xmax = _x.min(), _x.max()\n ymin, ymax = _y.min(), _y.max()\n dx = (xmax - xmin) / (nx - 1.0)\n dy = (ymax - ymin) / (ny - 1.0)\n\n # Basically, this is just doing what np.digitize does with one less copy\n xyi = np.vstack((_x, _y)).T\n xyi -= [xmin, ymin]\n xyi /= [dx, dy]\n xyi = np.floor(xyi, xyi).T\n\n # xyi contains the bins of each point as a 2d array [(xi,yi)]\n\n d = {}\n for e, k in enumerate(xyi.T):\n key = (k[0], k[1])\n\n if key in d:\n d[key].append(_w[e])\n else:\n d[key] = [_w[e]]\n\n _xyi = np.array(d.keys()).T\n _w = np.array([ reduce_w(v) for v in d.values() ])\n\n # exploit a sparse coo_matrix to build the 2D histogram...\n _grid = sparse.coo_matrix((_w, _xyi), shape=(nx, ny))\n\n if reinterp is None:\n # convert sparse to array with filled value\n # grid.toarray() does not account for filled value\n # sparse.coo.coo_todense() does actually add the values to the existing ones, i.e. 
not what we want -> brute force\n if NULL is None:\n B = _grid.toarray()\n else: # Brute force only went needed\n B = np.zeros(_grid.shape, dtype=_grid.dtype)\n B.fill(NULL)\n for (x, y, v) in zip(_grid.col, _grid.row, _grid.data):\n B[y, x] = v\n else: # reinterp\n xi = np.arange(nx, dtype=float)\n yi = np.arange(ny, dtype=float)\n B = griddata(_grid.col.astype(float), _grid.row.astype(float), _grid.data, xi, yi, interp=reinterp)\n\n return B, (xmin, xmax, ymin, ymax), (dx, dy)", "def crazy_histogram2d(x, y, bins=10, weights=None, reduce_w=None, NULL=None, reinterp=None):\n # define the bins (do anything you want here but needs edges and sizes of the 2d bins)\n try:\n nx, ny = bins\n except TypeError:\n nx = ny = bins\n\n #values you want to be reported\n if weights is None:\n weights = np.ones(x.size)\n\n if reduce_w is None:\n reduce_w = np.sum\n else:\n if not hasattr(reduce_w, '__call__'):\n raise TypeError('reduce function is not callable')\n\n # culling nans\n finite_inds = (np.isfinite(x) & np.isfinite(y) & np.isfinite(weights))\n _x = np.asarray(x)[finite_inds]\n _y = np.asarray(y)[finite_inds]\n _w = np.asarray(weights)[finite_inds]\n\n if not (len(_x) == len(_y)) & (len(_y) == len(_w)):\n raise ValueError('Shape mismatch between x, y, and weights: {}, {}, {}'.format(_x.shape, _y.shape, _w.shape))\n\n xmin, xmax = _x.min(), _x.max()\n ymin, ymax = _y.min(), _y.max()\n dx = (xmax - xmin) / (nx - 1.0)\n dy = (ymax - ymin) / (ny - 1.0)\n\n # Basically, this is just doing what np.digitize does with one less copy\n xyi = np.vstack((_x, _y)).T\n xyi -= [xmin, ymin]\n xyi /= [dx, dy]\n xyi = np.floor(xyi, xyi).T\n\n #xyi contains the bins of each point as a 2d array [(xi,yi)]\n\n d = {}\n for e, k in enumerate(xyi.T):\n key = (k[0], k[1])\n\n if key in d:\n d[key].append(_w[e])\n else:\n d[key] = [_w[e]]\n\n _xyi = np.array(d.keys()).T\n _w = np.array([ reduce_w(v) for v in d.values() ])\n\n # exploit a sparse coo_matrix to build the 2D histogram...\n _grid = sparse.coo_matrix((_w, _xyi), shape=(nx, ny))\n\n if reinterp is None:\n #convert sparse to array with filled value\n ## grid.toarray() does not account for filled value\n ## sparse.coo.coo_todense() does actually add the values to the existing ones, i.e. 
not what we want -> brute force\n if NULL is None:\n B = _grid.toarray()\n else: # Brute force only went needed\n B = np.zeros(_grid.shape, dtype=_grid.dtype)\n B.fill(NULL)\n for (x, y, v) in zip(_grid.col, _grid.row, _grid.data):\n B[y, x] = v\n else: # reinterp\n xi = np.arange(nx, dtype=float)\n yi = np.arange(ny, dtype=float)\n B = griddata(_grid.col.astype(float), _grid.row.astype(float), _grid.data, xi, yi, interp=reinterp)\n\n return B, (xmin, xmax, ymin, ymax), (dx, dy)", "def _bin(self, X):\n H = np.linspace(0, 1, self.Nbin)\n return np.maximum(1 - (abs(X[..., None] - H)) / (H[1] - H[0]) , 0)", "def bin_data(x, y, yerr, npts):\n mod, nbins = len(x) % npts, len(x) / npts\n if mod != 0:\n x, y, yerr = x[:-mod], y[:-mod], yerr[:-mod]\n xb, yb, yerrb = [np.zeros(nbins) for i in range(3)]\n for i in range(npts):\n xb += x[::npts]\n yb += y[::npts]\n yerrb += yerr[::npts]**2\n x, y, yerr = x[1:], y[1:], yerr[1:]\n return xb/npts, yb/npts, yerrb**.5/npts", "def test_numpy_bins(self):\n # Load the data from the fixture\n data = load_occupancy(return_dataset=True)\n X, y = data.to_numpy()\n\n visualizer = BalancedBinningReference()\n visualizer.fit(y)\n visualizer.finalize()\n self.assert_images_similar(visualizer, tol=0.5)", "def bin_data(x, N_bins=100, xmin=0.0, xmax=1.0, density=False):\n\n hist_y, hist_edges = np.histogram(x, bins=N_bins, range=(xmin, xmax), density=density)\n hist_x = 0.5 * (hist_edges[1:] + hist_edges[:-1])\n hist_sy = np.sqrt(hist_y)\n hist_mask = hist_y > 0\n\n return hist_x, hist_y, hist_sy, hist_mask", "def histogram2d(x,y, bins=10, range=None, normed=False, weights=None):\r\n from numpy import histogramdd\r\n\r\n try:\r\n N = len(bins)\r\n except TypeError:\r\n N = 1\r\n\r\n if N != 1 and N != 2:\r\n xedges = yedges = asarray(bins, float)\r\n bins = [xedges, yedges]\r\n hist, edges = histogramdd([x,y], bins, range, normed, weights)\r\n return hist, edges[0], edges[1]", "def mi_bin(x, y, bins_x, bins_y):\n if bins_y == 0:\n bins_y = len(np.unique(y))\n # compute probabilities\n p_x = histogram(x, bins_x)\n p_y = histogram(y, bins_y)\n p_xy = histogram2d(x, y, bins_x, bins_y)\n p_x = p_x / p_x.sum()\n p_y = p_y / p_y.sum()\n p_xy = p_xy / p_xy.sum()\n # compute entropy\n h_x = entropy(p_x.astype(np.float32))\n h_y = entropy(p_y.astype(np.float32))\n h_xy = entropy(p_xy.ravel().astype(np.float32))\n # compute mutual information\n i = h_x + h_y - h_xy\n\n return i", "def _bin_numbers(col1, col2, bin_n):\n col1 = col1[~col1.map(lambda x: is_null_flag(x))].reset_index(drop=True)\n col2 = col2[~col2.map(lambda x: is_null_flag(x))].reset_index(drop=True)\n comb = pd.Series(pd.np.concatenate([col1, col2])).sort_values(inplace=False).reset_index(drop=True)\n bin_size = int(len(comb) / bin_n)\n bin_dict1, bin_dict2 = {}, {}\n for i in range(bin_n - 1): # last bin only needs bin_min\n bin_low = comb[i*bin_size]\n bin_high = comb[(i+1)*bin_size]\n bin_dict1[i] = sum((col1 >= bin_low) & (col1 < bin_high))\n bin_dict2[i] = sum((col2 >= bin_low) & (col2 < bin_high))\n # print bin_low, bin_high\n # Highest bin\n bin_dict1[i+1] = sum(col1 >= bin_high)\n bin_dict2[i+1] = sum(col2 >= bin_high)\n return bin_dict1, bin_dict2", "def calculate_bin_edges(n_bins, geo):\n #Gefittete offsets: x,y,factor: factor*(x+x_off)\n #[6.19, 0.064, 1.0128]\n \n #print \"Reading detector geometry in order to calculate the detector dimensions from file \" + fname_geo_limits\n #geo = np.loadtxt(fname_geo_limits)\n\n # derive maximum and minimum x,y,z coordinates of the geometry input [[first_OM_id, 
xmin, ymin, zmin], [last_OM_id, xmax, ymax, zmax]]\n geo_limits = np.nanmin(geo, axis = 0), np.nanmax(geo, axis = 0)\n #print ('Detector dimensions [[first_OM_id, xmin, ymin, zmin], [last_OM_id, xmax, ymax, zmax]]: ' + str(geo_limits))\n\n x_bin_edges = np.linspace(geo_limits[0][1] - 9.95, geo_limits[1][1] + 9.95, num=n_bins[0] + 1) #try to get the lines in the bin center 9.95*2 = average x-separation of two lines\n y_bin_edges = np.linspace(geo_limits[0][2] - 9.75, geo_limits[1][2] + 9.75, num=n_bins[1] + 1) # Delta y = 19.483\n z_bin_edges = np.linspace(geo_limits[0][3] - 4.665, geo_limits[1][3] + 4.665, num=n_bins[2] + 1) # Delta z = 9.329\n\n #offset_x, offset_y, scale = [6.19, 0.064, 1.0128]\n #x_bin_edges = (x_bin_edges + offset_x )*scale\n #y_bin_edges = (y_bin_edges + offset_y )*scale\n\n #calculate_bin_edges_test(geo, y_bin_edges, z_bin_edges) # test disabled by default. Activate it, if you change the offsets in x/y/z-bin-edges\n\n return x_bin_edges, y_bin_edges, z_bin_edges", "def bin(serie, bins):\n return serie.apply(lambda x: _bin(bins, x))", "def binning(x, y, xmin=None, xmax=None, dx=1 / 12.,\r\n window=3 / 12., interp=False, median=False):\r\n if xmin is None:\r\n xmin = np.nanmin(x)\r\n if xmax is None:\r\n xmax = np.nanmax(x)\r\n\r\n steps = np.arange(xmin, xmax, dx) # time steps\r\n bins = [(ti, ti + window) for ti in steps] # bin limits\r\n\r\n N = len(bins)\r\n yb = np.full(N, np.nan)\r\n xb = np.full(N, np.nan)\r\n eb = np.full(N, np.nan)\r\n nb = np.full(N, np.nan)\r\n sb = np.full(N, np.nan)\r\n\r\n for i in range(N):\r\n\r\n t1, t2 = bins[i]\r\n idx, = np.where((x >= t1) & (x <= t2))\r\n\r\n if len(idx) == 0:\r\n xb[i] = 0.5 * (t1 + t2)\r\n continue\r\n\r\n ybv = y[idx]\r\n\r\n if median:\r\n yb[i] = np.nanmedian(ybv)\r\n else:\r\n yb[i] = np.nanmean(ybv)\r\n\r\n xb[i] = 0.5 * (t1 + t2)\r\n eb[i] = mad_std(ybv)\r\n nb[i] = np.sum(~np.isnan(ybv))\r\n sb[i] = np.sum(ybv)\r\n\r\n if interp:\r\n try:\r\n yb = np.interp(x, xb, yb)\r\n eb = np.interp(x, xb, eb)\r\n sb = np.interp(x, xb, sb)\r\n xb = x\r\n except:\r\n pass\r\n\r\n return xb, yb, eb, nb, sb", "def conv_gauss_custom(x, y, fwhm, dwindow=2):\n # fwhm = sigma * 2 * np.sqrt(2 * np.log(2))\n\n # Check if fwhm is a number or a list\n if isinstance(fwhm, (int, float)):\n # If fwhm is a number, make an array with fwhm in each entry\n fwhm = np.ones_like(x) * fwhm\n else:\n # Check fwhm has same dimensions as x\n if len(fwhm) != len(x):\n sys.exit('Array `fwhm` has different length than `x`: len(fwhm)={}, len(x)={}'.format(len(fwhm), len(x)))\n\n # Number of total datapoints\n nx = len(x)\n\n # -----------------------\n\n # For each datapoint define a \"bin\" or \"pixel\"\n # E.g. 
for the datapoint x_3:\n # - Bin center: value of the datapoint: x_3\n # - Bin left edge: half the distance between the current datapoint and the previous one: x_3 - (x_3 - x_2) * 0.5\n # - Bin right edge: half the distance between the current datapoint and the next one: x_3 + (x_4 - x_3) * 0.5\n\n # Distances between center of each bin\n bin_distance = x[1:] - x[:-1] # length = len(x) - 1\n # Define left/right edge of each bin as half the distance to the bin previous/next to it\n bin_edgesmiddle = x[:-1] + 0.5 * bin_distance # middle points\n bin_edgesfirst = x[0] - 0.5 * bin_distance[0] # first point\n bin_edgeslast = x[-1] + 0.5 * bin_distance[-1] # last point\n edges = np.concatenate(([bin_edgesfirst], bin_edgesmiddle, [bin_edgeslast]), axis=0) # length = len(x) + 1\n\n # Width of each bin\n # If the input array x is equally spaced, `bin_width` will be equal to `bin_distance`\n bin_width = edges[1:] - edges[:-1] # length = len(x)\n\n # -----------------------\n\n # Convert FWHM from wavelength units to bins -> Number of bins per FWHM\n fwhm_bin = fwhm / bin_width\n # Round number of bins per FWHM\n nbins = np.ceil(fwhm_bin) #npixels\n\n ## Convert sigma from wavelength units to bins -> Number of bins per sigma\n #sigma_bin = sigma / bin_width\n ## Round number of bins per sigma\n #nbins = np.ceil(sigma_bin) #npixels\n\n # -----------------------\n\n yconv = np.zeros_like(x)\n for i, x_i in enumerate(x):\n\n # Slow method -> THIS IS WHAT MAKES THE OTHER FUNCTION SLOW!\n # # Select kernel window\n # dwindow = 2 * fwhm #2 * fwhm\n # x1 = (np.argmin(np.abs(x - (x_i - dwindow))))\n # x2 = (np.argmin(np.abs(x - (x_i + dwindow))))\n # irang = slice(x1, x2+1)\n\n # Number of pixels at each side of x_i:\n dx = dwindow * nbins[i] * 0.5\n i1 = int(max(0, i - dx))\n i2 = int(min(nx, i + dx + 1))\n irang = slice(i1, i2 + 1)\n\n # Gaussian kernel\n kernel = 1./(np.sqrt(2*np.pi)*fwhm[i]) * np.exp(- ((x[irang] - x_i)**2) / (2 * fwhm[i]**2))\n kernel = kernel / np.sum(kernel)\n\n # Convolve\n yconv[i] = np.sum(y[irang] * kernel)\n\n return yconv", "def binarize(X, *, threshold=..., copy=...):\n ...", "def asBinEdges(bins, data, scale='lin'):\n data = np.asarray(data)\n ndim = data.ndim\n edges = ndim * [None]\n\n try:\n M = len(bins)\n if M != ndim:\n flag_1d = really1d(bins)\n if flag_1d:\n bins = [np.asarray(bins, float)]\n\n if not flag_1d or len(bins) != ndim:\n raise ValueError('The dimension of bins must be equal '\n 'to the dimension of the sample x.')\n except TypeError:\n bins = ndim * [bins]\n\n # If `scale` is a single value for all dimensions\n if(np.ndim(scale) == 0):\n scale = ndim * [scale]\n # otherwise `scale` must be specified for each dimension\n elif(np.size(scale) != ndim):\n raise ValueError(\"`scale` must be a single value or a value for each dimension.\")\n\n # Find range for each dimension\n smin = np.atleast_1d(np.array(data.min(axis=0), float))\n smax = np.atleast_1d(np.array(data.max(axis=0), float))\n # Make sure the bins have a finite width.\n for i in range(len(smin)):\n if smin[i] == smax[i]:\n smin[i] = smin[i] - 0.5\n smax[i] = smax[i] + 0.5\n\n # Create arrays describing edges of bins\n for ii in range(ndim):\n if np.isscalar(bins[ii]):\n edges[ii] = spacing([smin[ii], smax[ii]], scale[ii], num=bins[ii] + 1)\n else:\n edges[ii] = np.asarray(bins[ii], float)\n\n if(ndim == 1):\n edges = edges[0]\n\n return edges", "def binning ( self , axis , name = '' ) :\n assert isinstance ( axis , ROOT.TAxis ),\\\n 'Invalid axis %s/%s' % ( axis , type ( axis ) )\n\n ## uniform 
binning?\n if not axis.IsVariableBinSize() : \n return ROOT.RooFit.Binning ( axis.GetNbins() , axis.GetXmin() , axis.GetXmax() )\n ##\n xbins = axis.GetXbins().GetArray()\n rb = ROOT.RooBinning ( axis.GetNbins() , xbins , name )\n ##\n self.aux_keep.append ( rb )\n ##\n return ROOT.RooFit.Binning ( rb )", "def bin_spec_y(self, start, end):\n #print(self.spec_x.tolist())\n start_spec_x = closest_value_index(start, self.spec_x.tolist())\n i = 0\n bin_sum = 0\n while(start_spec_x + i < len(self.spec_x) and self.spec_x[start_spec_x + i] <= end):\n bin_sum += self.spec_y[start_spec_x + i]\n i += 1\n average = bin_sum / (i+1)\n return average", "def histogram2d(x, y, bins=10, range=None, weights=None, flow=False, cons_var=False):\n try:\n N = len(bins)\n except TypeError:\n N = 1\n\n x = np.asarray(x)\n y = np.asarray(y)\n if weights is not None:\n weights = np.asarray(weights)\n if N != 1 and N != 2:\n bins = np.asarray(bins)\n return var2d(x, y, bins, bins, weights=weights, flow=flow, cons_var=cons_var)\n\n elif N == 1:\n return fix2d(\n x,\n y,\n bins=bins,\n range=range,\n weights=weights,\n flow=flow,\n cons_var=cons_var,\n )\n\n elif N == 2:\n if isinstance(bins[0], int) and isinstance(bins[1], int):\n return fix2d(x, y, bins=bins, range=range, weights=weights, flow=flow)\n else:\n b1 = np.asarray(bins[0])\n b2 = np.asarray(bins[1])\n return var2d(x, y, b1, b2, weights=weights, flow=flow)\n\n else:\n raise ValueError(\"bins argument is not compatible\")", "def histogram2d(x, y, bins=10, range=None, weights=None, density=None):\n try:\n n = len(bins)\n except TypeError:\n n = 1\n\n if n != 1 and n != 2:\n if isinstance(bins, cupy.ndarray):\n xedges = yedges = bins\n bins = [xedges, yedges]\n else:\n raise ValueError('array-like bins not supported in CuPy')\n\n hist, edges = histogramdd([x, y], bins, range, weights, density)\n return hist, edges[0], edges[1]", "def bin_data(y, num_bins, std_away):\n mean = np.mean(y)\n std = np.std(y)\n pitch_shifts = np.arange(-num_bins, num_bins + 1)\n thresholds = (std * std_away) * pitch_shifts + mean\n\n result = []\n for point in y:\n if point < thresholds[0]:\n result.append(pitch_shifts[0] - 1)\n elif point > thresholds[-1]:\n result.append(pitch_shifts[-1] + 1)\n else:\n for i in range(len(thresholds) - 1):\n if point >= thresholds[i] and point < thresholds[i + 1]:\n result.append(i - num_bins)\n return np.array(result)", "def _fast_hist_2d(data, bin_edges):\n # Yes, I've tested this against histogramdd().\n xassign = np.digitize(data[:,0], bin_edges[1:-1]) \n yassign = np.digitize(data[:,1], bin_edges[1:-1])\n nbins = len(bin_edges) - 1\n flatcount = np.bincount(xassign + yassign * nbins, minlength=nbins*nbins)\n return flatcount.reshape((nbins, nbins))", "def createBin(image_object, num=8):\n image_array = sitk.GetArrayFromImage(image_object)\n _, bin_edges = np.histogram(image_array.flatten(), bins=num)\n bin_edges[-1] += 1\n for i in range(num):\n image_array[(image_array >= bin_edges[i]) & (image_array < bin_edges[i+1])] = i+1\n image_object_bin = sitk.GetImageFromArray(image_array)\n return image_object_bin", "def getLinBins(nbins, low, high):\n x = float(low)\n dx = float(high-low)/nbins\n\n return np.array([x+i*dx for i in range(nbins+1)], dtype=float)", "def xy_bin(xo, yo, n=100, mode='log', bins=None):\n \n assert(len(xo)==len(yo))\n \n if bins is None:\n if mode == 'log':\n x = np.logspace(np.log10(xo[xo>0].min()), np.log10(xo.max()), n+1)\n elif mode == 'lin':\n x = np.linspace(xo.min(), xo.max(), n+1)\n else:\n x = np.sort(bins)\n \n y 
= (np.histogram(xo, x, weights=yo)[0] /\n np.histogram(xo, x)[0])\n \n return x[:-1], y", "def _bresenham_pairs(x0: int, y0: int, x1: int, y1: int) -> np.ndarray:\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n dim = max(dx, dy)\n pairs = np.zeros((dim, 2), dtype=np.int64)\n x, y = x0, y0\n sx = -1 if x0 > x1 else 1\n sy = -1 if y0 > y1 else 1\n if dx > dy:\n err = dx // 2\n for i in range(dx):\n pairs[i, 0] = x\n pairs[i, 1] = y\n err -= dy\n if err < 0:\n y += sy\n err += dx\n x += sx\n else:\n err = dy // 2\n for i in range(dy):\n pairs[i, 0] = x\n pairs[i, 1] = y\n err -= dx\n if err < 0:\n x += sx\n err += dy\n y += sy\n return pairs", "def binning_axis(self) -> int:\r\n return 0", "def bin_discretize(self, variables=[], bins=3,\n min_const_samples_bin_size=1.0/3):\n self.edges=np.zeros((self.arity.size,bins+1))\n for i in variables:\n un_cnt=np.unique(self.data[:,i],return_counts=True)\n constvals=un_cnt[0][un_cnt[1]>self.data.shape[0]*min_const_samples_bin_size]\n mask=np.ones(self.data.shape[0],dtype=bool)\n if constvals.size>0:\n for j,cv in enumerate(constvals):\n mask*=(self.data[:,i]!=cv)\n self.data[self.data[:,i]==cv,i]=j\n\n size=np.sum(mask)/bins\n sorted_i=np.argsort(self.data[mask,i])\n edges=[self.data[mask,i][sorted_i[int(size*num)-1]] for num in range(1,bins)]\n self.edges[i]=[self.data[mask,i][sorted_i[0]]]+edges+[self.data[mask,i][sorted_i[-1]]]\n self.data[mask,i]=np.searchsorted(edges,self.data[mask,i])+constvals.size\n self.arity[i]=len(edges)+1+constvals.size", "def rebin_2d(x_in, data, x_new, statistic='mean'):\n edges = binvec(x_new)\n datai = np.zeros((len(x_new), data.shape[1]))\n data = ma.masked_invalid(data) # data may contain nan-values\n for ind, values in enumerate(data.T):\n mask = ~values.mask\n if ma.any(values[mask]):\n datai[:, ind], _, _ = stats.binned_statistic(x_in[mask],\n values[mask],\n statistic=statistic,\n bins=edges)\n datai[~np.isfinite(datai)] = 0\n return ma.masked_equal(datai, 0)", "def binnedAverage(x, y, bins=20):\n xbins, step = np.linspace(np.min(x), np.max(x), num=bins, retstep=True)\n xbins = (xbins + step/2)[:-1]\n emptyBins = []\n ymeans = []\n for xbi, xb in enumerate(xbins):\n ytotal = 0\n ycount = 0\n for y_i, y_ in enumerate(y):\n if xb - step/2 < x[y_i] < xb + step/2:\n ytotal += y_\n ycount += 1\n if ycount >= 1:\n ymeans.append(ytotal/ycount)\n else:\n emptyBins.append(xbi)\n xbins = np.delete(xbins, emptyBins)\n return xbins, np.array(ymeans)", "def mapBinEdgesAndDataToXYZStandard(binEdgesArray, binDataArray):\n\t#TODO: Check that this is actually a 2-d array (need +1 since number of edges is +1 more than number of bins)\n\tdimX, dimY = len(binEdgesArray)+1, len(binEdgesArray[0])+1\n\n\t#We get a ValueError later anyway (at time of writing) but better to raise it here\n\tif len(binEdgesArray.shape)-2 != 2:\n\t\tnDims = len(binEdgesArray.shape)-2\n\t\traise ValueError(\"Input array seems to be {} dimensions; we need 2 dimensions\".format(nDims))\n\n\toutX = np.zeros( (dimX,dimY) )\n\toutY = np.zeros( (dimX,dimY) )\n\toutZ = copy.deepcopy(binDataArray)\n\n\tfor idxX in range(dimX):\n\t\tfor idxY in range(dimY):\n\t\t\t#Edge case where we take the upper edge of the previous bin (instead of lower edge of current)\n\t\t\tif (idxX==dimX-1) and (idxY==dimY-1):\n\t\t\t\toutX[idxX][idxY] = binEdgesArray[idxX-1][idxY-1][0][1] #Is this correct????\n\t\t\t\toutY[idxX][idxY] = binEdgesArray[idxX-1][idxY-1][1][1] \n\t\t\telif idxX==dimX-1:\n\t\t\t\toutX[idxX][idxY] = binEdgesArray[idxX-1][idxY][0][1]\n\t\t\t\toutY[idxX][idxY] = 
binEdgesArray[idxX-1][idxY][1][0]\n\t\t\telif idxY==dimY-1:\n\t\t\t\toutX[idxX][idxY] = binEdgesArray[idxX][idxY-1][0][0]\n\t\t\t\toutY[idxX][idxY] = binEdgesArray[idxX][idxY-1][1][1]\t\t\t\n\t\t\telse:\n\t\t\t\toutX[idxX][idxY] = binEdgesArray[idxX][idxY][0][0]\n\t\t\t\toutY[idxX][idxY] = binEdgesArray[idxX][idxY][1][0]\n\n\treturn outX, outY, outZ", "def grid_to_bins(grid, start_bin_val, end_bin_val):\n bin_centers = (grid[1:] + grid[:-1])/2.0\n bins = np.concatenate([[start_bin_val], bin_centers, [end_bin_val]])\n return bins", "def __call__(self, n_bins, segment, elements):\n\n # n_bins\n assert type(n_bins) is int\n assert n_bins > 0\n\n # segment\n assert type(segment) is list or type(segment) is tuple\n assert len(segment) == 2\n assert np.isscalar(segment[0]) and np.isscalar(segment[1])\n assert segment[0] < segment[1]\n\n # elements\n assert type(elements) is np.ndarray, f\"elements should be an np.ndarray, instead of {type(elements)}\"\n assert elements.dtype == np.number\n\n sorted_elements = np.sort(elements)\n\n bin_card = int(floor(elements.shape[0]/n_bins))\n\n bin_boundaries = [segment[0]]\n\n for i in range(1, n_bins):\n boundary_l = sorted_elements[i*bin_card - 1]\n boundary_r = sorted_elements[i * bin_card]\n boundary = (boundary_l+boundary_r)/2\n\n bin_boundaries.append(boundary)\n\n bin_boundaries.append(segment[1])\n\n return np.array(bin_boundaries)", "def get_shape_bin_edges(shapes_path, datacard_bin):\n shapes_file = ROOT.TFile.Open(shapes_path)\n print 'tfile path is', shapes_path\n print 'list of keys'\n #ROOT.gDirectory.GetListOfKeys().ls()\n print 'gonnad cd', datacard_bin\n print shapes_file.cd(datacard_bin)\n shapes = ROOT.gDirectory.GetListOfKeys()\n #print 'list of Keys is', ROOT.gDirectory.GetListOfKeys().ls()\n # Since the nominal and varied shapes share the same binning,\n # take any of the histograms found in the shapes file.\n print 'debug0'\n shape = ROOT.gDirectory.Get(shapes[0].GetName())\n print 'debug'\n bin_edges = np.array(\n [shape.GetXaxis().GetBinLowEdge(i) for i in xrange(1, shape.GetNbinsX() + 1)],\n dtype=np.float64,\n )\n shapes_file.Close()\n print 'bin_edges for datacard_bin is', bin_edges\n #sys.exit()\n return bin_edges", "def test_binops(self):", "def mi_bin_conn_time(x, y, bins_x, bins_y):\n n_times, n_trials = x.shape\n mi = np.zeros((n_times), dtype=np.float32)\n for t in range(n_times):\n mi[t] = mi_bin(x[t, :], y[t, :], bins_x, bins_y)\n return mi", "def bincalc(nbin=0.1,bmin=5,bmax=2000):\n\n logbmin=np.log10(bmin)\n logbmax=np.log10(bmax)\n\n logbins=np.arange(logbmin,logbmax,nbin)\n\n bins=10**logbins\n\n #bins=np.linspace(bmin,bmax,60)\n return (bins)", "def getBinIndex(self, x):\n\t\tb = -1\n\t\tif x == self._max_val: # final bin is [low, high], where others are [low,high)\n\t\t\tb = len(self._bins)-1\n\t\telse:\n\t\t\tb = math.floor((x-self._min_val)/self._bin_width)\n\t\treturn int(b)", "def binning(data, low, high):\n if len(data) == 0: return 1\n\n mask1 = (data >= low)\n mask2 = (data < high)\n mask3 = numpy.logical_and(mask1, mask2)\n data = data[mask3]\n\n if len(data) == 0: return 10\n\n data.sort()\n q1 = data[int(math.floor(0.25*len(data)))]\n q3 = data[int(math.floor(0.75*len(data)))]\n binwidth = 2. 
* (q3 - q1) / len(data)**(1./3.)\n if binwidth > 0.:\n return max(10, int(math.ceil((high - low)/binwidth)))\n else:\n return 10", "def _calcBins(self, contribs, parValues, fraction, minReq):\n # single set of R for this calculation\n bins = np.zeros(self.binCount)\n binObs = np.zeros(self.binCount)\n for bi in range(self.binCount):\n val, obs = self._calcBin(\n self._binMask(bi, parValues),\n fraction, minReq)\n bins[bi] = val\n binObs[bi] = obs\n cdf = self._calcCDF(bins)\n return bins, binObs, cdf", "def bins_match (a, b):\n return 0 == (\n np.sum ((a.xbins - b.xbins)**2)\n + np.sum ((a.ybins - b.ybins)**2) )", "def mean_relationship_twoD(x, y, bins_values):\r\n sort_ind_x = np.argsort(x)\r\n x = x[sort_ind_x]\r\n y = y[:, sort_ind_x]\r\n hist, bin_edges = np.histogram(x, bins=bins_values)\r\n array_end = np.cumsum(hist)\r\n array_start = np.cumsum(hist) - hist\r\n y_x = np.zeros((len(y), len(array_start)))\r\n for i in np.arange(len(array_start)):\r\n y_x[:, i] = np.mean(y[:, array_start[i]:array_end[i]], axis=1)\r\n return y_x", "def calculateMetallicityBinEdges(self):\n\n if self.binInLogSpace:\n logMetallicities = np.log10(self.metallicityGrid)\n b= logMetallicities[:-1] + (logMetallicities[1:] - logMetallicities[:-1])/2.\n b = 10.**b #the boundaries for integration are not in log space so\n #convert to \"normal\" numbers.\n else:\n b= (self.metallicityGrid[1:] - self.metallicityGrid[:-1])/2. \\\n + self.metallicityGrid[:-1] \n\n self.metallicityBinEdges = np.zeros(len(b)+2)\n\n #the lowest/highest metallicity bin edge are set in options\n #the calculated b edges are all in between\n\n self.metallicityBinEdges[0] = self.metallicityLowerLimit\n self.metallicityBinEdges[-1] = self.metallicityUpperLimit\n self.metallicityBinEdges[1:-1] = b", "def binn_fft(self):\n bin_res = []\n for fft_bin in BINS:\n bin_res.append(self.bin_spec_y(fft_bin[0], fft_bin[1]))\n return bin_res", "def bin_matrix(x, binning):\n\n N = len(x)\n B = []\n for i in range(len(binning) - 1):\n\n line = np.zeros(N)\n for j in range(N):\n\n if x[j] >= binning[i] and x[j] < binning[i+1]:\n\n size = binning[i + 1] - binning[i]\n line[j] = 1 / size\n\n B.append(line)\n\n B = np.array(B)\n\n return(B)", "def binarize(self, image):\n\n [rows, columns] = np.shape(image)\n bin_img = np.zeros((rows, columns), dtype=int)\n print(\"############## Using to binarize an image ##############\")\n hist = self.compute_histogram(image)\n threshold = self.find_optimal_threshold(hist)\n for i in range(rows):\n for j in range(columns):\n if image[i, j] < threshold:\n bin_img[i, j] = 0\n else:\n bin_img[i, j] = 255\n # print(\"binary image: \\n\", bin_img)\n\n return bin_img", "def to_bin(value, edges):\n\n previous = 0\n for v in edges:\n if previous <= value <= v:\n return (previous, v)\n previous = v\n return (previous, None)", "def compute_histogram(samples, nbins=50, piecewise_constant=True):\n import sys\n if 'numpy' in sys.modules:\n y0, bin_edges = histogram(samples, bins=nbins, normed=True)\n h = bin_edges[1] - bin_edges[0] # bin width\n if piecewise_constant:\n x = zeros(2*len(bin_edges), type(bin_edges[0]))\n y = x.copy()\n x[0] = bin_edges[0]\n y[0] = 0\n for i in range(len(bin_edges)-1):\n x[2*i+1] = bin_edges[i]\n x[2*i+2] = bin_edges[i+1]\n y[2*i+1] = y0[i]\n y[2*i+2] = y0[i]\n x[-1] = bin_edges[-1]\n y[-1] = 0\n else:\n x = zeros(len(bin_edges)-1, type(bin_edges[0]))\n y = y0.copy()\n for i in range(len(x)):\n x[i] = (bin_edges[i] + bin_edges[i+1])/2.0\n return x, y", "def bin_input(neu, bin_size, overlap = 0):\n win_d = 
bin_size - overlap\n n_bins = int(((neu.shape[0]-bin_size)/win_d)+1)\n FRmat = np.empty([n_bins, neu.shape[1]])\n for i in range(n_bins):\n FRmat[i,:] = np.sum(neu[i*win_d:i*win_d+bin_size,:], axis = 0)\n \n return FRmat", "def getLogBins(nbins, low, high):\n\n x = float(low)\n dx = pow(high/low, 1.0/nbins);\n \n return np.array([x*pow(dx,i) for i in range(nbins+1)], dtype=float)", "def binary_binned_metric(model=None, X=None, positive_scores=None, Y=None, bins_weights=None):\n\n return specific_class_binned_metric(model=model, X=X, specific_scores=positive_scores,\n Y=Y, bins_weights=bins_weights, class_index=1)", "def compute_bin_indices(X_part, bin_limits=None, n_bins=20):\n if bin_limits is None:\n bin_limits = []\n for variable_data in range(X_part.shape[1]):\n bin_limits.append(numpy.linspace(numpy.min(variable_data), numpy.max(variable_data), n_bins + 1)[1: -1])\n\n bin_indices = numpy.zeros(len(X_part), dtype=numpy.int)\n for axis, bin_limits_axis in enumerate(bin_limits):\n bin_indices *= (len(bin_limits_axis) + 1)\n bin_indices += numpy.searchsorted(bin_limits_axis, X_part[:, axis])\n\n return bin_indices", "def setup_bins(self):\n width = int((self.max - self.min) / self.bin_size)\n bins = {\n i * width + self.min: (idx, idx + self.bin_size)\n for i, idx in enumerate(range(0, len(self.nums), self.bin_size))\n }\n return bins", "def tcbin(x, y=8):\n if x >= (2**(y - 1)) or x < -(2**(y - 1) or y < 1):\n raise Exception(\"Argument outside of range.\")\n if x >= 0:\n binstr = bin(x)\n # pad with leading zeros\n while len(binstr) < y + 2:\n binstr = \"0b0\" + binstr[2:]\n return binstr\n return bin((2**y) + x) # x is negative", "def __init__(self, edges, yedges=None):\n if yedges is None:\n if len(edges) == 2:\n BinnedSet.__init__(self, edges)\n else:\n raise ValueError(\"'edges' does not contain exactly two bin \"\n \"edge arrays\")\n else:\n BinnedSet.__init__(self, (edges, yedges))", "def matrix2bin(X, y, filename):\n if len(X.shape) == 3:\n Xy = []\n for dimX, dimy in zip(X, y):\n ym = np.array([dimy])\n Xy.append(np.append(dimX, ym.T, axis=1))\n Xy = np.array(Xy)\n else:\n ym = np.array([y])\n Xy = np.append(X, ym.T, axis=1)\n np.save(filename, Xy)", "def bin_search(arr, x):\n \n low = 0\n hi = len(arr) - 1\n \n while(low <= hi): \n \n mid = int((low + hi) / 2) # find middle idx\n\n if( x >= arr[mid]): # if x on the right, change low idx and search right side\n low = mid + 1; \n else: # else search left side\n hi = mid - 1\n\n return hi", "def label_values_to_bins(array_to_bin:object, bin_count:int):\n\t\t# Make 1D for qcut.\n\t\tarray_to_bin = array_to_bin.flatten()\n\t\t# For really unbalanced labels, I ran into errors where bin boundaries would be duplicates all the way down to 2 bins.\n\t\t# Setting `duplicates='drop'` to address this.\n\t\tbin_numbers = pd.qcut(x=array_to_bin, q=bin_count, labels=False, duplicates='drop')\n\t\t# Convert 1D array back to 2D for the rest of the program.\n\t\tbin_numbers = np.reshape(bin_numbers, (-1, 1))\n\t\treturn bin_numbers", "def inner(x, y):\n\n return np.inner(x.ravel(), y.ravel())", "def bin_absorbers(dat, bin_attr='z', binsize=None, bin_start=None,\n bin_end=None, bins=None):\n output = []\n if not bins: # if no custom bins specified, then take equallly spaced bins\n bins = np.arange(bin_start, bin_end, binsize)\n bins = list(zip(bins, bins + binsize))\n\n for b in bins:\n output.append(\n [item for item in dat if b[0] <= getattr(item, bin_attr) < b[1]]\n )\n return output", "def binary_add(x, y):\n # Makes sure that the arrays have 
the same length.\n # Could be changed to padding on extra zeroes, if so desired.\n assert(len(x) == len(y))\n\n z = [0] * (len(x)+1)\n for a, (i, j) in enumerate(zip(x[::-1], y[::-1])):\n # Makes sure that the array is a binary array.\n # Strictly speaking, not necessary. But nice.\n if i not in [0, 1]: return False\n if j not in [0, 1]: return False\n\n # if i and j are both 1 \n if i and j:\n z[a] += 0\n z[a+1] += 1\n # if only one of them is 1\n elif i or j:\n z[a] += 1\n # if they're both 0\n else: pass\n\n if z[a] == 2:\n z[a+1] += 1\n z[a] -= 2\n \n return z[::-1]", "def rebin(array, dimensions=None, scale=None):\n if dimensions is not None:\n if isinstance(dimensions, float):\n dimensions = [int(dimensions)] * len(array.shape)\n elif isinstance(dimensions, int):\n dimensions = [dimensions] * len(array.shape)\n elif len(dimensions) != len(array.shape):\n raise RuntimeError('')\n elif scale is not None:\n if isinstance(scale, float) or isinstance(scale, int):\n dimensions = map(int, map(round, map(lambda x: x * scale, array.shape)))\n elif len(scale) != len(array.shape):\n raise RuntimeError('')\n else:\n raise RuntimeError('Incorrect parameters to rebin.\\n\\trebin(array, dimensions=(x,y))\\n\\trebin(array, scale=a')\n import itertools\n # dY, dX = map(divmod, map(float, array.shape), dimensions)\n if np.shape(array) == dimensions: return array # no rebinning actually needed\n result = np.zeros(dimensions)\n for j, i in itertools.product(*map(xrange, array.shape)):\n (J, dj), (I, di) = divmod(j * dimensions[0], array.shape[0]), divmod(i * dimensions[1], array.shape[1])\n (J1, dj1), (I1, di1) = divmod(j + 1, array.shape[0] / float(dimensions[0])), divmod(i + 1,\n array.shape[1] / float(\n dimensions[1]))\n\n # Moving to new bin\n # Is this a discrete bin?\n dx, dy = 0, 0\n if (I1 - I == 0) | ((I1 - I == 1) & (di1 == 0)):\n dx = 1\n else:\n dx = 1 - di1\n if (J1 - J == 0) | ((J1 - J == 1) & (dj1 == 0)):\n dy = 1\n else:\n dy = 1 - dj1\n # Prevent it from allocating outide the array\n I_ = min(dimensions[1] - 1, I + 1)\n J_ = min(dimensions[0] - 1, J + 1)\n result[J, I] += array[j, i] * dx * dy\n result[J_, I] += array[j, i] * (1 - dy) * dx\n result[J, I_] += array[j, i] * dy * (1 - dx)\n result[J_, I_] += array[j, i] * (1 - dx) * (1 - dy)\n allowError = 0.1\n assert (array.sum() < result.sum() * (1 + allowError)) & (array.sum() > result.sum() * (1 - allowError))\n return result", "def create_bins(start, end, n_bins):\n bins = np.linspace(start, end, n_bins)\n return bins", "def bin_data(data, lat, lon, binsize=1, uv_data=False, pressure=None):\n\n # Create lats and lons based on binsize\n lonlen = 360\n latlen = 180\n\n lon_lowerlim = 0\n lon_upperlim = 360\n\n lat_lowerlim = -90\n lat_upperlim = 90\n\n if latlen % binsize == 0 and lonlen % binsize == 0:\n latbin = int(latlen/binsize)\n lonbin = int(lonlen/binsize)\n n_deg = binsize/2\n\n ll_lats = np.linspace(lat_lowerlim+(n_deg),\n lat_upperlim-(n_deg),\n latbin)\n\n ll_lons = np.linspace(lon_lowerlim+(n_deg),\n lon_upperlim-(n_deg),\n lonbin)\n\n else:\n print('ERROR: Binsize does not work for grid shape (180,360). 
Please use different binsize.')\n return\n\n paramlist = list(itertools.product(ll_lats, ll_lons))\n\n # Bin Data\n if uv_data == True:\n binned_u_data = np.full((latbin, lonbin), np.nan, dtype=object)\n binned_v_data = np.full((latbin, lonbin), np.nan, dtype=object)\n\n if pressure is not None:\n binned_pressure = np.full((latbin, lonbin), np.nan, dtype=object)\n\n for val in paramlist:\n # Get index of 1x1 grid lat and lon\n latidx = np.where(ll_lats == val[0])\n lonidx = np.where(ll_lons == val[1])\n # values of the 1x1 grid lat and lon\n binnedlons = val[1]\n binnedlats = val[0]\n\n # find instances where data is within 1x1 grid point of orginal data\n data_idx = np.where((lon >= binnedlons - n_deg) & (lon <= binnedlons + n_deg) &\n (lat >= binnedlats - n_deg) & (lat <= binnedlats + n_deg))\n\n latlon_idx = [latidx[0][0], lonidx[0][0]]\n\n # calculate stats if there is data at this grid point, else append np.nan\n if len(data_idx[0]) > 0:\n u = data['u'][data_idx]\n v = data['v'][data_idx]\n\n binned_u_data[latlon_idx[0], latlon_idx[1]] = u\n binned_v_data[latlon_idx[0], latlon_idx[1]] = v\n\n if pressure is not None:\n p = pressure[data_idx]\n binned_pressure[latlon_idx[0], latlon_idx[1]] = p\n\n if pressure is not None:\n return binned_u_data, binned_v_data, binned_pressure\n\n else:\n return binned_u_data, binned_v_data\n\n else:\n binned_data = np.full((latbin, lonbin), np.nan, dtype=object)\n if pressure is not None:\n binned_pressure = np.full((latbin, lonbin), np.nan, dtype=object)\n\n for val in paramlist:\n # Get index of grid lat and lon\n latidx = np.where(ll_lats == val[0])\n lonidx = np.where(ll_lons == val[1])\n # values of the 1x1 grid lat and lon\n binnedlons = val[1]\n binnedlats = val[0]\n\n # find instances where data is within 1x1 grid point of orginal data\n data_idx = np.where((lon >= binnedlons - n_deg) & (lon <= binnedlons + n_deg) &\n (lat >= binnedlats - n_deg) & (lat <= binnedlats + n_deg))\n\n latlon_idx = [latidx[0][0], lonidx[0][0]]\n\n # calculate stats if there is data at this grid point\n if len(data_idx[0]) > 0:\n d = data[data_idx]\n binned_data[latlon_idx[0], latlon_idx[1]] = d\n\n if pressure is not None:\n p = pressure[data_idx]\n binned_pressure[latlon_idx[0], latlon_idx[1]] = p\n\n if pressure is not None:\n return binned_data, binned_pressure\n\n else:\n return binned_data", "def test_pandas_bins(self):\n # Load the data from the fixture\n data = load_occupancy(return_dataset=True)\n X, y = data.to_pandas()\n\n visualizer = BalancedBinningReference()\n visualizer.fit(y)\n visualizer.finalize()\n self.assert_images_similar(visualizer, tol=0.5)", "def bin_center_to_edges(centers):\n edges = bin_edges_to_center(centers)\n edges = np.append(centers[0]-(edges[0]-centers[0]), edges)\n edges = np.append(edges, centers[-1]+(centers[-1]-edges[-1]))\n return edges", "def bin_the_data(neuron_spikes, first, last, bin_size):\n neuron_activity = []\n timebins = range(first, int(last) + int(last) % bin_size, bin_size)\n for spike in neuron_spikes:\n activity = []\n spike_time = spike[0]\n i = 0\n for bin_size in timebins:\n k = 0\n while spike_time < bin_size:\n i += 1\n if i >= np.size(spike):\n break\n spike_time = spike[i]\n k += 1\n activity.append(k)\n neuron_activity.append(activity)\n return neuron_activity, timebins", "def createBins():\n theBins = []\n startFreq = 60\n for a in range(32):\n endFreq = int(startFreq*1.12+12)\n theRange = (startFreq, endFreq)\n startFreq = endFreq\n theBins.append(theRange)\n return(theBins)", "def 
get_bin_means(X,Y,bin_edges=None,mean='median',error='sem',minimum_n=25):\n \n assert(X.shape == Y.shape)\n \n # Flatten if not vectors\n if X.ndim > 1:\n X = X.flatten()\n Y = Y.flatten()\n \n if (bin_edges == None).all():\n X_min = np.nanmin(X)\n X_max = np.nanmax(X)\n bin_edges = np.linspace(X_min,X_max,num=10)\n \n \n which_bin = np.digitize(X,bin_edges)\n Nbins = len(bin_edges)-1\n means = np.zeros(Nbins)\n stds = np.zeros(Nbins)\n bin_centers = np.zeros(Nbins)\n for b in range(Nbins):\n y = Y[which_bin == b+1]\n bin_centers[b] = (bin_edges[b] + bin_edges[b+1]) / 2\n # Suppress noisy bins\n if len(y) < minimum_n:\n means[b] = np.nan\n stds[b] = np.nan\n else:\n # Mean or median\n if mean == 'mean':\n means[b] = np.nanmean(y)\n elif mean == 'median':\n means[b] = np.nanmedian(y)\n \n if error == 'sem':\n stds[b] = np.nanstd(y) / np.sqrt(len(y))\n elif error == 'std':\n stds[b] = np.nanstd(y)\n \n return means", "def test_Bin(self):\n\n outcome_three = Outcome(\"00-0-1-2-3\", 6 )\n outcome_four = Outcome(\"D\", 2)\n outcome_five = Outcome(\"E\", 3)\n outcome_six = Outcome(\"F\", 4)\n\n bin_one = Bin(outcome_three, outcome_four)\n print 'what is bin one?: ', bin_one\n bin_two = Bin(outcome_five, outcome_six)\n print 'what is bin two?: ', bin_two", "def _binoms(kernel_size):\n if kernel_size > 1:\n curr_kernel = BASE_KERNEL\n for i in range(2, kernel_size):\n curr_kernel = np.convolve(curr_kernel, BASE_KERNEL)\n return curr_kernel\n return np.array([1])", "def to_bins(arr):\n result = np.zeros(len(arr)+1)\n result[1:-1] = 0.5 * (arr[1:] + arr[:-1])\n result[0] = arr[0] - 0.5*(arr[1] - arr[0])\n result[-1] = arr[-1] + 0.5*(arr[-1] - arr[-2])\n return result", "def filter_binaries_beamprofile(bin_arr, beamprofile, cutoff=0.75, dilate=0):\r\n bp_bool = beamprofile < cutoff * beamprofile.max()\r\n out_binary = np.empty_like(bin_arr, dtype=int)\r\n total_cells = 0\r\n removed_cells = 0\r\n\r\n for i, img in enumerate(bin_arr):\r\n labeled, n = mh.labeled.label(img)\r\n total_cells += n\r\n for l in np.unique(labeled)[1:]:\r\n selected_binary = multi_dilate(labeled == l, dilate)\r\n if np.any(np.logical_and(selected_binary, bp_bool)): # Cell lies outside of\r\n labeled[labeled == l] = 0\r\n removed_cells += 1\r\n out_binary[i] = labeled\r\n print('Removed {} cells out of a total of {} cells.'.format(removed_cells, total_cells))\r\n return out_binary", "def bin_statistics(data,bin_against,bin_edges,data_signal=[]):\n\n assert isinstance(data, pd.DataFrame), 'data must be of type pd.DataFram' \n try: bin_against = np.asarray(bin_against) \n except: 'bin_against must be of type np.ndarray'\n try: bin_edges = np.asarray(bin_edges)\n except: 'bin_edges must be of type np.ndarray' \n\n # Determine variables to analyze\n if len(data_signal)==0: # if not specified, bin all variables\n data_signal=data.columns.values\n else:\n assert isinstance(data_signal, list), 'must be of type list'\n\n # Pre-allocate list variables\n bin_stat_list = []\n bin_std_list = []\n\n # loop through data_signal and get binned means\n for signal_name in data_signal:\n # Bin data\n bin_stat = binned_statistic(bin_against,data[signal_name],\n statistic='mean',bins=bin_edges)\n # Calculate std of bins\n std = []\n stdev = pd.DataFrame(data[signal_name])\n stdev.set_index(bin_stat.binnumber,inplace=True)\n for i in range(1,len(bin_stat.bin_edges)):\n try:\n temp = stdev.loc[i].std(ddof=0)\n std.append(temp[0])\n except:\n std.append(np.nan)\n bin_stat_list.append(bin_stat.statistic)\n bin_std_list.append(std)\n \n # Convert to 
DataFrames\n bin_mean = pd.DataFrame(np.transpose(bin_stat_list),columns=data_signal)\n bin_std = pd.DataFrame(np.transpose(bin_std_list),columns=data_signal)\n\n # Check for nans \n if bin_mean.isna().any().any():\n print('Warning: some bins may be empty!')\n\n return bin_mean, bin_std", "def bin_data(bins, data2bin, bindata, mode='mean', nbinned=False):\n assert mode in ['mean', 'median', 'std', 'max', 'min'], \"mode not recognized: {}\".format(mode)\n digitized = np.digitize(bindata, bins)\n binned = np.zeros(len(bins)) * np.nan\n if nbinned: \n numbinned = np.zeros(len(bins))\n\n if mode == 'mean':\n for i, _ in enumerate(bins):\n binned[i] = np.nanmean(data2bin[np.logical_and(np.isfinite(bindata), digitized == i+1)])\n if nbinned:\n numbinned[i] = np.count_nonzero(np.logical_and(np.isfinite(data2bin), digitized == i+1))\n elif mode == 'median':\n for i, _ in enumerate(bins):\n binned[i] = np.nanmedian(data2bin[np.logical_and(np.isfinite(bindata), digitized == i+1)])\n if nbinned:\n numbinned[i] = np.count_nonzero(np.logical_and(np.isfinite(data2bin), digitized == i+1))\n elif mode == 'std':\n for i, _ in enumerate(bins):\n binned[i] = np.nanstd(data2bin[np.logical_and(np.isfinite(bindata), digitized == i+1)])\n if nbinned:\n numbinned[i] = np.count_nonzero(np.logical_and(np.isfinite(data2bin), digitized == i+1))\n elif mode == 'max':\n for i, _ in enumerate(bins):\n binned[i] = np.nanmax(data2bin[np.logical_and(np.isfinite(bindata), digitized == i+1)])\n if nbinned:\n numbinned[i] = np.count_nonzero(np.logical_and(np.isfinite(data2bin), digitized == i+1))\n elif mode == 'min':\n for i, _ in enumerate(bins):\n binned[i] = np.nanmin(data2bin[np.logical_and(np.isfinite(bindata), digitized == i+1)])\n if nbinned:\n numbinned[i] = np.count_nonzero(np.logical_and(np.isfinite(data2bin), digitized == i+1))\n else:\n raise ValueError('mode must be mean, median, std, max, or min')\n \n if nbinned:\n return np.array(binned), np.array(numbinned)\n else:\n return np.array(binned)", "def get_bin_index(self, filter_bin):\n\n left_index = self.left_filter.get_bin_index(filter_bin[0])\n right_index = self.right_filter.get_bin_index(filter_bin[0])\n filter_index = left_index * self.right_filter.num_bins + right_index\n return filter_index", "def get_bin_index(self, filter_bin):\n\n left_index = self.left_filter.get_bin_index(filter_bin[0])\n right_index = self.right_filter.get_bin_index(filter_bin[0])\n filter_index = left_index * self.right_filter.num_bins + right_index\n return filter_index", "def compute_bb(self):\n all_shapes = list(self.parts.values()) + list(self.edges.values())\n bbox_vertices = cascaded_union(all_shapes).envelope.exterior.coords.xy\n min_x = min(bbox_vertices[0])\n max_x = max(bbox_vertices[0])\n min_y = min(bbox_vertices[1])\n max_y = max(bbox_vertices[1])\n return [min_x, max_x,min_y, max_y]", "def createBinsByEntropy(self, data, structure, colName, numOfBins):\n splits = self.miningCalculator.getBestSplitsInDataByInfoGain(data, structure, colName, numOfBins-1)\n splits.sort()\n bins = {\"value<=\"+str(splits[0]): lambda x: x <= splits[0]}\n if len(splits) > 1:\n for i in range(1, numOfBins-1):\n bins[str(splits[i-1]) + '<value<=' + str(splits[i])] = (lambda x: splits[i-1] < x <= splits[i])\n bins[\"value>\" + str(splits[len(splits)-1])] = (lambda x: x > splits[len(splits)-1])\n return bins", "def filter_binaries(bin_arr, remove_bordering=True, min_size=None, max_size=None, min_minor=None, max_minor=None,\r\n min_major=None, max_major=None):\r\n\r\n out = 
np.empty_like(bin_arr)\r\n for i, img in enumerate(bin_arr):\r\n if len(np.unique(img)) > 2: # Image is already labeled\r\n labeled = img\r\n else:\r\n labeled, n = mh.labeled.label(img)\r\n labeled, n = mh.labeled.filter_labeled(labeled, remove_bordering=remove_bordering, min_size=min_size, max_size=max_size)\r\n out[i] = (labeled > 0).astype(int) * labeled # Restore labels\r\n\r\n for j, img in enumerate(out):\r\n for i in np.unique(img)[1:]:\r\n selected_binary = (img == i).astype('int')\r\n min1, max1, min2, max2 = mh.bbox(selected_binary)\r\n selection = selected_binary[min1:max1, min2:max2]\r\n major, minor = mh.features.ellipse_axes(selection)\r\n\r\n if min_minor and minor < min_minor:\r\n img[img == i] = 0\r\n if max_minor and minor > max_minor:\r\n img[img == i] = 0\r\n if min_major and major < min_major:\r\n img[img == i] = 0\r\n if max_major and major > max_major:\r\n img[img == i] = 0\r\n\r\n return out", "def test_bins(self):\n\n for filename in ['%s/population_padang_1.asc' % TESTDATA,\n '%s/test_grid.asc' % TESTDATA]:\n\n R = read_layer(filename)\n rmin, rmax = R.get_extrema()\n\n for N in [2, 3, 5, 7, 10, 16]:\n linear_intervals = R.get_bins(N=N, quantiles=False)\n\n assert linear_intervals[0] == rmin\n assert linear_intervals[-1] == rmax\n\n d = (rmax - rmin) / N\n for i in range(N):\n assert numpy.allclose(linear_intervals[i], rmin + i * d)\n\n quantiles = R.get_bins(N=N, quantiles=True)\n A = R.get_data(nan=True).flat[:]\n\n mask = numpy.logical_not(numpy.isnan(A)) # Omit NaN's\n l1 = len(A)\n A = A.compress(mask)\n l2 = len(A)\n\n if filename == '%s/test_grid.asc' % TESTDATA:\n # Check that NaN's were removed\n assert l1 == 35\n assert l2 == 30\n\n # Assert that there are no NaN's\n assert not numpy.alltrue(numpy.isnan(A))\n\n number_of_elements = len(A)\n average_elements_per_bin = number_of_elements / N\n\n # Count elements in each bin and check\n i0 = quantiles[0]\n for i1 in quantiles[1:]:\n count = numpy.sum((i0 < A) & (A < i1))\n if i0 == quantiles[0]:\n refcount = count\n\n if i1 < quantiles[-1]:\n # Number of elements in each bin must vary by no\n # more than 1\n assert abs(count - refcount) <= 1\n assert abs(count - average_elements_per_bin) <= 3\n else:\n # The last bin is allowed vary by more\n pass\n\n i0 = i1", "def compute_bin_efficiencies(y_score, bin_indices, cut, sample_weight, minlength=None):\n y_score = column_or_1d(y_score)\n assert len(y_score) == len(sample_weight) == len(bin_indices), \"different size\"\n if minlength is None:\n minlength = numpy.max(bin_indices) + 1\n\n bin_total = numpy.bincount(bin_indices, weights=sample_weight, minlength=minlength)\n passed_cut = y_score > cut\n bin_passed_cut = numpy.bincount(bin_indices[passed_cut],\n weights=sample_weight[passed_cut], minlength=minlength)\n return bin_passed_cut / numpy.maximum(bin_total, 1)" ]
[ "0.7028389", "0.69049263", "0.68282115", "0.6756274", "0.65525955", "0.6534234", "0.6517673", "0.65156", "0.6511489", "0.6493547", "0.6453416", "0.63842267", "0.630369", "0.6285076", "0.62552357", "0.62531275", "0.6207189", "0.6184054", "0.6180757", "0.61535704", "0.61472905", "0.61317474", "0.6117792", "0.6092234", "0.60255945", "0.59861904", "0.5981901", "0.59649616", "0.59354675", "0.5932195", "0.5905469", "0.5842088", "0.5828209", "0.5804458", "0.5792083", "0.5789896", "0.575374", "0.5748606", "0.5740341", "0.57381374", "0.5737257", "0.5735844", "0.57312286", "0.57095677", "0.57010794", "0.56960064", "0.5691151", "0.5687027", "0.5680992", "0.56681645", "0.56649", "0.56646186", "0.5659573", "0.56416947", "0.56416684", "0.5633486", "0.5629551", "0.56012934", "0.55789465", "0.55645484", "0.5564454", "0.55463433", "0.55198586", "0.55178976", "0.5495198", "0.5492061", "0.5470556", "0.54694444", "0.54563844", "0.54544556", "0.5444149", "0.5438115", "0.541533", "0.5405104", "0.54009694", "0.5398532", "0.53853047", "0.5380422", "0.537311", "0.53667367", "0.53572166", "0.5345539", "0.53425616", "0.5341545", "0.534109", "0.5329469", "0.5325073", "0.53208125", "0.53199625", "0.5319453", "0.52922046", "0.5286837", "0.52820027", "0.5279666", "0.5279666", "0.52767116", "0.52578336", "0.5249415", "0.524326", "0.5242155" ]
0.6405554
11
Shannon Entropy Calculates the Shannon Entropy for the given data array x.
def entropy(x, bins, normalize=False, xy_probabilities=False): # calculate probabilities if xy_probabilities == False if xy_probabilities: # if x does not sum up to 1, raise an error if not np.isclose(sum(x),1,atol=0.0001): raise ValueError('Probabilities in vector x do not sum up to 1.') # add a small number to all probabilities if zero occurs if x.any(0): p = x + 1e-15 else: p = x else: # get the bins bins = np.histogram_bin_edges(x, bins) # calculate the empirical probabilities count = np.histogram(x, bins=bins)[0] # if counts should be None, raise an error if np.sum(count) == 0: raise ValueError('The histogram cannot be empty. Adjust the bins to ' + 'fit the data') # calculate the probabilities p = (count / np.sum(count)) + 1e-15 # calculate the Shannon Entropy if normalize: # get number of bins nbins = len(p) # maximal entropy: uniform distribution normalizer = np.log2(nbins) return - p.dot(np.log2(p)) / normalizer else: return - p.dot(np.log2(p))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ShannonEntropy(self,s):\n e = s[np.nonzero(s)]**2 * np.log(s[np.nonzero(s)]**2)\n return np.sum(e)", "def entropy(x):\n nz = np.nonzero(x)[0]\n return -np.sum(x[nz]*np.log2(x[nz]))", "def H(self, data):\n entropy = 0\n\n if not data:\n return entropy\n\n for x in range(256):\n p_x = float(data.count(chr(x))) / len(data)\n if p_x > 0:\n entropy -= p_x * math.log(p_x, 2)\n\n return entropy", "def salomonfcn(x: np.ndarray) -> np.ndarray:\n x2 = x**2\n sumx2 = np.sum(x2, axis=1)\n sqrtsx2 = np.sqrt(sumx2)\n\n scores = 1 - np.cos(2 * np.pi * sqrtsx2) + (0.1 * sqrtsx2)\n\n return scores", "def shannon_entropy(ps: np.ndarray, base: int = 2) -> float:\n\n return -np.sum(ps * np.log(ps) / np.log(base))", "def shannon_entropy(c):\n\n c_normalized = c / float(np.sum(c))\n c_normalized_nonzero = c_normalized[np.nonzero(c_normalized)] # gives 1D array\n entropy = -sum(c_normalized_nonzero * np.log2(c_normalized_nonzero)) # unit in bits\n return entropy", "def entropy(data):\n e = 0\n\n counter = collections.Counter(data)\n l = len(data)\n for count in counter.values():\n p_x = count / l\n e += - p_x * math.log2(p_x)\n\n return e", "def shannon_entropy(counts):\n freq = np.array(counts) * 1.0 / np.sum(counts)\n return -np.sum([f * np.log2(f) for f in freq if f != 0])", "def shannon_entropy(probs):\n return -(\n math.sum([px * math.log2(px) if px != 0 and not (np.isclose(px, 0)) else 0 for px in probs])\n )", "def entropy(x):\n x_max, x_min = x.max(), x.min()\n assert (x_min >= 0) and (x_max <= 1)\n if x_min == x_max == 0:\n return np.float32(0.)\n # Take only non-zero values as log(0) = 0 :\n nnz_x = x[np.nonzero(x)]\n entr = -np.sum(nnz_x * np.log2(nnz_x))\n\n return entr", "def entropy(data):\n n, m = np.shape(data)\n data = np.tanh(data)\n data = data / np.sum(data, axis=0)\n a = data * 1.0\n a[np.where(data == 0)] = 0.000001\n\n e = (-1.0 / np.log(n)) * np.sum(data * np.log(a), axis=0)\n w = (1 - e) / np.sum(1 - e)\n return w", "def entropy(y):\r\n\r\n # INSERT YOUR CODE HERE\r\n value, count = np.unique(y,return_counts = True)\r\n Hy = 0.0\r\n prob = count.astype(float)/len(y)\r\n for p in prob:\r\n Hy += -(p)*(np.log2(p))\r\n return Hy\r\n raise Exception('Function not yet implemented!')", "def entropy(data):\n\n freqs = {}\n suma = len(data)\n\n for i in range(0, len(data)):\n freqs[data[i]] = 1.0 + freqs.get(data[i], 0)\n\n res = 0.0\n for i in freqs:\n res += (freqs[i] / suma) * log((freqs[i] / suma), 2)\n return -res", "def shannon(counts, base=2):\n freqs = counts/float(counts.sum())\n nonzero_freqs = freqs[freqs.nonzero()]\n return -sum(nonzero_freqs*log(nonzero_freqs))/log(base)", "def entropy(self, X):\n if isinstance(X, np.ndarray):\n X = pd.DataFrame(X, index=[str(i) for i in range(len(X))])\n K = self._posterior_covariance(X)\n L = np.linalg.cholesky(K)\n D = len(X)\n return np.sum(np.log(np.diag(L))) + 0.5 * D * np.log(2*np.pi*np.exp(1))", "def hx(self, x):\n yp = np.sqrt(x[0] ** 2 + x[2] ** 2)\n return yp", "def entropy(data):\n strings, lens = Counter(data), np.float(len(data))\n return -sum(count / lens * np.log2(count / lens) for count in strings.values())", "def xinsheyangn1fcn(x: np.ndarray) -> np.ndarray:\n n = x.shape[1]\n scores = np.zeros((x.shape[0], 1))\n for i in range(n):\n scores += np.random.rand() * np.abs(x[:, i]) ** i\n return scores", "def xinsheyangn4fcn(x: np.ndarray) -> np.ndarray:\n scores = (\n np.sum(np.sin(x) ** 2, axis=1) - np.exp(-np.sum(x**2, axis=1))\n ) * np.exp(-np.sum(np.sin(np.sqrt(np.abs(x))) ** 2, axis=1))\n return scores", "def 
xinsheyangn2fcn(x: np.ndarray) -> np.ndarray:\n scores = np.sum(np.abs(x), axis=1) * np.exp(-np.sum(np.sin(x**2), axis=1))\n return scores", "def shubertfcn(x: np.ndarray) -> np.ndarray:\n n = x.shape[1]\n scores = np.ones((x.shape[0], 1))\n\n for i in range(n):\n inner_sum = 0\n for j in range(1, 6):\n inner_sum += j * np.cos((j + 1) * x[:, i] + j)\n scores *= inner_sum.reshape(-1, 1)\n\n return scores", "def arcosh(x):\n return torch.log(x+torch.sqrt(x*x - 1))", "def acosh(x):\n return math.log(x+math.sqrt(x*x-1))", "def test_shannon(self):\n c = array([5])\n self.assertFloatEqual(shannon(c), 0)\n c = array([5,5])\n self.assertFloatEqual(shannon(c), 1)\n c = array([1,1,1,1,0])\n self.assertEqual(shannon(c), 2)", "def entropy(self, base: int = None):\n\n # shannon entropy in nats\n fdist_ = self.fdist\n fdist_[\"prob\"] = fdist_[\"freq\"] / fdist_[\"freq\"].sum()\n fdist_[\"logp\"] = np.log(fdist_[\"prob\"])\n fdist_[\"nats\"] = -fdist_[\"prob\"] * fdist_[\"logp\"]\n entropy_ = fdist_[\"nats\"].sum()\n\n # convert base\n if base:\n entropy_ = entropy_ / np.log(base)\n\n # return\n return entropy_", "def computeJensenShanon(sequence, frames_first):\n \n sequence = np.asarray(sequence)\n \n assert sequence.ndim == 3 or sequence.ndim == 2\n \n \n JS = JensenShanon_()\n \n overlay, _ = fromSequenceToOverlay(sequence,\n frames_first=frames_first,\n apply_standardization=True)\n \n return JS.fit(overlay).entropy", "def entropy(y):\n return -1 * sum(\n [\n pipe(np.sum(y == value) / len(y), lambda ratio: ratio * np.log(ratio))\n for value in set(y)\n ]\n )", "def _entropy(data):\n hist = np.array(PIL.Image.fromarray(data).histogram())\n hist = hist / hist.sum()\n hist = hist[hist != 0]\n return -np.sum(hist * np.log2(hist))", "def H_complicated(x):\n _ = x**2\n _[0] += np.sin(2*x[1]*x[0])\n _[1] += -3*x[0]**3 + np.log(np.abs(x[0]))\n return _", "def entropy(y):\n p = _proba(y)\n return (-p * np.log2(p)).sum()", "def logistic(mu, hw, x): \n n = np.exp(- ((x-mu)/(.477*hw))**2)\n return (2. 
* n)/( 1 + n)", "def schaffer(self, x):\r\n N = len(x);\r\n s = x[0:N-1]**2 + x[1:N]**2;\r\n return sum(s**0.25 * (np.sin(50*s**0.1)**2 + 1))", "def entropy(y):\n total = y.size\n value_counts = np.bincount(y).astype(\"float\")\n proportions = value_counts / y.size\n\n return sum(-i * np.log(i) for i in proportions if i)", "def spherew(self, x):\r\n # return np.random.rand(1)[0]**0 * sum(x**2) + 1 * np.random.rand(1)[0]\r\n # s = sum(abs(x))\r\n # return sum((x/s+0)**2) - 1/len(x)\r\n # return sum((x/s)**2) - 1/len(x)\r\n return -0.01*x[0] + abs(x[0])**-2 * sum(x[1:]**2)", "def entropy(y):\n EPS = 0.0005\n\n # YOUR CODE HERE\n if len(y) == 0:\n return 0.\n \n pk = np.mean(y, axis=0)\n \n return - np.sum(pk * np.log(pk + EPS))", "def get_hess(self, x: np.ndarray) -> np.ndarray:\n hess = self(x, (2,), MODE_FUN)\n return hess", "def mutual_information(x, y):\r\n\r\n # INSERT YOUR CODE HERE\r\n xvalue, xcount = np.unique(x,return_counts = True)\r\n probx = xcount.astype(float)/len(x)\r\n Hyx = 0.0\r\n for pxval,xval in zip(probx,xvalue):\r\n Hyx += (pxval)*entropy(y[x==xval])\r\n \r\n Ixy = entropy(y) - Hyx\r\n return Ixy\r\n raise Exception('Function not yet implemented!')", "def entropy(s):\n p, lns = Counter(s), float(len(s))\n return -sum( count/lns * math.log(count/lns, 2) for count in p.values())", "def shannon(state_space):\n if isinstance(state_space, int) or len(state_space) == 1:\n return 0\n ws = sum(state_space.values())\n if ws == 0:\n print(state_space)\n return math.log(ws) - sum(map(lambda x: x * math.log(x), state_space.values())) / ws", "def schwefelfcn(x: np.ndarray) -> np.ndarray:\n n = x.shape[1]\n scores = 418.9829 * n - np.sum(x * np.sin(np.sqrt(np.abs(x))), axis=1)\n return scores", "def hessian(self, x):\n h = self._hess(\n time_series=self.observed_data,\n a=x[0],\n b=x[1],\n c=x[2],\n sigma=self.sigma\n )\n return h", "def entropy(self):\n n = len(self.y)\n sum_ = 0\n for i in np.unique(self.y):\n v = len(self.y[self.y == i])\n sum_ += -((v/n) * log2(v/n))\n return sum_", "def shubert4fcn(x: np.ndarray) -> np.ndarray:\n n = x.shape[1]\n\n scores = np.zeros((x.shape[0], 1))\n for i in range(n):\n for j in range(5):\n scores += np.cos(((j + 1) * x[:, i]) + j)\n\n return scores", "def entropy(self, params):\n log_std = params[:, :, 1]\n return (log_std + 0.5 * (self.LOG2PI + 1)).sum(dim=-1)", "def h(x):\n return torch.sin(3 * x) + 4.", "def entropy(Y):\n\n temp = np.unique(Y, return_counts=True)\n uniq_Y = list(temp[0])\n Y_count = list(temp[1])\n \n total = sum(Y_count)\n\n ent = 0\n for elem in uniq_Y:\n prob = Y_count[uniq_Y.index(elem)] / total\n # print(\"prob:\", prob)\n ent -= (prob * (math.log2(prob)))\n # print(\"ent:\",ent)\n\n return ent", "def vectorized_hessp(self, x, p):\n primals = self.vectorizer.unpack(x)\n tangents = self.vectorizer.unpack(p)\n hp_arrays = self.handler.hessp(primals, tangents)\n self._n += 1\n self.losses.append(self.loss)\n self._maybe_update_pbar()\n return self.vectorizer.pack(hp_arrays, \"hp\")", "def stoch(x):\n return x/np.sum(x)", "def entropy(self):\n Z = self.sum()\n assert (Z > 0), 'Non-normalizable factor (perhaps log factor?)' # also check for positivity?\n H = 0.0\n for x in np.nditer(self.t, op_flags=['readonly']):\n p = x/Z\n H += 0.0 if p==0 else -p*np.log(p)\n return H", "def exploitEntropy(self, position):\n pass", "def cosh(x):\n raise NotImplementedError", "def cEntropy(Y, X):\n return jEntropy(Y, X) - entropy(X)", "def h(x):\n\n hasher = hashlib.sha256()\n hasher.update(x)\n return hasher.digest()", "def 
easomfcn(x: np.ndarray) -> np.ndarray:\n\n n = x.shape[1]\n assert n == 2, \"The Easom's function is only defined on a 2D space.\"\n X = x[:, 0]\n Y = x[:, 1]\n\n scores = (\n -np.cos(X) * np.cos(Y) * np.exp(-((X - np.pi) ** 2 + (Y - np.pi) ** 2))\n )\n return scores", "def hann(self, _x):\n N = self.N\n return (1 - np.cos(2 * np.pi * _x / (N - 1))) / 2", "def Hamming(data):\r\n N=float(data.shape[0])\r\n temp=np.zeros(data.shape[0])\r\n for u, i in enumerate(data):\r\n temp[u]=(0.54-0.46*np.cos(2*np.pi*(u/N)))*i\r\n return temp", "def h(x):\n h = -x*np.math.log(x, 2) - (1 - x)*np.math.log(1 - x, 2)\n return h", "def entropy(self, y):\n n = y.size\n if n <= 1:\n return 0\n\n labels, counts = unique(y, return_counts=True)\n\n if counts.size <= 1:\n return 0\n\n probs = counts / n\n entropy = -sum([p * log(p, 2) for p in probs])\n return entropy", "def hessian(f, x, s=_DEFAULT_STEP):\n x = np.asarray(x)\n n = len(x)\n e = s * np.eye(n)\n\n forw1 = np.zeros(n)\n forw2 = np.zeros((n, n))\n for i in range(n):\n forw1[i] = f(x + e[i])\n for j in range(i, n):\n forw2[i, j] = forw2[j, i] = f(x + e[i] + e[j])\n\n H = (forw2 - _colvec(forw1) - _rowvec(forw1) + f(x)) / s**2\n return H", "def heavi(x):\n return 0.5 * (np.sign(x) + 1)", "def div(self):\n freqList = [i / sum(self.has.values()) for i in self.has.values()]\n entropies = [i * math.log(i, 2) for i in freqList]\n entropy = -sum(entropies)\n return entropy", "def entropy(data, idxList):\n df = data.loc[idxList]\n counts = df.value_counts().to_numpy()\n counts = counts.reshape(1, -1).astype(np.float32)\n counts /= np.sum(counts)\n log_sum = counts @ np.log2(counts.T)\n return -log_sum[0, 0]", "def f(self, x: np.array) -> np.array:\n return (1/np.sqrt(2*np.pi*self.sig**2))*np.exp(-1*((x - self.mu)**2/(2*self.sig**2)))", "def shubert3fcn(x: np.ndarray) -> np.ndarray:\n n = x.shape[1]\n scores = np.zeros((x.shape[0], 1))\n for i in range(n):\n for j in range(1, 6):\n scores += j * np.sin(((j + 1) * x[:, i]) + j)\n return scores", "def entropycell(self):\n cells = [0] * self.k\n for i in range(self.width):\n cells[int(self.config[self.t, i])] += 1\n\n \"\"\"Calculates the Shannon entropy and the the average entropy so far.\"\"\"\n shannon = 0\n for i in range(self.k):\n if(cells[i] != 0):\n probability = cells[i] / self.width\n shannon -= probability * np.log2(probability)\n self.average_cell = (self.average_cell * self.t + shannon) / (self.t + 1)", "def entropy ( target_array ):\n return -1 * sum (\n [\n pipe ( np.sum ( target_array == value ) / len ( target_array ), lambda ratio: ratio * np.log ( ratio ) )\n for value in set ( target_array )\n ]\n ) # End entropy()", "def _entropy(self, y):\n # Get size\n n = y.shape[0]\n summation = 0\n\n # Summatory\n for c_i in np.unique(y):\n prob = sum(y == c_i) / float(n)\n summation += prob * np.log2(prob)\n\n return -summation", "def var(x):\n length = len(x)\n\n if length == 0:\n return None\n result = 0.0\n m = TinyStatistician.mean(x)\n for i in x:\n result += (i - m) ** 2\n\n return result / length", "def gen_psi(self, x):\n\n if isinstance(x, jnp.ndarray):\n x = x[:, None]\n return jnp.exp(-self.h * (x - self.c) ** 2)", "def test_heip_e(self):\n c = array([1,2,3,1])\n h = shannon(c, base=e)\n expected = exp(h-1)/3\n self.assertEqual(heip_e(c), expected)", "def r_soft_hash(x):\n if abs(x) < 1e-9:return 0\n # round it to some number of bits\n b = ns.round(ns.log(abs(x)) / ns.log(2))\n gran = 2**(b-30)\n return ns.round(x / gran) * gran", "def _ss(data):\n c = sum(data)/len(data)\n ss = sum((x-c)**2 
for x in data)\n return ss", "def hx(self, xhat):\n zp = np.sqrt(xhat[0] ** 2 + xhat[2] ** 2)\n return zp", "def psi(n, x):\n H = h(n, x, orthonormal=True)\n weight = np.exp(-(x ** 2) / 2)\n psi = H * weight\n return psi", "def f(x):\n res = np.real(np.exp(-1j*x[1])*sum(y_obs[k+sample_size]*np.exp(1j*k*x[0]) \\\n for k in range(-sample_size,sample_size+1))) \n res = -res/np.sqrt(2*sample_size+1) \n return res", "def species_irreplaceability(x):\n x = x*100\n\n def h(x):\n # h(x) as specified\n miu = 39\n s = 9.5\n tmp = -(x - miu)/ s\n denominator = 1 + np.exp(tmp)\n return 1/denominator\n \n\n return (h(x) - h(0))/(h(100) - h(0))", "def sinh(x):\n raise NotImplementedError", "def equitability(counts, base=2):\n return shannon(counts, base)/(log((counts!=0).sum())/log(base))", "def entropy(self, priors=None):\n def entropy_f(x):\n x[x != 0] *= np.log(x[x != 0])\n return -x.sum(axis=0)\n return self.utility(entropy_f, priors)", "def chi(self, theta):\n yf = self.predict(self.t, theta)\n sse = np.sum(np.square((yf - self.y) / self.dy))\n return sse", "def __value(t,N):\n _,hist=np.unique(t, return_counts=True)\n hist=list(hist)\n hist.append(N-np.sum(hist))\n return EntropyD(np.array(hist))", "def acosh(x):\n return 0.0", "def entropy(img):\n # by calculating\n histogram = img.histogram()\n histogram_size = sum(histogram)\n histogram = [float(h) / histogram_size for h in histogram]\n\n return -sum([p * math.log(p, 2) for p in histogram if p != 0])", "def entropy(self):\n ent = 0.0\n for f in self.byte_freq:\n if f > 0:\n freq = float(f) / self.byte_total\n ent = ent + freq * math.log(freq, 2)\n return -ent", "def manual_log_like_normal(x, data):\n return np.sum(-np.log(x[1] * np.sqrt(2 * np.pi))-((data-x[0])**2) / (2*x[1]**2))", "def compute_energy(x, input_HP_sequence):\n # add code here, feel free to change the argument list\n # Given a input HP sequence, we already which points are H's.\n return U", "def h(self, X):\n if isinstance(X, int) or isinstance(X, float):\n if X < 1:\n x = max(0.001, X)\n a = np.log(x/2.)**2 - np.arccosh(1./x)**2\n elif X >= 1:\n a = np.log(X/2.)**2 + np.arccos(1./X)**2\n else:\n a=np.empty_like(X)\n X[X==0] = 0.001\n x = X[(X<1) & (X>0)]\n a[(X<1) & (X>0)] = np.log(x/2.)**2 - np.arccosh(1./x)**2\n x = X[X >= 1]\n a[X >= 1] = np.log(x/2.)**2 + np.arccos(1./x)**2\n return a", "def h_function(self, x, xsquare):\n return invert(self.l_function(powmod(self.public_key.g, x - 1, xsquare),x), x)", "def entropy(Y):\n unique, count = np.unique(Y, return_counts=True, axis=0)\n prob = count/len(Y)\n en = np.sum((-1)*prob*np.log2(prob))\n return en", "def toy_hamiltonian(x):\n q,p = extract_q_p(x)\n pSqr = tf.square(p)\n return 1/2 * tf.square(q - 1/4 * pSqr) + 1/32 * pSqr", "def shannons(data):\n \n from math import log as ln\n\n # from IPython.core.debugger import Tracer\n # Tracer()()\n \n def p(n, N):\n \"\"\" Relative abundance \"\"\"\n if n is 0:\n return 0\n else:\n return (float(n)/N) * ln(float(n)/N)\n \n N = sum(data)\n \n return -sum(p(n, N) for n in data if not n == 0)", "def density(self, x):\n\t\tN = len(self.train_data)\n\t\tpoints = list(self.train_data)\n\t\tdists = [np.linalg.norm(x-point)**2 for point in points]\n\t\texps = [np.exp(-dist / (2 * (self.bandwidth ** 2))) for dist in dists]\n\t\tunnormalized_sum = sum(exps)\n\t\tprobability = (1 / N) * self.normalizing_constant() * unnormalized_sum\n\t\treturn probability", "def hash_numpy(x: numpy.ndarray) -> int:\n x = x.astype(\"|S576\") if x.dtype == \"O\" else x\n return 
xxhash.xxh64_hexdigest(x.tobytes())", "def entropy(a):\n a = a.upper()\n\n freq = collections.defaultdict(int) # int() is the default constructor for non existent item, and returns 0\n for c in a:\n freq[c] = freq[c] + 1\n\n e = 0.0\n for f in freq.values():\n if f:\n p = f / len(a)\n e += p * math.log(p)\n\n return -e", "def f(x):\n res = np.real(np.exp(-1j*x[1])*\\\n sum(y_obs[k+sample_size]*np.exp(1j*k*x[0]) \\\n for k in range(-sample_size,sample_size+1))) \n res = -res/np.sqrt(2*sample_size+1) \n return res", "def hash_function(self, x):\n if not x:\n return -1\n hashed_value = 0\n\n for char in x:\n hashed_value = 181 * hashed_value + ord(char)\n\n return hashed_value % self.capacity", "def jensen_shannon(x, y, bins, calc_distance=False, xy_probabilities=False):\n # assert array length\n assert len(x) == len(y)\n\n if xy_probabilities:\n # if x does not sum up to 1, raise an error\n if not np.isclose(sum(x), 1 ,atol=0.0001):\n raise ValueError('Probabilities in vector x do not sum up to 1.')\n # if y does not sum up to 1, raise an error\n if not np.isclose(sum(y), 1, atol=0.0001):\n raise ValueError('Probabilities in vector y do not sum up to 1.')\n \n # add a small number to all probabilities if zero occurs\n if x.any(0):\n px = x + 1e-15\n py = y + 1e-15\n else:\n px = x\n py = y\n else:\n # get the bins, joint bins for x and y (same_bins=True)\n bins = get_2D_bins(x, y, bins, same_bins=True)\n\n # calculate unconditioned histograms\n hist_x = np.histogram(x, bins=bins[0])[0]\n hist_y = np.histogram(y, bins=bins[1])[0]\n\n # calculate probabilities\n px = (hist_x / np.sum(hist_x)) + 1e-15\n py = (hist_y / np.sum(hist_y)) + 1e-15\n\n # calculate m\n pm = 0.5 * (px + py)\n\n # calculate kullback-leibler divergence between px and pm & py and pm\n kl_xm = kullback_leibler(px, pm, bins=bins, xy_probabilities=True)\n kl_ym = kullback_leibler(py, pm, bins=bins, xy_probabilities=True)\n \n if calc_distance:\n return (0.5 * kl_xm + 0.5 * kl_ym)**0.5\n else:\n return (0.5 * kl_xm + 0.5 * kl_ym)", "def evenness(df):\n obs = shannon(df)\n count = df.shape[0]\n max_freq = 1.0 / count\n max_vector = np.repeat(max_freq,count)\n pre = -(sum(max_vector * np.log(max_vector)))\n return obs / pre", "def jeffreys(self, x):\n return np.sqrt(1. / x)", "def entropy(message):\n n = len(message)\n message = letter_freq(message)\n h = 0\n for n_i in message.values():\n p_i = n_i/n\n h += -p_i*(log2(p_i))\n return h" ]
[ "0.7339232", "0.6906172", "0.67469835", "0.66126776", "0.6580351", "0.6566007", "0.65266234", "0.646055", "0.6361494", "0.6340554", "0.62182045", "0.6194352", "0.6190961", "0.6157335", "0.6148306", "0.61020863", "0.6101131", "0.60966253", "0.6066495", "0.60436225", "0.6029721", "0.59834", "0.5931555", "0.5925839", "0.59182525", "0.58996916", "0.5898303", "0.5887441", "0.5859824", "0.58569765", "0.58440596", "0.5839691", "0.58200365", "0.5810351", "0.5798393", "0.578523", "0.57807714", "0.57777876", "0.5770964", "0.5757985", "0.5704455", "0.5683404", "0.56801724", "0.5656068", "0.56245786", "0.56197053", "0.5606164", "0.5585159", "0.5557278", "0.5542862", "0.553604", "0.55303204", "0.5519091", "0.55129105", "0.5510744", "0.5506213", "0.549372", "0.5491815", "0.54911304", "0.5472284", "0.5465591", "0.54650146", "0.54544616", "0.5443581", "0.544332", "0.5432493", "0.54295903", "0.54291785", "0.5420431", "0.541597", "0.5412896", "0.5412091", "0.5411842", "0.53992075", "0.53878903", "0.5385198", "0.53848237", "0.5371965", "0.5369404", "0.5368891", "0.53561705", "0.53470343", "0.534513", "0.5339327", "0.5335368", "0.5326147", "0.5315033", "0.5311857", "0.53029484", "0.5299311", "0.5294926", "0.5291466", "0.52912486", "0.52791643", "0.52769536", "0.5272159", "0.52682436", "0.52594215", "0.5257753", "0.52562857" ]
0.6537447
6
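The row above pairs the Shannon-entropy query with an implementation that builds empirical bin probabilities from a histogram and evaluates -sum(p * log2(p)), optionally normalized by the uniform-distribution maximum. A minimal standalone sketch of that calculation follows; it is illustrative only and not a dataset row, and the sample data and bin count are assumptions.

import numpy as np

# Shannon entropy from empirical bin probabilities, mirroring the entropy()
# implementation stored in the document field of the preceding row.
x = np.random.normal(size=1000)          # hypothetical sample data
counts, _ = np.histogram(x, bins=10)     # empirical bin counts
p = counts / np.sum(counts) + 1e-15      # probabilities; small offset guards against log2(0)
h = -p.dot(np.log2(p))                   # Shannon entropy in bits
h_norm = h / np.log2(len(p))             # normalized by the maximal (uniform) entropy over 10 bins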
Conditional Entropy Calculates the conditional Shannon Entropy for two discrete distributions. This metric gives the entropy of the distribution of x in case the distribution of y is known.
def conditional_entropy(x, y, bins, normalize=False): # get the bins bins = get_2D_bins(x, y, bins) # calculate H(x,y) and H(y) hjoint = joint_entropy(x,y,bins) hy = entropy(y, bins[1]) if normalize: normalizer = entropy(x, bins[0]) conditional_entropy = hjoint - hy # check if conditional entropy and normalizer are very small if conditional_entropy < 1e-4 and normalizer < 1e-4: # return zero to prevent very high values of normalized conditional entropy # e.g. conditional entropy = -1.3e-12, normalizer = -1.6e-12 # -> normalized conditional entropy = 812.5 return 0 else: return conditional_entropy / normalizer else: return hjoint - hy
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def conditional_entropy(f1, f2):\n\n ce = ee.entropyd(f1) - ee.midd(f1, f2)\n return ce", "def entropy(y):\n return -1 * sum(\n [\n pipe(np.sum(y == value) / len(y), lambda ratio: ratio * np.log(ratio))\n for value in set(y)\n ]\n )", "def conditional_entropy(self) -> float:\n pass", "def entropy(a, b):\n with mp.extradps(5):\n a, b = _validate_a_b(a, b)\n return (_fun.logbeta(a, b)\n - (a - 1)*mp.psi(0, a)\n - (b - 1)*mp.psi(0, b)\n + (a + b - 2)*mp.psi(0, a + b))", "def mutual_information(x, y):\r\n\r\n # INSERT YOUR CODE HERE\r\n xvalue, xcount = np.unique(x,return_counts = True)\r\n probx = xcount.astype(float)/len(x)\r\n Hyx = 0.0\r\n for pxval,xval in zip(probx,xvalue):\r\n Hyx += (pxval)*entropy(y[x==xval])\r\n \r\n Ixy = entropy(y) - Hyx\r\n return Ixy\r\n raise Exception('Function not yet implemented!')", "def cEntropy(Y, X):\n return jEntropy(Y, X) - entropy(X)", "def entropy(y):\r\n\r\n # INSERT YOUR CODE HERE\r\n value, count = np.unique(y,return_counts = True)\r\n Hy = 0.0\r\n prob = count.astype(float)/len(y)\r\n for p in prob:\r\n Hy += -(p)*(np.log2(p))\r\n return Hy\r\n raise Exception('Function not yet implemented!')", "def conditional_entropy_hyper(self) -> float:\n pass", "def cross_entropy(x, y, bins, xy_probabilities=False):\n # calculate probabilities if probabilities == False\n if xy_probabilities:\n # same bins for x and y -> same length of x and y if xy_probabilities == True\n assert len(x) == len(y)\n\n # if x does not sum up to 1, raise an error\n if not np.isclose(sum(x),1,atol=0.0001):\n raise ValueError('Probabilities in vector x do not sum up to 1.')\n # if y does not sum up to 1, raise an error\n if not np.isclose(sum(y),1,atol=0.0001):\n raise ValueError('Probabilities in vector y do not sum up to 1.')\n\n # add a small number to all probabilities if zero occurs\n if x.any(0):\n px = x + 1e-15\n py = y + 1e-15\n else:\n px = x\n py = y\n else:\n # get the bins, joint bins for x and y (same_bins=True)\n bins = get_2D_bins(x, y, bins, same_bins=True)\n\n # calculate unconditioned histograms\n hist_x = np.histogram(x, bins=bins[0])[0]\n hist_y = np.histogram(y, bins=bins[1])[0]\n\n px = (hist_x / np.sum(hist_x)) + 1e-15\n py = (hist_y / np.sum(hist_y)) + 1e-15\n\n return - px.dot(np.log2(py))", "def _cal_igr(x, y):\n return (_cal_entropy(y) - _cal_conditionalEnt(x, y)) / _cal_conditionalEnt(x, y)", "def cross_entropy(x, y):\n\n if len(y.shape) == 1:\n return F.cross_entropy(x, y)\n if y.shape[1] == 1:\n y = y.squeeze(1)\n return F.cross_entropy(x, y)\n\n return torch.mean(\n torch.div(\n F.binary_cross_entropy_with_logits(x, y, reduction=\"none\"),\n torch.sum(y, dim=1),\n )\n )", "def entropy(y):\n total = y.size\n value_counts = np.bincount(y).astype(\"float\")\n proportions = value_counts / y.size\n\n return sum(-i * np.log(i) for i in proportions if i)", "def entropy(x):\n nz = np.nonzero(x)[0]\n return -np.sum(x[nz]*np.log2(x[nz]))", "def entropy(Y):\n\n temp = np.unique(Y, return_counts=True)\n uniq_Y = list(temp[0])\n Y_count = list(temp[1])\n \n total = sum(Y_count)\n\n ent = 0\n for elem in uniq_Y:\n prob = Y_count[uniq_Y.index(elem)] / total\n # print(\"prob:\", prob)\n ent -= (prob * (math.log2(prob)))\n # print(\"ent:\",ent)\n\n return ent", "def entropy(y):\n EPS = 0.0005\n\n # YOUR CODE HERE\n if len(y) == 0:\n return 0.\n \n pk = np.mean(y, axis=0)\n \n return - np.sum(pk * np.log(pk + EPS))", "def _conditional_entropy_compute(confmat: Tensor) ->Tensor:\n confmat = _drop_empty_rows_and_cols(confmat)\n total_occurrences = confmat.sum()\n 
p_xy_m = confmat / total_occurrences\n p_y = confmat.sum(1) / total_occurrences\n p_y_m = p_y.unsqueeze(1).repeat(1, p_xy_m.shape[1])\n return torch.nansum(p_xy_m * torch.log(p_y_m / p_xy_m))", "def js_divergence(dist1, dist2):\n mean_dist = (dist1 + dist2) / 2.0\n js = (\n scipy.stats.entropy(dist1, mean_dist) + scipy.stats.entropy(dist2, mean_dist)\n ) / 2.0\n return js", "def _entropy_filter(self, prob1, prob2):\n\n\n # calculate merged prob.\n prob_merged = (prob1 + prob2)/2\n # Compute entropy for each prob.\n H1 = -prob1 * math.log(prob1) - (1-prob1) * math.log(1-prob1)\n H2 = -prob2 * math.log(prob2) - (1-prob2) * math.log(1-prob2)\n Hm = -prob_merged * math.log(prob_merged) - (1-prob_merged) * math.log(1-prob_merged)\n\n H_min = min(H1, H2, Hm)\n\n if H_min == H1:\n return prob1\n elif H_min == H2:\n return prob2\n else:\n return prob_merged", "def entropy(y):\n p = _proba(y)\n return (-p * np.log2(p)).sum()", "def calc_conditional_entropy(map,data_stat,attribute):\n #acquire the data info of the attribute stored in data_stat\n data_info = data_stat[attribute]\n #acquire the label info\n # label_col = len(data_stat)-1\n label_col = data_stat.keys()[-1]\n # print(data_stat.keys())\n label_info = data_stat[label_col]\n #acquire the data \n data = map[attribute]\n labels = map[label_col]\n conditional_entropy =0\n for data_type in data_info:\n specific_entropy = 0\n for label_type in label_info: \n #attribute data indices where all data entries are equal to a speicifc value\n data_with_spec_val_idx = data_info[data_type]\n #label indices where all labels are of same value\n spec_label_idx = label_info[label_type]\n #the intersection of the two indices above\n intersect_idx = np.intersect1d(data_with_spec_val_idx,spec_label_idx)\n #conditional probability of label being of specific value given speicific data value\n temp_prob = len(intersect_idx)/float(len(data_with_spec_val_idx))\n if temp_prob!=0:\n specific_entropy += temp_prob*math.log(temp_prob,2)\n specific_entropy = -specific_entropy\n prob = len(data_with_spec_val_idx)/float(len(data))\n conditional_entropy += prob * specific_entropy\n return conditional_entropy", "def joint_entropy(x, y, bins):\n # assert array length\n assert len(x) == len(y)\n\n # get the bins, x and y get their own bins in case of joint entropy\n bins = get_2D_bins(x, y, bins)\n\n # get the joint histogram\n joint_hist = np.histogram2d(x, y, bins)[0]\n\n # calculate the joint probability and add a small number\n joint_p = (joint_hist / np.sum(joint_hist)) + 1e-15\n\n # calculate and return the joint entropy\n return - np.sum(joint_p * np.log2(joint_p))", "def _entropy(self, y):\n # Get size\n n = y.shape[0]\n summation = 0\n\n # Summatory\n for c_i in np.unique(y):\n prob = sum(y == c_i) / float(n)\n summation += prob * np.log2(prob)\n\n return -summation", "def entropy(self, y):\n n = y.size\n if n <= 1:\n return 0\n\n labels, counts = unique(y, return_counts=True)\n\n if counts.size <= 1:\n return 0\n\n probs = counts / n\n entropy = -sum([p * log(p, 2) for p in probs])\n return entropy", "def entropy(y,w):\r\n\r\n\t# my original entropy function commented below is not working as desired. 
The below implementation is based on from Sai Ram Chappidi's explanation\r\n\r\n # y_partition = partition(y)\r\n # elements,counts = np.unique(y,return_counts = True)\r\n # entropy=0\r\n\r\n # for i in range(len(elements)):\r\n # entropy += ((-(np.sum(w[y_partition[i]])))/np.sum(w))*np.log2(np.sum(w[y_partition[i]])/np.sum(w))\r\n # return entropy\r\n\r\n entropy = 0\r\n # two hypothesis cases 0,1\r\n h = {0: 0, 1: 0}\r\n leny = len(y)\r\n for i in range(leny):\r\n # if y is 0 add 0 to the weight\r\n if y[i] == 0:\r\n h[0] += w[i]\r\n # if y is 1 add 1 to the weight\r\n elif y[i] == 1:\r\n h[1] += + w[i]\r\n # summing all the weighted values \r\n val_sum = h[0] + h[1]\r\n\r\n # entropy calculation\r\n for j in range(len(h)):\r\n h[j] = h[j]/val_sum\r\n # to prevent divide by zero\r\n if h[j] != 0:\r\n entropy += h[j] * np.log2(h[j])\r\n entropy = -(entropy)\r\n return entropy", "def transfer_entropy(X, Y):\n coords = Counter(zip(Y[1:], X[:-1], Y[:-1]))\n\n p_dist = np.zeros((config.NUM_STATES, config.NUM_STATES, config.NUM_STATES))\n for y_f, x_p, y_p in coords.keys():\n p_dist[y_p, y_f, x_p] = coords[(y_f, x_p, y_p)] / (len(X) - 1)\n\n p_yp = p_dist.sum(axis=2).sum(axis=1)\n p_joint_cond_yp = p_dist / p_yp[:, None, None]\n p_yf_cond_yp = p_dist.sum(axis=2) / p_yp[:, None]\n p_xp_cond_yp = p_dist.sum(axis=1) / p_yp[:, None]\n\n denominator = np.multiply(p_yf_cond_yp, p_xp_cond_yp)\n denominator[denominator == 0] = np.nan\n\n division = np.divide(p_joint_cond_yp, denominator[:, :, None])\n division[division == 0] = np.nan\n\n log = np.log2(division)\n\n return np.nansum(np.multiply(p_dist, log))", "def entropy_coefficient(filter1, filter2, base=2):\n\n if (type(filter1) is NullField) or (type(filter2) is NullField):\n return 0\n\n total_count = int(filter1.bit_size)\n\n f1_element_count = filter1.filter.count(True)\n f2_element_count = filter2.filter.count(True)\n\n prob_f1 = f1_element_count / total_count\n prob_f2 = f1_element_count / total_count\n\n e_f1 = -1.0 * total_count * prob_f1 * math.log(prob_f1) / math.log(base)\n e_f2 = -1.0 * total_count * prob_f2 * math.log(prob_f2) / math.log(base)\n\n entropy = abs(e_f1 - e_f2)\n\n # for element_count in Counter(data).values():\n # p = element_count / total_count\n # entropy -= p * math.log(p, self.base)\n\n assert entropy >= 0\n\n return 1 - entropy", "def entropy(Y):\n unique, count = np.unique(Y, return_counts=True, axis=0)\n prob = count/len(Y)\n en = np.sum((-1)*prob*np.log2(prob))\n return en", "def entropy(x):\n x_max, x_min = x.max(), x.min()\n assert (x_min >= 0) and (x_max <= 1)\n if x_min == x_max == 0:\n return np.float32(0.)\n # Take only non-zero values as log(0) = 0 :\n nnz_x = x[np.nonzero(x)]\n entr = -np.sum(nnz_x * np.log2(nnz_x))\n\n return entr", "def cross_entropy(X, y):\n return lambda theta: -y * np.log(logistic_hypothesis(theta)(X) + 1e-9) - (\n 1 - y\n ) * np.log(1 - logistic_hypothesis(theta)(X) + 1e-9)", "def shannon_entropy(probs):\n return -(\n math.sum([px * math.log2(px) if px != 0 and not (np.isclose(px, 0)) else 0 for px in probs])\n )", "def compute_empirical_conditional_distribution(var1_values, var2_values):\n conditional_distributions = {x2: {} for x2 in set(var2_values)}\n\n # -------------------------------------------------------------------------\n # YOUR CODE HERE\n #\n \n # Compute the empirical distribution of var2_values\n x2_prob = compute_empirical_distribution(var2_values)\n \n # Compute the empirical joint distribution of var1_values and var2_values\n joint_prob_x1_x2 = 
compute_empirical_distribution(list(zip(var1_values, var2_values)))\n \n for x2 in var2_values:\n for x1 in var1_values:\n conditional_distributions[x2][x1] = joint_prob_x1_x2[x1, x2]/x2_prob[x2]\n\n #\n # END OF YOUR CODE\n # -------------------------------------------------------------------------\n\n return conditional_distributions", "def calculate_entropy(y):\n\tlog2 = lambda x: math.log(x) / math.log(2)\n\tunique_labels = np.unique(y)\n\tentropy = 0\n\tfor label in unique_labels:\n\t\tcount = len(y[y == label])\n\t\tp = count / len(y)\n\t\tentropy += -p * log2(p)\n\treturn entropy", "def information_gain(f1, f2):\n\n ig = ee.entropyd(f1) - conditional_entropy(f1, f2)\n return ig", "def entropy(dist):\n #dist = array([max(d,1e-100) for d in dist])\n dist = dist + 1e-20\n return dot(dist,(log(1.0/dist) * (1.0/log(2.0))).T)", "def dist_calc(self, x, y):\n p_xy = self.d2_bin(x, y)\n p_x = np.sum(p_xy, axis=1)\n p_y = np.sum(p_xy, axis=0)\n\n p_x_times_p_y = np.tensordot(p_x, p_y, axes = 0)\n info = np.sum(p_xy * np.ma.log(np.ma.divide(p_xy, p_x_times_p_y)))\n entropy = np.sum(-1 * p_xy * np.ma.log(p_xy))\n\n output = max(0.0, (1 - (info / entropy)))\n return output", "def chl_entropy(y, base=2):\n p,bins = histogram(y, bins=unique(y)) # don't use 'Normed' feature, since that includes the bin-width!\n p = p[p!=0]/float(len(y))\n S = -1.0*sum(p*log(p))/log(base)\n return S", "def entropy(x, bins, normalize=False, xy_probabilities=False):\n # calculate probabilities if xy_probabilities == False\n if xy_probabilities:\n # if x does not sum up to 1, raise an error\n if not np.isclose(sum(x),1,atol=0.0001):\n raise ValueError('Probabilities in vector x do not sum up to 1.')\n \n # add a small number to all probabilities if zero occurs\n if x.any(0):\n p = x + 1e-15\n else:\n p = x\n else:\n # get the bins\n bins = np.histogram_bin_edges(x, bins)\n\n # calculate the empirical probabilities\n count = np.histogram(x, bins=bins)[0]\n\n # if counts should be None, raise an error\n if np.sum(count) == 0:\n raise ValueError('The histogram cannot be empty. 
Adjust the bins to ' +\n 'fit the data')\n # calculate the probabilities\n p = (count / np.sum(count)) + 1e-15\n\n\n # calculate the Shannon Entropy\n if normalize:\n # get number of bins\n nbins = len(p)\n # maximal entropy: uniform distribution\n normalizer = np.log2(nbins) \n\n return - p.dot(np.log2(p)) / normalizer\n else:\n return - p.dot(np.log2(p))", "def alt_cohen_d(x_arr, y_arr):\n delta = np.mean(x_arr) - np.mean(y_arr)\n pooled_std = np.sqrt((np.std(x_arr, ddof=1) ** 2 +\n np.std(y_arr, ddof=1) ** 2) / 2.0)\n return delta / pooled_std", "def entropyDistributed(distribution):\n return -sum(map(lambda p : p * log(p, 2), distribution))", "def cond_entropy(joint_prob, cond_prob):\n # Computing log2(P cond)\n log2_p = (np.ma.log2(cond_prob)).filled(0)\n # Multipling element wise the arrays\n prod_entropy = np.multiply(joint_prob, log2_p)\n # Getting the - sum of the resulting array.\n H = -( np.sum(prod_entropy))\n return H", "def logistic_loss(x, y):\n N = x.shape[0]\n x = np.squeeze(x)\n y_prime = (y + 1)/2\n h = 1 /(1 + np.exp(-x))\n loss = np.sum(-np.log( (h**y_prime) * ((1-h)**(1-y_prime)) ))/N\n dx = np.exp(-y*x)*(-y)/(1+np.exp(-y*x))/N\n return loss, dx", "def div(self):\n freqList = [i / sum(self.has.values()) for i in self.has.values()]\n entropies = [i * math.log(i, 2) for i in freqList]\n entropy = -sum(entropies)\n return entropy", "def symbolic_transfer_entropy(symX, symY):\n\n if len(symX) != len(symY):\n raise ValueError('All arrays must have same length')\n \n symX = np.array(symX)\n symY = np.array(symY)\n \n cp = symbolic_conditional_probabilities_consecutive(symX)\n cp2 = symbolic_conditional_probabilities_consecutive_external(symX, symY)\n jp = symbolic_joint_probabilities_consecutive_external(symX, symY)\n \n TE = 0\n \n for yi, xi, xii in list(jp.keys()):\n try:\n a = cp[xi,xii]\n b = cp2[yi,xi,xii]\n c = jp[yi,xi,xii]\n TE += c * np.log(b / a) / np.log(2.)\n except KeyError:\n continue\n except:\n print(\"Unexpected Error\")\n raise\n del cp\n del cp2\n del jp\n \n return TE", "def entropyCategorical(attr, X, y):\n uniques = X[attr].unique().tolist()\n idxLists = []\n entropies = []\n weights = []\n for u in uniques:\n idxLists.append(X.index[X[attr] == u].tolist())\n entropies.append(entropy(y, idxLists[-1]))\n weights.append(len(idxLists[-1]))\n\n entropies = np.array(entropies).reshape(1, -1)\n weights = np.array(weights).reshape(-1, 1).astype(np.float32)\n weights /= np.sum(weights)\n\n return (uniques, idxLists, (entropies @ weights)[0, 0])", "def entropy(self):\n raise NotImplementedError", "def entropy(self):\r\n return 1/2 * (self.dim * (_LOG_2PI + 1) + self._log_det_cov)", "def _calculate_probs_and_entropy_y(self):\n #calculate y probabilities and H(Y)\n #H(Y) = Sum(y € Y)(-P(Y=y) * log(P(Y=y)))\n self.lab_entropy = 0\n s = sum(self.lab_counts.values())\n for label, count in self.lab_counts.items():\n self.lab_probs[label] = count / s\n self.lab_entropy -= self.lab_probs[label] * self.log(self.lab_probs[label])", "def shannon_entropy(c):\n\n c_normalized = c / float(np.sum(c))\n c_normalized_nonzero = c_normalized[np.nonzero(c_normalized)] # gives 1D array\n entropy = -sum(c_normalized_nonzero * np.log2(c_normalized_nonzero)) # unit in bits\n return entropy", "def entropy ( target_array ):\n return -1 * sum (\n [\n pipe ( np.sum ( target_array == value ) / len ( target_array ), lambda ratio: ratio * np.log ( ratio ) )\n for value in set ( target_array )\n ]\n ) # End entropy()", "def entropy(self, **kwargs) -> TensorType:", "def entropy(self, 
**kwargs) -> TensorType:", "def condentropy(truelabels, labels):\n labels=array(labels)\n truelabels=array(truelabels)\n \n condent=0.\n for l in xrange(min(labels),max(labels)+1):\n sublabels = truelabels[ labels==l ]\n condent += len(sublabels)*chl_entropy( sublabels )\n return condent/float(len(labels))", "def entropy(self):\n n = len(self.y)\n sum_ = 0\n for i in np.unique(self.y):\n v = len(self.y[self.y == i])\n sum_ += -((v/n) * log2(v/n))\n return sum_", "def entropy(data):\n e = 0\n\n counter = collections.Counter(data)\n l = len(data)\n for count in counter.values():\n p_x = count / l\n e += - p_x * math.log2(p_x)\n\n return e", "def _entropies(self):\n H_C = fentropy(self.row_totals)\n H_K = fentropy(self.col_totals)\n H_actual = fentropy(self.itervalues())\n H_expected = H_C + H_K\n I_CK = H_expected - H_actual\n return H_C, H_K, I_CK", "def entropy(probabilities):\n return -(sum([p * log(p, 2) if p > 0 else 0 for p in probabilities]))", "def mutual_info(l1, l2):\n return entropy(l1) + entropy(l2) - entropy(joint_dataset(l1, l2))", "def entropy(temp,pres):\n g_t = liq_g(1,0,temp,pres)\n s = -g_t\n return s", "def entropy(data):\n strings, lens = Counter(data), np.float(len(data))\n return -sum(count / lens * np.log2(count / lens) for count in strings.values())", "def conditional_entropy(df, var, var_t):\n row_list = df \\\n .groupBy(var) \\\n .agg(count(\"*\").alias('num_entries')) \\\n .withColumn('all', lit('all')) \\\n .withColumn('total_num_entries', sql_sum('num_entries').over(Window.partitionBy('all'))) \\\n .withColumn('pcg', col('num_entries') / col('total_num_entries')) \\\n .select(var, 'pcg').collect()\n\n cat_and_weight = [(r[var], r['pcg']) for r in row_list]\n\n return sum([w * single_entropy(df=df.filter(col(var) == c), var=var_t) for (c, w) in cat_and_weight])", "def conditional_pdf(self, x1, x2 = None):\n return np.exp(self.conditional_logpdf(x1, x2))", "def shannon_entropy(counts):\n freq = np.array(counts) * 1.0 / np.sum(counts)\n return -np.sum([f * np.log2(f) for f in freq if f != 0])", "def cross_entropy_error(self, x, y):\n return -1 * sum([y[i] * np.log(self.logistic_function(self.weights.dot(x[i]))) + (1-y[i]) * np.log(1-self.logistic_function(self.weights.dot(x[i]))) for i in range(len(y))])", "def entropy_function(c, n):\n return -(c*1.0/n)*math.log(c*1.0/n,2)", "def entropy(img):\n # by calculating\n histogram = img.histogram()\n histogram_size = sum(histogram)\n histogram = [float(h) / histogram_size for h in histogram]\n\n return -sum([p * math.log(p, 2) for p in histogram if p != 0])", "def get_entropy(distribution, samples):\n entropy = -tf.reduce_sum(distribution.log_prob(samples), axis=1)\n return entropy", "def mutual_information(x, y, w):\r\n \r\n\r\n total_entropy = entropy(y,w)\r\n\r\n partitioned_x = partition(x)\r\n weighted_entropy = 0\r\n # calculate the weighted entropy over the partition of x\r\n vals,counts= np.unique(x,return_counts=True)\r\n for key in partitioned_x:\r\n weighted_entropy += np.sum([(np.sum(w[partitioned_x[key]])/np.sum(w)) * entropy(y[partitioned_x[key]],w[partitioned_x[key]])])\r\n\r\n information_gain = total_entropy - weighted_entropy\r\n return information_gain", "def entropy(self, X):\n if isinstance(X, np.ndarray):\n X = pd.DataFrame(X, index=[str(i) for i in range(len(X))])\n K = self._posterior_covariance(X)\n L = np.linalg.cholesky(K)\n D = len(X)\n return np.sum(np.log(np.diag(L))) + 0.5 * D * np.log(2*np.pi*np.exp(1))", "def entropy(self, args):\n mean, stddev = args\n dist = 
tfp.distributions.Normal(loc=mean, scale=stddev)\n entropy = dist.entropy()\n return entropy", "def entropy(counts):\n assert (counts >= 0).all()\n probs = counts / counts.sum()\n probs = probs[probs > 0] # Avoid log(0)\n return - np.sum(probs * np.log2(probs))", "def gain(Y, X):\n return entropy(Y) - cEntropy(Y,X)", "def get_entropy(*labels):\n entropies = [] #list of entropy values from each subset\n total = 0 #total number of datapoints\n for subset in labels:\n n = len(subset)\n total += n\n counts = np.unique(subset, return_counts=True)[1] #frequency of unique values\n entropy = np.sum([-(i/n) * np.log2(i/n) for i in counts]) #subset entropy calcuation\n entropies.append((entropy, n))\n return np.sum([(n/total) * ent for n, ent in iter(entropies)])", "def cohen_d(x_arr, y_arr):\n delta = np.mean(x_arr) - np.mean(y_arr)\n pooled_std = np.sqrt(\n (\n (len(x_arr) - 1) * np.std(x_arr, ddof=1) ** 2 +\n (len(y_arr) - 1) * np.std(y_arr, ddof=1) ** 2\n ) / (len(x_arr) + len(y_arr))\n )\n return delta / pooled_std", "def _entropy(data):\n hist = np.array(PIL.Image.fromarray(data).histogram())\n hist = hist / hist.sum()\n hist = hist[hist != 0]\n return -np.sum(hist * np.log2(hist))", "def cohens_d(x, y):\n nx, ny = len(x), len(y)\n pooled_variance = ((nx - 1) * np.std(x, ddof=1) ** 2 +\n (ny - 1) * np.std(y, ddof=1) ** 2) / \\\n ((nx - 1) + (ny - 1))\n return (np.mean(x) - np.mean(y)) / np.sqrt(pooled_variance)", "def conditional_entropy(P):\n P_nan = P.copy()\n P_nan[P_nan == 0] = np.nan\n\n marginals = np.nansum(P_nan, axis=1)\n P_cond = P_nan / marginals[:, None]\n\n return np.nansum(np.multiply(P_nan, np.log2(1 / P_cond)))", "def prob_larger_continuous(distr1, distr2):\n\n return distr1.expect(distr2.cdf)", "def _entropy2(labels, base=None):\n\n n_labels = len(labels)\n\n if n_labels <= 1:\n return 0\n\n value,counts = np.unique(labels, return_counts=True)\n probs = counts / n_labels\n n_classes = np.count_nonzero(probs)\n\n if n_classes <= 1:\n return 0\n\n ent = 0.\n\n # Compute entropy\n base = e if base is None else base\n for i in probs:\n ent -= i * log(i, base)\n\n # quick observation shows ent between 0.0 and 4.0.\n return ent", "def _calculate_probs_and_entropy_x(self, columns):\n #calculate x probabilities and H(Xi)\n #H(Xi) = Sum(x € Xi)(-P(Xi=x) * log(P(Xi=x)))\n for col in columns:\n self.cat_entropies[col] = 0\n xsum = 0\n for val in self.categories[col]:\n self.cat_probs[col][val] = 0\n for label in self.labels:\n self.cat_probs[col][val] += self.cat_counts[col][label][val]\n xsum += self.cat_probs[col][val]\n for val in self.categories[col]:\n self.cat_probs[col][val] /= xsum\n self.cat_entropies[col] -= self.cat_probs[col][val] * self.log(self.cat_probs[col][val])", "def mutual_information(x, y, logfunc=np.log2, nperms=1e4):\n def entropy(freqDict):\n return -np.array([p*logFunc(p) for p in freqDict.values()]).sum()\n freqx = objhist(x)\n freqy = objhist(y)\n \n Hx = freqx.entropy()\n Hy = freqy.entropy()\n Hxy = objhist(zip(x,y)).entropy()\n M = Hx + Hy - Hxy\n Mstar = 2*M / (Hx+Hy)\n\n if len(freqx)==1 or len(freqy)==1:\n p = 1\n elif np.all([xi==yi for xi,yi in zip(x,y)]):\n p = 0\n else:\n Mperms = np.array([Hx + Hy - objhist(zip(permutation(x),y)).entropy() for i in np.arange(nperms)])\n p = (Mperms >= M).sum() / nperms\n\n return M, Mstar, p, Hx, Hy, Hxy", "def entropy(data):\n\n freqs = {}\n suma = len(data)\n\n for i in range(0, len(data)):\n freqs[data[i]] = 1.0 + freqs.get(data[i], 0)\n\n res = 0.0\n for i in freqs:\n res += (freqs[i] / suma) * log((freqs[i] / 
suma), 2)\n return -res", "def entropy(self):\n Z = self.sum()\n assert (Z > 0), 'Non-normalizable factor (perhaps log factor?)' # also check for positivity?\n H = 0.0\n for x in np.nditer(self.t, op_flags=['readonly']):\n p = x/Z\n H += 0.0 if p==0 else -p*np.log(p)\n return H", "def H(self, data):\n entropy = 0\n\n if not data:\n return entropy\n\n for x in range(256):\n p_x = float(data.count(chr(x))) / len(data)\n if p_x > 0:\n entropy -= p_x * math.log(p_x, 2)\n\n return entropy", "def entropy_obj(self, x, h, num_samples_posterior=20, return_score=False, learn_post_sigma=True):\n inf_dist = distributions.Normal(h, self.post_logsigma.detach().exp())\n h_given_x = inf_dist.sample((num_samples_posterior,))\n if len(x.size()) == 4:\n inf_logprob = inf_dist.log_prob(h_given_x).sum(2)\n xr = x[None].repeat(num_samples_posterior, 1, 1, 1, 1)\n xr = xr.view(x.size(0) * num_samples_posterior, x.size(1), x.size(2), x.size(3))\n logq, mean_output = self.logq_joint(xr, h_given_x.view(-1, h.size(1)), return_mu=True)\n mean_output = mean_output.view(num_samples_posterior, x.size(0), x.size(1), x.size(2), x.size(3))\n logq = logq.view(num_samples_posterior, x.size(0))\n w = (logq - inf_logprob).softmax(dim=0)\n fvals = (x[None] - mean_output) / (self.logsigma.exp() ** 2)\n weighted_fvals = (fvals * w[:, :, None, None, None]).sum(0).detach()\n c = weighted_fvals\n else:\n inf_logprob = inf_dist.log_prob(h_given_x).sum(2)\n xr = x[None].repeat(num_samples_posterior, 1, 1)\n xr = xr.view(x.size(0) * num_samples_posterior, x.size(1))\n logq, mean_output = self.logq_joint(xr, h_given_x.view(-1, h.size(1)), return_mu=True)\n mean_output = mean_output.view(num_samples_posterior, x.size(0), x.size(1))\n logq = logq.view(num_samples_posterior, x.size(0))\n w = (logq - inf_logprob).softmax(dim=0)\n fvals = (x[None] - mean_output) / (self.logsigma.exp() ** 2)\n weighted_fvals = (fvals * w[:, :, None]).sum(0).detach()\n c = weighted_fvals\n\n mgn = c.norm(2, 1).mean()\n g_error_entropy = torch.mul(c, x).mean(0).sum()\n\n post = distributions.Normal(h.detach(), self.post_logsigma.exp())\n h_g_post = post.rsample()\n joint = self.logq_joint(x.detach(), h_g_post)\n post_ent = post.entropy().sum(1)\n\n elbo = joint + post_ent\n post_loss = -elbo.mean()\n\n if learn_post_sigma:\n self.post_optimizer.zero_grad()\n post_loss.backward()\n self.post_optimizer.step()\n\n if return_score:\n return g_error_entropy, mgn, c\n else:\n return g_error_entropy, mgn", "def compute_dists(x, y):\r\n \r\n return (x - y.permute(0, 2, 1)) ** 2", "def entropy(d, total, word_count):\n\t# Entropie je - Sum_morf p(morf) * log_2 p(morf)\n\t# p(morf) = c(morf) / c(all)\n\te = 0\n\tfor count in d.values():\n\t\tp = count/total\n\t\ttype_e = - p * log2(p)\n\t\te += type_e * count\n\treturn e / word_count", "def entropy(self, priors=None):\n def entropy_f(x):\n x[x != 0] *= np.log(x[x != 0])\n return -x.sum(axis=0)\n return self.utility(entropy_f, priors)", "def entropy(class_probabilities):\n return sum(-p * math.log(p, 2)\n for p in class_probabilities\n if p) #ignore 0's", "def entropy(data, idxList):\n df = data.loc[idxList]\n counts = df.value_counts().to_numpy()\n counts = counts.reshape(1, -1).astype(np.float32)\n counts /= np.sum(counts)\n log_sum = counts @ np.log2(counts.T)\n return -log_sum[0, 0]", "def print_entropies(independent_joint_probabilities, conditional_joint_probabilities):\n indepndent_entropy = entropy_from_probability_matrix(independent_joint_probabilities)\n conditional_entropy = 
entropy_from_probability_matrix(conditional_joint_probabilities)\n\n print 'Independent H(X,Y) = {h}'.format(h=indepndent_entropy)\n print 'Conditional H(X,Y) = {h}'.format(h=conditional_entropy)\n print 'D_KL(Independent, Conditional) = {d_kl}' \\\n .format(d_kl=kullback_leibler_divergence(independent_joint_probabilities, conditional_joint_probabilities))\n print 'D_KL(Conditional, Independent) = {d_kl}' \\\n .format(d_kl=kullback_leibler_divergence(conditional_joint_probabilities, independent_joint_probabilities))\n\n return indepndent_entropy, conditional_entropy", "def entropy_computations(\n self,\n between_labels=True,\n between_images=True,\n between_all_images=False,\n symmetrized=True,\n ):\n jzazbz_dist_dict = self.jzazbz_dist_dict\n\n if between_labels:\n words = self.labels_list\n labels_entropy_dict = {}\n labels_entropy_dict_js = {}\n color_sym_matrix = []\n color_sym_matrix_js = []\n\n for word1 in words:\n row = []\n row_js = []\n for word2 in words:\n entropy_js = js_divergence(\n np.mean(np.array(jzazbz_dist_dict[word1]), axis=0),\n np.mean(np.array(jzazbz_dist_dict[word2]), axis=0),\n )\n entropy = kl_divergence(\n np.mean(np.array(jzazbz_dist_dict[word1]), axis=0),\n np.mean(np.array(jzazbz_dist_dict[word1]), axis=0),\n symmetrized,\n )\n row.append(entropy)\n row_js.append(entropy_js)\n # these lines are for convenience; if strings are correctly synced across all data they are not needed\n if word1 == \"computer science\":\n labels_entropy_dict[\"computer_science\" + \"_\" + word2] = entropy\n labels_entropy_dict_js[\n \"computer_science\" + \"_\" + word2\n ] = entropy_js\n elif word2 == \"computer science\":\n labels_entropy_dict[word1 + \"_\" + \"computer_science\"] = entropy\n labels_entropy_dict_js[\n word1 + \"_\" + \"computer_science\"\n ] = entropy_js\n else:\n labels_entropy_dict[word1 + \"_\" + word2] = entropy\n labels_entropy_dict_js[word1 + \"_\" + word2] = entropy_js\n color_sym_matrix.append(row)\n color_sym_matrix_js.append(row_js)\n\n self.cross_entropy_between_labels_dict = labels_entropy_dict\n self.cross_entropy_between_labels_matrix = color_sym_matrix\n self.cross_entropy_between_labels_dict_js = labels_entropy_dict_js\n self.cross_entropy_between_labels_matrix_js = color_sym_matrix_js\n\n if between_images:\n entropy_dict = {}\n entropy_dict_js = {}\n for key in jzazbz_dist_dict:\n entropy_array = []\n entropy_array_js = []\n for i in range(len(jzazbz_dist_dict[key])):\n for j in range(len(jzazbz_dist_dict[key])):\n entropy_array_js.append(\n js_divergence(\n jzazbz_dist_dict[key][i], jzazbz_dist_dict[key][j]\n )\n )\n entropy_array.append(\n kl_divergence(\n jzazbz_dist_dict[key][i],\n jzazbz_dist_dict[key][j],\n symmetrized,\n )\n )\n entropy_dict[key] = entropy_array\n entropy_dict_js[key] = entropy_array_js\n\n self.cross_entropy_between_images_dict = entropy_dict\n self.cross_entropy_between_images_dict_js = entropy_dict_js\n\n if between_all_images:\n entropy_dict_all = {}\n color_sym_matrix_js_all = []\n\n for word1 in words:\n row_js_all = []\n for word2 in words:\n entropy_js_all = []\n for i in range(len(jzazbz_dist_dict[word1])):\n for j in range(len(jzazbz_dist_dict[word2])):\n try:\n entropy_js_all.append(\n js_divergence(\n jzazbz_dist_dict[word1][i],\n jzazbz_dist_dict[word2][j],\n )\n )\n except Exception as exc:\n self.log.error(exc)\n entropy_js_all.append(np.mean(entropy_js))\n entropy_dict_all[word1 + \"_\" + word2] = entropy_js_all\n row_js_all.append(np.mean(entropy_js_all))\n color_sym_matrix_js_all.append(row_js_all)\n\n 
self.cross_entropy_between_all_images_dict = entropy_dict_all\n self.cross_entropy_between_all_images_matrix = color_sym_matrix_js_all", "def mutual_info_fast(l1, l2, l1_entropy, l2_entropy):\n return l1_entropy + l2_entropy - entropy(joint_dataset(l1, l2))", "def entropy(self):\n return -np.sum(self.log_likelihoods * np.exp(self.log_likelihoods))", "def entropy(P):\n P_nan = P.copy()\n P_nan[P_nan == 0] = np.nan\n return np.nansum(np.multiply(P_nan, np.log2(1 / P_nan)))", "def entropy(self, base: int = None):\n\n # shannon entropy in nats\n fdist_ = self.fdist\n fdist_[\"prob\"] = fdist_[\"freq\"] / fdist_[\"freq\"].sum()\n fdist_[\"logp\"] = np.log(fdist_[\"prob\"])\n fdist_[\"nats\"] = -fdist_[\"prob\"] * fdist_[\"logp\"]\n entropy_ = fdist_[\"nats\"].sum()\n\n # convert base\n if base:\n entropy_ = entropy_ / np.log(base)\n\n # return\n return entropy_", "def entropy(distribution, unit=2):\n frequencies = distribution.frequencies(normalised=True)\n # check to see if it is a deterministic case (all but one are zero)\n zeros_size = frequencies[frequencies == 0].size\n if zeros_size + 1 == frequencies.size:\n return 0\n else:\n return np.sum(-frequencies * np.log2(frequencies) / np.log2(unit))", "def mutual_information(x, y, bins, normalize=False):\n # assert array length\n assert len(x) == len(y)\n\n # get the bins\n bins = get_2D_bins(x, y, bins)\n\n # calculate entropy(x) and conditional_entropy(x,y)\n hx = entropy(x, bins[0])\n hcon = conditional_entropy(x, y, bins)\n\n if normalize:\n normalizer = np.min([entropy(x, bins[0]), entropy(y, bins[1])])\n mutual_info = hx - hcon\n\n # check if mutual info and normalizer are very small\n if mutual_info < 1e-4 and normalizer < 1e-4:\n # return zero to prevent very high values of normalized mutual information\n # e.g. mutual information = -1.3e-12, normalizer = -1.6e-12 \n # -> normalized conditional entropy = 812.5\n return 0\n else:\n return mutual_info / normalizer\n else:\n return hx - hcon", "def entropy(class_probabilities):\n return sum(-p * math.log(p,2)\n for p in class_probabilities\n if p)", "def EntropyD(dist): \n if np.any(dist>0):\n dist2 = dist/np.sum(dist)\n return -np.sum(dist2[dist2>0]*np.log2(dist2[dist2>0]))\n else:\n return 0", "def compute_entropy(bincounts, include_zeros):\n num_total = tf.cast(tf.reduce_sum(bincounts), tf.float64)\n if not include_zeros:\n bincounts = bincounts[1:]\n mask = tf.greater(bincounts, 0)\n nonzero_bincounts = tf.cast(\n tf.boolean_mask(bincounts, mask), tf.float64)\n num_nonzero = tf.cast(tf.reduce_sum(nonzero_bincounts), tf.float64)\n log_nonzero_bincounts = tf.math.log(nonzero_bincounts)\n log_prob = log_nonzero_bincounts - tf.reduce_logsumexp(\n log_nonzero_bincounts)\n entropy = tf.math.reduce_sum(\n log_prob * tf.exp(log_prob)) / -tf.math.log(tf.cast(2, tf.float64))\n return entropy * num_nonzero / num_total" ]
[ "0.7481201", "0.6875717", "0.6782211", "0.6750487", "0.6720426", "0.6655979", "0.6648373", "0.6570259", "0.6566688", "0.64887154", "0.64597607", "0.64407265", "0.63953054", "0.6383221", "0.63512045", "0.63497704", "0.63459283", "0.63403004", "0.6330119", "0.6322664", "0.6289737", "0.6262488", "0.62549144", "0.6251724", "0.6230731", "0.6217289", "0.61961704", "0.6191671", "0.61783874", "0.61604345", "0.6134164", "0.61279017", "0.61182636", "0.6096975", "0.60740614", "0.60637957", "0.6060501", "0.6000772", "0.5995078", "0.59780097", "0.5965804", "0.5962275", "0.5938181", "0.5931029", "0.59214073", "0.58948284", "0.5884102", "0.58662707", "0.58644086", "0.5844605", "0.5844605", "0.5831491", "0.5826183", "0.58253723", "0.5824066", "0.5819012", "0.58132887", "0.5806917", "0.57983744", "0.57874715", "0.57716763", "0.5768342", "0.5764843", "0.5764293", "0.5762632", "0.57526463", "0.5746962", "0.57385045", "0.57261175", "0.57250965", "0.5715183", "0.5712746", "0.57112396", "0.57057035", "0.5703124", "0.5693805", "0.5688762", "0.56652105", "0.5663189", "0.56622654", "0.5652942", "0.56420094", "0.5633418", "0.5633384", "0.5622846", "0.5621945", "0.5621546", "0.5619045", "0.56153", "0.5594045", "0.55938196", "0.5593046", "0.55873567", "0.55843854", "0.5583587", "0.5580749", "0.55726624", "0.55649835", "0.55515546", "0.55513716" ]
0.71850336
1
Mutual information
Calculates the mutual information of a discrete distribution x given a known discrete distribution y. The mutual information is the amount of information that two distributions share.
def mutual_information(x, y, bins, normalize=False):
    # assert array length
    assert len(x) == len(y)

    # get the bins
    bins = get_2D_bins(x, y, bins)

    # calculate entropy(x) and conditional_entropy(x,y)
    hx = entropy(x, bins[0])
    hcon = conditional_entropy(x, y, bins)

    if normalize:
        normalizer = np.min([entropy(x, bins[0]), entropy(y, bins[1])])
        mutual_info = hx - hcon

        # check if mutual info and normalizer are very small
        if mutual_info < 1e-4 and normalizer < 1e-4:
            # return zero to prevent very high values of normalized mutual information
            # e.g. mutual information = -1.3e-12, normalizer = -1.6e-12
            # -> normalized conditional entropy = 812.5
            return 0
        else:
            return mutual_info / normalizer
    else:
        return hx - hcon
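The function above computes I(X;Y) as H(X) - H(X|Y). A minimal, self-contained sketch of the equivalent identity I(X;Y) = H(X) + H(Y) - H(X,Y) with plain NumPy, useful as a cross-check; the binning and sample data below are assumptions for illustration only:

import numpy as np

def mutual_information_check(x, y, bins=10):
    # joint histogram -> joint probabilities P(x, y); small offset avoids log(0)
    joint_hist, _, _ = np.histogram2d(x, y, bins=bins)
    p_xy = joint_hist / joint_hist.sum() + 1e-15

    # marginals P(x) and P(y) from the joint distribution
    p_x = p_xy.sum(axis=1)
    p_y = p_xy.sum(axis=0)

    # entropies in bits
    h_x = -np.sum(p_x * np.log2(p_x))
    h_y = -np.sum(p_y * np.log2(p_y))
    h_xy = -np.sum(p_xy * np.log2(p_xy))

    # I(X;Y) = H(X) + H(Y) - H(X,Y)
    return h_x + h_y - h_xy

rng = np.random.default_rng(0)  # hypothetical sample data
x = rng.normal(size=1000)
y = x + rng.normal(scale=0.5, size=1000)
print(mutual_information_check(x, y, bins=10))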
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mutual_information(x, y):\r\n\r\n # INSERT YOUR CODE HERE\r\n xvalue, xcount = np.unique(x,return_counts = True)\r\n probx = xcount.astype(float)/len(x)\r\n Hyx = 0.0\r\n for pxval,xval in zip(probx,xvalue):\r\n Hyx += (pxval)*entropy(y[x==xval])\r\n \r\n Ixy = entropy(y) - Hyx\r\n return Ixy\r\n raise Exception('Function not yet implemented!')", "def mutual_information(x, y, logfunc=np.log2, nperms=1e4):\n def entropy(freqDict):\n return -np.array([p*logFunc(p) for p in freqDict.values()]).sum()\n freqx = objhist(x)\n freqy = objhist(y)\n \n Hx = freqx.entropy()\n Hy = freqy.entropy()\n Hxy = objhist(zip(x,y)).entropy()\n M = Hx + Hy - Hxy\n Mstar = 2*M / (Hx+Hy)\n\n if len(freqx)==1 or len(freqy)==1:\n p = 1\n elif np.all([xi==yi for xi,yi in zip(x,y)]):\n p = 0\n else:\n Mperms = np.array([Hx + Hy - objhist(zip(permutation(x),y)).entropy() for i in np.arange(nperms)])\n p = (Mperms >= M).sum() / nperms\n\n return M, Mstar, p, Hx, Hy, Hxy", "def mutual_information(x, y, w):\r\n \r\n\r\n total_entropy = entropy(y,w)\r\n\r\n partitioned_x = partition(x)\r\n weighted_entropy = 0\r\n # calculate the weighted entropy over the partition of x\r\n vals,counts= np.unique(x,return_counts=True)\r\n for key in partitioned_x:\r\n weighted_entropy += np.sum([(np.sum(w[partitioned_x[key]])/np.sum(w)) * entropy(y[partitioned_x[key]],w[partitioned_x[key]])])\r\n\r\n information_gain = total_entropy - weighted_entropy\r\n return information_gain", "def MutualInformation(x, y, bins):\n hist_xy, x_edges, y_edges = np.histogram2d(x, y, bins)\n return sklearn.metrics.mutual_info_score(None, None, hist_xy)", "def mutual_information_2d(x, y, sigma=1, normalized=False):\n \n bins = (256, 256)\n \n jh = np.histogram2d(x, y, bins=bins)[0]\n \n # smooth the jh with a gaussian filter of given sigma\n ndimage.gaussian_filter(jh, sigma=sigma, mode='constant',\n output=jh)\n \n # compute marginal histograms\n jh = jh + EPS\n sh = np.sum(jh)\n jh = jh / sh\n s1 = np.sum(jh, axis=0).reshape((-1, jh.shape[0]))\n s2 = np.sum(jh, axis=1).reshape((jh.shape[1], -1))\n \n # Normalised Mutual Information of:\n # Studholme, jhill & jhawkes (1998).\n # \"A normalized entropy measure of 3-D medical image alignment\".\n # in Proc. Medical Imaging 1998, vol. 3338, San Diego, CA, pp. 
132-143.\n if normalized:\n mi = ((np.sum(s1 * np.log(s1)) + np.sum(s2 * np.log(s2)))\n / np.sum(jh * np.log(jh))) - 1\n else:\n mi = ( np.sum(jh * np.log(jh)) - np.sum(s1 * np.log(s1))\n - np.sum(s2 * np.log(s2)))\n \n return mi", "def mutual_info(l1, l2):\n return entropy(l1) + entropy(l2) - entropy(joint_dataset(l1, l2))", "def mutual_information_from_data(X, Y, num_bins):\n N = X.size\n delta = 10e-10\n\n x_min, x_max = (X.min() - delta, X.max() + delta)\n y_min, y_max = (Y.min() - delta, Y.max() + delta)\n\n X_hist, X_bin = np.histogram(X, bins=num_bins, range=(x_min, x_max))\n Y_hist, Y_bin = np.histogram(Y, bins=num_bins, range=(y_min, y_max))\n\n X_states = np.digitize(X, X_bin)\n Y_states = np.digitize(Y, Y_bin)\n coords = Counter(zip(X_states, Y_states))\n\n joint_linear = np.zeros((config.NUM_STATES, config.NUM_STATES))\n for x, y in coords.keys():\n joint_linear[x - 1, y - 1] = coords[(x, y)] / N\n\n p_X = X_hist / N\n p_Y = Y_hist / N\n prod_XY = np.tensordot(p_X.T, p_Y, axes=0)\n\n div_XY = joint_linear / prod_XY\n div_XY[div_XY == 0] = np.nan\n\n return np.nansum(np.multiply(joint_linear, np.log2(div_XY)))", "def compute_empirical_mutual_info_nats(var1_values, var2_values):\n\n # -------------------------------------------------------------------------\n # YOUR CODE HERE\n #\n\n empirical_mutual_info_nats = 0.0\n \n var1_distribution = compute_empirical_distribution(var1_values)\n var2_distribution = compute_empirical_distribution(var2_values)\n joint_distribution = compute_empirical_distribution(list(zip(var1_values,var2_values)))\n \n empirical_mutual_info_nats = 0\n for var1 in var1_distribution:\n for var2 in var2_distribution:\n empirical_mutual_info_nats += joint_distribution[(var1, var2)] \\\n * np.log(joint_distribution[(var1,var2)]/(var1_distribution[var1]*var2_distribution[var2]))\n \n #\n # END OF YOUR CODE\n # -------------------------------------------------------------------------\n\n return empirical_mutual_info_nats", "def pairwiseMutualInformation(align, nperms=1e4):\n L=len(align[align.index[0]])\n columns = [align.map(lambda s: s[i]) for i in np.arange(L)]\n M = np.nan*np.zeros((L, L))\n p = np.nan*np.zeros((L, L))\n Mstar = np.nan*np.zeros((L, L))\n for xi, yi in itertools.combinations(np.arange(L), 2):\n freqx = objhist(columns[xi])\n freqy = objhist(columns[yi])\n\n tmpM, tmpMstar, tmpp, Hx, Hy, Hxy= mutual_information(columns[xi],\n columns[yi],\n logfunc=np.log2,\n nperms=nperms)\n \n \"\"\"We wouldn't need to test invariant sites or a site with itself\"\"\"\n if len(freqx) == 1 or len(freqy) == 1:\n tmpp = np.nan\n elif xi == yi:\n tmpp = np.np.nan\n\n M[xi, yi] = tmpM\n p[xi, yi] = tmpp\n Mstar[xi, yi] = tmpMstar\n q = adjustnonnan(p)\n\n return M, Mstar, p, q", "def compute_dists(x, y):\r\n \r\n return (x - y.permute(0, 2, 1)) ** 2", "def mi_bin(x, y, bins_x, bins_y):\n if bins_y == 0:\n bins_y = len(np.unique(y))\n # compute probabilities\n p_x = histogram(x, bins_x)\n p_y = histogram(y, bins_y)\n p_xy = histogram2d(x, y, bins_x, bins_y)\n p_x = p_x / p_x.sum()\n p_y = p_y / p_y.sum()\n p_xy = p_xy / p_xy.sum()\n # compute entropy\n h_x = entropy(p_x.astype(np.float32))\n h_y = entropy(p_y.astype(np.float32))\n h_xy = entropy(p_xy.ravel().astype(np.float32))\n # compute mutual information\n i = h_x + h_y - h_xy\n\n return i", "def mutual_information(co_freq, s_freq, t_freq, total_instances, mitype=None):\n if co_freq > 0:\n if mitype is not None:\n if mitype == \"expected\":\n mi = math.log2(\n (total_instances * co_freq) / (s_freq * t_freq)\n ) * 
(co_freq / total_instances)\n elif mitype == \"normalized\":\n alpha = - math.log2(co_freq / total_instances)\n mi = (\n (math.log2(\n (total_instances * co_freq) / (s_freq * t_freq)) / alpha)\n if alpha != 0 else 0\n )\n elif mitype == \"pmi2\":\n mi = math.log2((co_freq ** 2) / (s_freq * t_freq))\n elif mitype == \"pmi3\":\n mi = math.log2(\n (co_freq ** 3) / (s_freq * t_freq * total_instances))\n else:\n raise ValueError(\n \"Provided Mutual information score type (mitype) is not \"\n \"supported. Provide one value from the following list \"\n \"['expected', 'normalized','pmi2', 'pmi3'] \")\n else:\n mi = math.log2((total_instances * co_freq) / (s_freq * t_freq))\n else:\n mi = 0\n return mi if mi > 0 else 0", "def mutual_information(transposed, transposed_2 = False):\n\tmi = []\n\tlength = range(len(transposed))\n\tfor i in length:\n\t\tentropy_i = entropy(transposed[i])\n\t\tmi_list = []\n\t\tif transposed_2 == False:\n\t\t\tfor j in length:\n\t\t\t\tentropy_j = entropy(transposed[j])\n\t\t\t\tjoint = joint_entropy(transposed[i], transposed[j])\n\t\t\t\tmi_calc = entropy_i + entropy_j - joint\n\t\t\t\tmi_list.append(mi_calc)\n\t\t\tmi.append(mi_list)\n\n\t\telse:\n\t\t\tlength_2 = range(len(transposed_2))\n\t\t\tfor j in length_2:\n\t\t\t\tentropy_j = entropy(transposed_2[j])\n\t\t\t\tjoint = joint_entropy(transposed[i], transposed_2[j])\n\t\t\t\tmi_calc = entropy_i + entropy_j - joint\n\t\t\t\tmi_list.append(mi_calc)\n\t\t\tmi.append(mi_list)\n\treturn mi", "def mutual_information(transposed, transposed_2 = False):\n\tmi = []\n\tlength = range(len(transposed))\n\tfor i in length:\n\t\tentropy_i = entropy(transposed[i])\n\t\tmi_list = []\n\t\tif transposed_2 == False:\n\t\t\tfor j in length:\n\t\t\t\tentropy_j = entropy(transposed[j])\n\t\t\t\tjoint = joint_entropy(transposed[i], transposed[j])\n\t\t\t\tmi_calc = entropy_i + entropy_j - joint\n\t\t\t\tmi_list.append(mi_calc)\n\t\t\tmi.append(mi_list)\n\n\t\telse:\n\t\t\tlength_2 = range(len(transposed_2))\n\t\t\tfor j in length_2:\n\t\t\t\tentropy_j = entropy(transposed_2[j])\n\t\t\t\tjoint = joint_entropy(transposed[i], transposed_2[j])\n\t\t\t\tmi_calc = entropy_i + entropy_j - joint\n\t\t\t\tmi_list.append(mi_calc)\n\t\t\tmi.append(mi_list)\n\treturn mi", "def mutual_information_brute_force(self, ret_prob_activity=False):\n base = 2 ** np.arange(0, self.Nr)\n\n # prob_a contains the probability of finding activity a as an output.\n prob_a = np.zeros(2**self.Nr)\n for c, prob_c in self._iterate_mixtures():\n # get the associated output ...\n a = np.dot(self.sens_mat, c).astype(np.bool)\n # ... 
and represent it as a single integer\n a = np.dot(base, a)\n\n prob_a[a] += prob_c\n \n # normalize the output to make it a probability distribution\n prob_a /= prob_a.sum()\n \n # calculate the mutual information\n MI = -sum(pa*np.log2(pa) for pa in prob_a if pa != 0)\n \n if ret_prob_activity:\n return MI, prob_a\n else:\n return MI", "def mutual_info_fast(l1, l2, l1_entropy, l2_entropy):\n return l1_entropy + l2_entropy - entropy(joint_dataset(l1, l2))", "def dist_calc(self, x, y):\n p_xy = self.d2_bin(x, y)\n p_x = np.sum(p_xy, axis=1)\n p_y = np.sum(p_xy, axis=0)\n\n p_x_times_p_y = np.tensordot(p_x, p_y, axes = 0)\n info = np.sum(p_xy * np.ma.log(np.ma.divide(p_xy, p_x_times_p_y)))\n entropy = np.sum(-1 * p_xy * np.ma.log(p_xy))\n\n output = max(0.0, (1 - (info / entropy)))\n return output", "def get_information_y_hat(x_pub, x_priv, x_joint, y_hat, num_of_bins_y):\n\tprint('Start calculating the information for y_hat...')\n\ty_hat = np.array(y_hat).astype(np.float)\n\tpys_hat, unique_inverse_y, y_hat = extract_probs_label(y_hat,num_of_bins_y)\n\tp_y_given_x_pub, b1_pub, b_pub, unique_a_pub, unique_inverse_x_pub, pxs_pub = extract_probs(y_hat, x_pub)\n\tp_y_given_x_priv, b1_priv, b_priv, unique_a_priv, unique_inverse_x_priv, pxs_priv = extract_probs(y_hat, x_priv)\n\tp_y_given_x_joint, b1_joint, b_joint, unique_a_joint, unique_inverse_x_joint, pxs_joint = extract_probs(y_hat, x_joint)\n\t# Shannon Entropy over label\n\tH2Y_hat = -np.sum(pys_hat * np.log2(pys_hat))\n\t# mutual Information between secret layer and label\n\tMI_pri_y_hat = calc_information_for_inp_out(pxs_priv,pys_hat,y_hat,unique_inverse_x_priv)\n\t# mutual Information between secret layer and label\n\tMI_pub_y_hat = calc_information_for_inp_out(pxs_pub,pys_hat,y_hat,unique_inverse_x_pub)\n\treturn H2Y_hat, MI_pri_y_hat, MI_pub_y_hat", "def get_mi_mvn(x, y):\n\n d = x.shape[1]\n\n # hx = 0.5 * log((2 * np.pi * np.e)**d * det(np.cov(x.T)))\n # hy = 0.5 * log((2 * np.pi * np.e)**d * det(np.cov(y.T)))\n # hxy = 0.5 * log((2 * np.pi * np.e)**(2*d) * det(np.cov(x.T, y=y.T)))\n # mi = hx + hy - hxy\n\n # hx = 0.5 * log(det(2*np.pi*np.e*np.cov(x.T)))\n # hy = 0.5 * log(det(2*np.pi*np.e*np.cov(y.T)))\n # hxy = 0.5 * log(det(2*np.pi*np.e*np.cov(np.c_[x,y].T)))\n hx = get_h_mvn(x)\n hy = get_h_mvn(y)\n hxy = get_h_mvn(np.c_[x,y])\n mi = hx + hy - hxy\n\n # mi = 0.5 * (log(det(np.cov(x.T))) + log(det(np.cov(y.T))) - log(det(np.cov(np.c_[x,y].T))))\n\n return mi", "def symbolic_mutual_information(symX, symY):\n\n if len(symX) != len(symY):\n raise ValueError('All arrays must have same length')\n \n symX = np.array(symX)\n symY = np.array(symY)\n \n symbols = np.unique(np.concatenate((symX,symY))).tolist()\n \n jp = symbolic_joint_probabilities(symX, symY)\n pX = symbolic_probabilities(symX)\n pY = symbolic_probabilities(symY)\n \n MI = 0\n\n for yi, b in pY.items():\n for xi, a in pX.items():\n try:\n c = jp[yi,xi]\n MI += c * np.log(c /(a * b)) / np.log(len(symbols))\n except KeyError:\n continue\n except:\n print(\"Unexpected Error\")\n raise\n \n return MI", "def mutual_information(mc_preds):\n mutual_info = entropy(np.mean(mc_preds, axis=0)) - np.mean(entropy(mc_preds),\n axis=0)\n return mutual_info", "def metric(x, y):\n d = 2\n summ = []\n i = 0\n while i < len(x):\n # in this case use euclidean distance\n summ.append((x[i] - y[i])**d)\n i = i + 1\n return sum(summ) ** (1 / float(d))", "def get_information_priv_y_hat(x_priv, y_hat, num_of_bins_y):\n\tprint('Start calculating the information for y_hat...')\n\ty_hat = 
np.array(y_hat).astype(np.float)\n\tpys_hat, unique_inverse_y, y_hat = extract_probs_label(y_hat,num_of_bins_y)\n\tp_y_given_x_priv, b1_priv, b_priv, unique_a_priv, unique_inverse_x_priv, pxs_priv = extract_probs(y_hat, x_priv)\n\t# mutual Information between secret layer and label\n\tMI_pri_y_hat = calc_information_for_inp_out(pxs_priv,pys_hat,y_hat,unique_inverse_x_priv)\n\treturn MI_pri_y_hat.astype(np.float32)", "def synergy(g1, g2, c):\n return mutual_info(joint_dataset(g1, g2), c) -\\\n mutual_info(g1, c) - mutual_info(g2, c)", "def mutual_information_estimate(self, approx_prob=False):\n \n # this might be not the right approach\n q_n = self.receptor_activity_estimate(approx_prob=approx_prob)\n q_nm = self.receptor_crosstalk_estimate(approx_prob=approx_prob)\n \n # calculate the approximate mutual information\n return self._estimate_MI_from_q_values(q_n, q_nm)", "def get_dists_2():\n d1 = Distribution(['0', '1'], [1 / 2, 1 / 2])\n d2 = Distribution(['0', '1'], [1 / 3, 2 / 3])\n d3 = Distribution(['0', '1'], [2 / 5, 3 / 5])\n return d1, d2, d3", "def normalized_mutual_information(cl: np.ndarray, org: np.ndarray):\n assert cl.shape == org.shape\n\n return mutual_info_score(org, cl) / (abs(entropy(cl) + entropy(org)) / 2)", "def mutual_information(self, excitation_method='auto', **kwargs):\n if excitation_method == 'auto':\n if self.Ns <= self.parameters['brute_force_threshold_Ns']:\n excitation_method = 'brute_force'\n else:\n excitation_method = 'monte_carlo'\n \n if excitation_method == 'brute_force' or excitation_method == 'brute-force':\n return self.mutual_information_brute_force(**kwargs)\n elif excitation_method == 'monte_carlo' or excitation_method == 'monte-carlo':\n return self.mutual_information_monte_carlo(**kwargs)\n elif excitation_method == 'estimate':\n return self.mutual_information_estimate(**kwargs)\n else:\n raise ValueError('Unknown excitation_method `%s`.' % excitation_method)", "def mutual_information(pi, pj, pij):\n p_i = 1 - pi\n p_j = 1 - pj\n p_ij = pj - pij\n pi_j = pi - pij\n p_i_j = 1 - pi - pj + pij\n \n log_pi = log(pi)\n log_pj = log(pj)\n log_p_i = log(p_i)\n log_p_j = log(p_j)\n \n mi = pij * (log(pij) - log_pi - log_pj) + \\\n pi_j * (log(pi_j) - log_pi - log_p_j) + \\\n p_i_j * (log(p_i_j) - log_p_i - log_p_j)\n if p_ij != 0: # For language groups and features, this is the only probability that could be zero, and lim_x->0[x*log(x)] = 0 \n mi += p_ij * (log(p_ij) - log_p_i - log_pj)\n \n return mi", "def mattes_mutual_information(fixed, moving, bins_count=24, pixels_fraction=None, **kwargs) :\n \n pixels_count = 0\n if isinstance(fixed, medipy.base.Image) :\n pixels_count = numpy.prod(fixed.shape)\n FixedImageType = medipy.itk.itk_image_type(fixed)\n else :\n pixels_count = numpy.prod(fixed.GetLargestPossibleRegion().GetSize())\n FixedImageType = fixed\n \n if isinstance(moving, medipy.base.Image) :\n pixels_count = max(pixels_count, numpy.prod(moving.shape))\n MovingImageType = medipy.itk.itk_image_type(moving)\n else :\n pixels_count = max(pixels_count, numpy.prod(moving.GetLargestPossibleRegion().GetSize()))\n MovingImageType = moving\n\n arguments = {}\n \n # Medimax : recalage/imx_mtch.c:erreur_IM : histograms have 250 bins. 
We use\n # ITK suggested value\n arguments[\"NumberOfHistogramBins\"] = int(bins_count)\n arguments[\"UseAllPixels\"] = True\n\n if pixels_fraction is not None :\n arguments[\"NumberOfSpatialSamples\"] = int(pixels_fraction*pixels_count)\n\n #if pixels_fraction is None :\n # if kwargs == {} :\n # arguments[\"UseAllPixels\"] = False\n # arguments[\"NumberOfSpatialSamples\"] = int(0.1*pixels_count)\n # elif kwargs.get(\"all_pixels\", False) :\n # arguments[\"UseAllPixels\"] = True\n # elif \"spatial_samples_count\" in kwargs :\n # arguments[\"UseAllPixels\"] = False\n # arguments[\"NumberOfSpatialSamples\"] = kwargs[\"spatial_samples_count\"]\n # else :\n # raise Exception(\"Incorrect arguments\")\n #else :\n # arguments[\"UseAllPixels\"] = False\n # arguments[\"NumberOfSpatialSamples\"] = int(pixels_fraction*pixels_count)\n \n metric = itk.MattesMutualInformationImageToImageMetric[\n FixedImageType, MovingImageType].New(**arguments)\n\n return metric", "def make_likelihood_table(x, y):\n\n Y = np.unique(y)\n X = np.unique(x)\n\n likelihood = [[0 for i in range(len(Y))] for j in range(len(X))]\n\n freq = make_frequency_table(x, y, X, Y)\n\n for j in range(len(Y)):\n Sum = (y == Y[j]).sum()\n for i in range(len(X)):\n likelihood[i][j] = freq[X[i]][j] / Sum\n\n return likelihood", "def mutual_information(self,max_lag):\n\n digi = utilities.mi_digitize(self.X)\n\n mi = np.empty(max_lag)\n\n for i in range(max_lag):\n\n ind = i+1\n unshift = digi[ind:]\n shift = digi[0:-ind]\n\n mi[i] = skmetrics.mutual_info_score(unshift,shift)\n\n return mi", "def get_mutual_information(c_wic, c_wioc, c_owic, c_owioc):\n # total word count\n c_total = c_wic + c_wioc + c_owic + c_owioc\n\n mi_1 = (c_wic / float(c_total)) * log10((c_total * c_wic) /\n float((c_wic + c_wioc) * (c_wic + c_owic)))\n mi_2 = (c_owic / float(c_total)) * log10((c_total * c_owic) /\n float((c_owic + c_owioc) * (c_wic + c_owic)))\n mi_3 = (c_wioc / float(c_total)) * log10((c_total * c_wioc) /\n float((c_wic + c_wioc) * (c_wioc + c_owioc)))\n mi_4 = (c_owioc / float(c_total)) * log10((c_total * c_owioc) /\n float((c_owic + c_owioc) * (c_wioc + c_owioc)))\n\n return mi_1 + mi_2 + mi_3 + mi_4", "def mass_based_dissimilarity(self, x1, x2):\n # In each i-tree, find lowest nodes containing both x and y\n # TODO: parallelize\n sum_masses = 0\n for itree in self.random_itrees:\n # Divide each sum by size of subset used to construct the trees.\n sum_masses += self.get_lowest_common_node_mass(itree, x1, x2)/self.subs_size\n return (1/len(self.random_itrees)) * sum_masses # Divide by number of space partitioning models.", "def compute_information_gain(Y, xi):\r\n H_Y = H(Y)\r\n\r\n TrainSet = np.delete(AllSets[2], -1, axis=1)\r\n ColumnInd = AllSets[3].index(xi) # extract from dictionary\r\n\r\n NumHeadlines = AllSets[2].shape[0]\r\n AllOccurences, Count = np.unique(AllSets[2][:, ColumnInd], return_counts=True)\r\n\r\n TotalH_YGivenX = 0\r\n for i, count in zip(AllOccurences, Count):\r\n NewY = Y[TrainSet[:, ColumnInd] == i]\r\n\r\n TotalH_YGivenX += H(NewY) * float(count) / NumHeadlines\r\n\r\n return H_Y - TotalH_YGivenX", "def mutual_information_union(p1, p2, measure=normalized_mutual_info_score):\n nodes = sorted(set(p1.keys()) | set(p2.keys()))\n if nodes == []: return 0\n return measure(\n [p1[n] if n in p1 else np.random.randint(1e12) for n in nodes],\n [p2[n] if n in p2 else np.random.randint(1e12) for n in nodes]\n )", "def calc_mutual_information(probability_mat):\n\n marginals = sp.outer(\n sp.sum(probability_mat, axis=1), 
sp.sum(probability_mat, axis=0))\n p = probability_mat[probability_mat != 0.0]\n m = marginals[probability_mat != 0.0]\n return sp.sum(p * sp.log(p / m))", "def mi(x,y,k=3,base=2):\n x = [[entry] for entry in x]\n y = [[entry] for entry in y]\n assert len(x)==len(y), \"Lists should have same length\"\n assert k <= len(x) - 1, \"Set k smaller than num. samples - 1\"\n intens = 1e-10 #small noise to break degeneracy, see doc.\n x = [list(p + intens*nr.rand(len(x[0]))) for p in x]\n y = [list(p + intens*nr.rand(len(y[0]))) for p in y]\n points = zip2(x,y)\n #Find nearest neighbors in joint space, p=inf means max-norm\n tree = ss.cKDTree(points)\n dvec = [tree.query(point,k+1,p=float('inf'))[0][k] for point in points]\n a,b,c,d = avgdigamma(x,dvec), avgdigamma(y,dvec), digamma(k), digamma(len(x)) \n return (-a-b+c+d)/log(base)", "def get_mutual_information_table(self, dims_to_use=None, ignore_negative_values=True, use_correlation=False):\n from mlabwrap import mlab\n bad_dims = self.get_markers('surface_ignore')\n bad_dims.append('Cell Length')\n bad_dims.append('Time')\n bad_dims.append('191-DNA')\n bad_dims.append('193-DNA')\n bad_dims.append('103-Viability')\n bad_dims.append('cluster_name')\n bad_dims.append('stim')\n bad_dims.append('cluster_num')\n if not dims_to_use:\n dims_to_use = self.dims[:]\n dims_to_use = [d for d in dims_to_use if not d in bad_dims] \n num_dims = len(dims_to_use)\n res = np.zeros((num_dims, num_dims))\n logging.info(\n 'Calculating mutual information for %d pairs...' % ((num_dims ** 2 - num_dims) / 2))\n timer = MultiTimer((num_dims ** 2 - num_dims) / 2)\n for i in xrange(num_dims):\n for j in xrange(i):\n arr = self.get_points(dims_to_use[i], dims_to_use[j])\n if ignore_negative_values:\n arr = arr[np.all(arr > 0, axis=1)]\n if arr.shape[0] < 100:\n logging.warning('Less than 100 cells in MI calculation for (%s, %s)' % (dims_to_use[i], dims_to_use[j]))\n res[j,i] = 0\n res[i,j] = 0\n continue\n if use_correlation:\n res[i,j] = np.corrcoef(arr.T[0], arr.T[1])[0,1]\n else:\n res[i,j] = mlab.mutualinfo_ap(arr, nout=1)\n res[j,i] = res[i,j]\n timer.complete_task('%s, %s' % (dims_to_use[i], dims_to_use[j]))\n return DataTable(res, dims_to_use)", "def nmi(ypred, y):\n# print (ypred)\n# print (y)\n return normalized_mutual_info_score(y,ypred)", "def associate_comp(x, y):\n return torch.cat([x[:1] * y[:1] - x[1:] * y[1:], x[:1] * y[1:] + x[1:] * y[:1]])", "def adjusted_mutual_info(self):\n # Prepare row totals and check for special cases\n row_totals = np.fromiter(self.iter_row_totals(), dtype=np.int64)\n col_totals = np.fromiter(self.iter_col_totals(), dtype=np.int64)\n R = len(row_totals)\n C = len(col_totals)\n if R == C == 1 or R == C == 0:\n # No clustering since the data is not split. 
This is a perfect match\n # hence return 1.0.\n return 1.0\n\n # In one step, calculate entropy for each labeling and mutual\n # information\n h_true, h_pred, mi = self._entropies()\n mi_max = max(h_true, h_pred)\n\n # Calculate the expected value for the MI\n emi = emi_from_margins(row_totals, col_totals)\n\n # Calculate the adjusted MI score\n ami = (mi - emi) / (mi_max - emi)\n return ami", "def d_mse(x, y):\n\n return 2 * (x - y) / x.size(0) / x.size(1)", "def mutual_info_score(self):\n _, _, I_CK = self._entropies()\n return I_CK / self.grand_total", "def __mul__(self, y):\n sum = 0\n x = self\n if len(x) > len(y):\n x, y = y, x\n for key in x:\n if key not in y:\n continue\n sum += x[key] * y[key]\n return sum", "def __mul__(self, y):\n sum = 0\n x = self\n if len(x) > len(y):\n x, y = y, x\n for key in x:\n if key not in y:\n continue\n sum += x[key] * y[key]\n return sum", "def _information_gain(self, y, subsets):\n n = y.shape[0]\n child_entropy = 0\n\n for y_i in subsets:\n child_entropy += self._entropy(y_i) * y_i.shape[0] / float(n)\n\n return self._entropy(y) - child_entropy", "def mutual_information_intersection(p1, p2, measure=normalized_mutual_info_score):\n nodes = sorted(set(p1.keys()) & set(p2.keys()))\n if nodes == []: return 0\n return measure(\n [p1[n] for n in nodes],\n [p2[n] for n in nodes]\n )", "def marginalDistribution(self, x, variable):\n return self._distribution.marginal(x, variable)", "def mutual_info_score(labels_true, labels_pred):\n ct = ContingencyTable.from_labels(labels_true, labels_pred)\n return ct.mutual_info_score()", "def rmsd(x, y):\n\n # get the length of the dataset\n n, = x.shape\n\n return np.sqrt(np.sum((x-y)**2)/n)", "def manhattan(x, y):\n md = np.sum(abs(x-y))\n # print md\n return md", "def mutual_info_matrix(time_series, num_of_bins):\n num_of_rafts, interval_width = time_series.shape\n mi_matrix = np.zeros((num_of_rafts, num_of_rafts))\n\n for i in range(num_of_rafts):\n for j in range(i + 1):\n i0 = time_series[i, :].copy()\n j0 = time_series[j, :].copy()\n c_xy = np.histogram2d(i0, j0, num_of_bins)[0]\n mi = mutual_info_score(None, None, contingency=c_xy) * np.log2(np.e)\n # in unit of bits, * np.log2(np.e) to convert nats to bits\n mi_matrix[i, j] = mi\n mi_matrix[j, i] = mi\n\n return mi_matrix", "def probabilities(self, x, y):\n return self.feed_and_return(x, y, self.network.a)", "def mutual_info(a,b,c,n):\r\n if a == 0: \r\n return 0\r\n print(a,b,c,n) \r\n return log10((a * n) / ((a + c) * (a + b)))", "def Pdist2(x, y):\r\n x_norm = (x ** 2).sum(1).view(-1, 1)\r\n if y is not None:\r\n y_norm = (y ** 2).sum(1).view(1, -1)\r\n else:\r\n y = x\r\n y_norm = x_norm.view(1, -1)\r\n Pdist = x_norm + y_norm - 2.0 * torch.mm(x, torch.transpose(y, 0, 1))\r\n Pdist[Pdist<0]=0\r\n return Pdist", "def mutual_information_spatial(self,max_lag,percent_calc=.5,digitize=True):\n if digitize:\n M = utilities.mi_digitize(self.X)\n else:\n M = self.X\n\n rs, cs = np.shape(M)\n\n rs_iters = int(rs*percent_calc)\n cs_iters = int(cs*percent_calc)\n\n r_picks = np.random.choice(np.arange(rs),size=rs_iters,replace=False)\n c_picks = np.random.choice(np.arange(cs),size=cs_iters,replace=False)\n\n\n # The r_picks are used to calculate the MI in the columns\n # and the c_picks are used to calculate the MI in the rows\n\n c_mi = np.zeros((rs_iters,max_lag))\n r_mi = np.zeros((cs_iters,max_lag))\n\n for i in range(rs_iters):\n for j in range(max_lag):\n\n ind = j+1\n unshift = M[r_picks[i],ind:]\n shift = M[r_picks[i],:-ind]\n c_mi[i,j] = 
skmetrics.mutual_info_score(unshift,shift)\n\n for i in range(cs_iters):\n for j in range(max_lag):\n\n ind=j+1\n unshift = M[ind:, c_picks[i]]\n shift = M[:-ind, c_picks[i]]\n r_mi[i,j] = skmetrics.mutual_info_score(unshift,shift)\n\n r_mut = np.mean(r_mi,axis=0)\n c_mut = np.mean(c_mi,axis=0)\n\n return r_mut, c_mut, r_mi, c_mi", "def __mul__(self, other):\n\n\t\tassert set(self.keys()) == set(other.keys())\n\t\tdistribution, total = {}, 0.0\n\n\t\tfor key in self.keys():\n\t\t\tx, y = self.probability(key), other.probability(key)\n\t\t\tdistribution[key] = (x + eps) * (y + eps)\n\t\t\ttotal += distribution[key]\n\n\t\tfor key in self.keys():\n\t\t\tdistribution[key] /= total\n\n\t\t\tif distribution[key] <= eps / total:\n\t\t\t\tdistribution[key] = 0.0\n\t\t\telif distribution[key] >= 1 - eps / total:\n\t\t\t\tdistribution[key] = 1.0\n\n\t\treturn DiscreteDistribution(distribution)", "def Dist(p1,p2):\n x1, y1 = p1\n x2, y2 = p2\n return (((x1-x2)*(x1-x2)) + ((y1-y2)*(y1-y2)))**0.5", "def get_information(ws_pub, ws_priv, ws_joint, x_pub, x_priv, x_joint, y, num_of_bins, num_of_bins_y,\n\t\t\t\t\tinterval_information_display, calc_parallel=True, py_hats=0):\n\tprint('Start calculating the information...')\n\tbins = np.linspace(-1, 1, num_of_bins)\n\ty = np.array(y).astype(np.float)\n\tpys1, unique_inverse_y,label = extract_probs_label(y,num_of_bins_y)\n\tp_y_given_x_pub, b1_pub, b_pub, unique_a_pub, unique_inverse_x_pub, pxs_pub = extract_probs(label, x_pub)\n\tp_y_given_x_priv, b1_priv, b_priv, unique_a_priv, unique_inverse_x_priv, pxs_priv = extract_probs(label, x_priv)\n\tp_y_given_x_joint, b1_joint, b_joint, unique_a_joint, unique_inverse_x_joint, pxs_joint = extract_probs(label, x_joint)\n\t# Shannon Entropy over label\n\tH2Label = -np.sum(pys1 * np.log2(pys1))\n\t# mutual Information between secret layer and label\n\tMI_pri_label = calc_information_for_inp_out(pxs_priv,pys1,label,unique_inverse_x_priv)\n\t# mutual Information between secret layer and label\n\tMI_pub_label = calc_information_for_inp_out(pxs_pub,pys1,label,unique_inverse_x_pub)\n\n\tif calc_parallel:\n\t\tprint('calculating the information for public layer...')\n\t\tparams_pub = np.array(Parallel(n_jobs=NUM_CORES)(delayed(calc_information_for_epoch)\n\t\t (i, interval_information_display, ws_pub[i], bins, unique_inverse_x_pub,\n\t\t\t\t\t\t\t\t\tunique_inverse_y, label, b_pub, b1_pub, len(unique_a_pub),\n\t\t \t\t\t\t\tpxs_pub, p_y_given_x_pub, pys1)\n\t\t for i in range(len(ws_pub))))\n\t\tprint('calculating the information for secret layer...')\n\t\tparams_priv = np.array(Parallel(n_jobs=NUM_CORES)(delayed(calc_information_for_epoch)\n\t\t (i, interval_information_display, ws_priv[i], bins, unique_inverse_x_priv,\n\t\t\t\t\t\t\t\t\tunique_inverse_y, label, b_priv, b1_priv, len(unique_a_priv),\n\t\t \t\t\t\t\tpxs_priv, p_y_given_x_priv, pys1)\n\t\t for i in range(len(ws_priv))))\n\t\tprint('calculating the information for joint layer...')\n\t\tparams_joint = np.array(Parallel(n_jobs=NUM_CORES)(delayed(calc_information_for_epoch)\n\t\t (i, interval_information_display, ws_joint[i], bins, unique_inverse_x_joint,\n\t\t\t\t\t\t\t\t\tunique_inverse_y, label, b_joint, b1_joint, len(unique_a_joint),\n\t\t \t\t\t\t\tpxs_joint, p_y_given_x_joint, pys1)\n\t\t for i in range(len(ws_joint))))\n\n\telse:\n\t\tparams_pub = np.array([calc_information_for_epoch\n\t\t\t\t\t\t\t\t(i, interval_information_display, ws_pub[i], bins, unique_inverse_x_pub,\n\t\t\t\t\t\t\t\tunique_inverse_y, label, b_pub, b1_pub, 
len(unique_a_pub),\n\t\t\t\t\t\t\t\tpxs_pub, p_y_given_x_pub, pys1)\n\t\t \t\tfor i in range(len(ws_pub))])\n\t\tparams_priv = np.array([calc_information_for_epoch\n\t\t (i, interval_information_display, ws_priv[i], bins, unique_inverse_x_priv,\n\t\t\t\t\t\t\t\t\tunique_inverse_y, label, b_priv, b1_priv, len(unique_a_priv),\n\t\t \t\t\t\t\tpxs_priv, p_y_given_x_priv, pys1)\n\t\t for i in range(len(ws_priv))])\n\t\tparams_joint = np.array([calc_information_for_epoch\n\t\t (i, interval_information_display, ws_joint[i], bins, unique_inverse_x_joint,\n\t\t\t\t\t\t\t\t\tunique_inverse_y, label, b_joint, b1_joint, len(unique_a_joint),\n\t\t \t\t\t\t\tpxs_joint, p_y_given_x_joint, pys1)\n\t\t for i in range(len(ws_joint))])\n\treturn params_pub, params_priv, params_joint, H2Label, MI_pri_label, MI_pub_label", "def calculateDist(kSet1, kSet2, x, y, group, iteration):\n\tkSet1Dist = 0\n\tkSet2Dist = 0\n\t\n\tfor j in range(len(x)):\n\t\tk1Dist = math.sqrt((x[j] - kSet1[0])**2 + (y[j] - kSet1[1])**2)\n\t\tk2Dist = math.sqrt((x[j] - kSet2[0])**2 + (y[j] - kSet2[1])**2)\n\n\t\tif(k1Dist < k2Dist):\n\t\t\tgroup[iteration].append(1)\n\t\t\tkSet1Dist += k1Dist\n\t\telse:\n\t\t\tgroup[iteration].append(2)\n\t\t\tkSet2Dist += k2Dist\n\n\treturn group", "def hammingDist(x, y):\n hd = 0\n for ch1, ch2 in zip(x, y):\n if ch1 != ch2:\n hd += 1\n return hd", "def _compute_x2_statistic(self, expected, actual):\n rng = expected.keys()\n if actual.keys() != rng:\n raise Exception(\"Ranges of two frequencies are not equal.\")\n num_observations = sum([actual[r] for r in rng])\n if abs(num_observations - sum([expected[r] for r in rng])) > _FLOAT_EQ_DELTA:\n raise Exception(\"Frequencies must sum to the same value.\")\n chi_squared_stat = sum([(actual[r] - expected[r])**2 / max(float(expected[r]), 1.0)\n for r in rng])\n p_value = 1 - stats.chi2.cdf(x=chi_squared_stat, # Find the p-value\n df=len(rng))\n return chi_squared_stat, p_value", "def example_A():\n d = dit.example_dists.Xor()\n\n # Calculate marginal maximum entropy distributions up to order 3.\n maxent_dists = dit.algorithms.marginal_maxent_dists(d, 3)\n\n print_output(d, maxent_dists)\n\n return maxent_dists", "def nmi(y_pred, y_true, average_method='geometric'):\n return metrics.normalized_mutual_info_score(y_true, y_pred, average_method=average_method)", "def mcd(x, y):\n r = x % y\n if r == 0:\n return y\n else:\n return mcd(y, r)", "def _densityctr(self, rangex, rangey, dim = misc.DEF_VIS_DIM):\n gr = N.meshgrid(rangex, rangey)\n x = gr[0].flatten()\n y = gr[1].flatten()\n xdata = N.concatenate((x[:, N.newaxis], y[:, N.newaxis]), axis = 1)\n dmu = self.mu[:, dim]\n dva = self._get_va(dim)\n den = GM.fromvalues(self.w, dmu, dva).pdf(xdata, log = True)\n den = den.reshape(len(rangey), len(rangex))\n\n return gr[0], gr[1], den", "def _compute_prob_y_given_x(self, _x, _y):\n normalisation_constant = sum([\n math.exp(sum([self.weights[_feature] *\n self.feature_funcs[_feature, cls](_feature, cls)\n for _feature in _x]))\n for cls in self.classes])\n\n return math.exp(sum([\n self.weights[_feature] *\n self.feature_funcs[_feature, _y](_feature, _y)\n for _feature in _x])) / normalisation_constant", "def L2_dists(x, y):\n #print(x.shape)\n #print(y.shape)\n dists = -2 * np.matmul(x, y.T)\n dists += np.sum(x**2)[np.newaxis]\n dists += np.sum(y**2)\n return np.sqrt(dists)", "def cid(x, y):\n assert(len(x.shape) == 2 and x.shape == y.shape) # time series must have same length and dimensionality\n ce_x = np.sqrt(np.sum(np.square(np.diff(x, axis=0)), axis=0) + 
1e-9)\n ce_y = np.sqrt(np.sum(np.square(np.diff(y, axis=0)), axis=0) + 1e-9)\n d = np.sqrt(np.sum(np.square(x - y), axis=0)) * np.divide(np.maximum(ce_x, ce_y), np.minimum(ce_x, ce_y))\n return np.sum(d)", "def occupation_distribution(data):", "def mutual_information_monte_carlo_extrapolate(self, ret_prob_activity=False):\n if self.is_correlated_mixture:\n raise NotImplementedError('Not implemented for correlated mixtures')\n \n base = 2 ** np.arange(0, self.Nr)\n prob_s = self.substrate_probabilities\n\n max_steps = self._sample_steps\n steps, MIs = [], []\n\n # sample mixtures according to the probabilities of finding\n # substrates\n count_a = np.zeros(2**self.Nr)\n step_check = 10000\n for step in range(max_steps):\n # choose a mixture vector according to substrate probabilities\n m = (np.random.random(self.Ns) < prob_s)\n \n # get the associated output ...\n a = np.dot(self.sens_mat, m).astype(np.bool)\n # ... and represent it as a single integer\n a = np.dot(base, a)\n # increment counter for this output\n count_a[a] += 1\n\n if step == step_check - 1:\n # do an extrapolation step\n # calculate the mutual information from the result pattern\n prob_a = count_a / step\n MI = -sum(pa*np.log2(pa) for pa in prob_a if pa != 0)\n \n # save the data \n steps.append(step)\n MIs.append(MI)\n \n # do the extrapolation\n if len(steps) >= 3:\n a2, a1, a0 = MIs[-3:]\n MI_ext = (a0*a2 - a1*a1)/(a0 - 2*a1 + a2)\n# MI_ext = self._get_extrapolated_mutual_information(steps, MIs)\n print((step, MIs[-1], MI_ext))\n \n step_check += 10000\n \n else:\n # count_a contains the number of times output pattern a was observed.\n # We can thus construct P_a(a) from count_a. \n \n # calculate the mutual information from the result pattern\n prob_a = count_a / step\n MI = -sum(pa*np.log2(pa) for pa in prob_a if pa != 0)\n\n if ret_prob_activity:\n return MI, prob_a\n else:\n return MI", "def get_trans_pmi(x2ys, x2cnt, y2cnt, Nxy, Nx, Ny, width, n_trans):\n x2ys_pmi = dict()\n pmi_diff = -np.log2(Nxy) + np.log2(Nx) + np.log2(Ny)\n for x, ys in tqdm(x2ys.items()):\n l_scores = []\n for y, cnt in sorted(ys.items(), key=operator.itemgetter(1),\n reverse=True)[:width]:\n pmi = np.log2(cnt) - np.log2(x2cnt[x]) - np.log2(y2cnt[y])\n pmi += pmi_diff\n l_scores.append((y, pmi))\n trans = sorted(l_scores, key=operator.itemgetter(1, 0), reverse=True)[:n_trans]\n trans = [each[0] for each in trans]\n x2ys_pmi[x] = trans\n\n return x2ys_pmi", "def dist(self, X, Y):\n raise NotImplementedError", "def conditional_MI(data=None, x=None, y=None, z=None):\n X = data[list(x)].astype(int)\n Y = data[list(y)].astype(int)\n t = list(z)\n Z = data[t].astype(int)\n Z = Z.values.tolist()\n Z = list(data[t].itertuples(index=False, name=None))\n Hxz = entropy(map(lambda x: \"%s/%s\" % x, zip(X, Z))) # Finding Joint entropy of X and Z\n Hyz = entropy(map(lambda x: \"%s/%s\" % x, zip(Y, Z))) # Finding Joint entropy of Y and Z\n Hz = entropy(Z) # Finding Entropy of Z\n Hxyz = entropy(map(lambda x: \"%s/%s/%s\" % x, zip(X, Y, Z))) # Finding Joint Entropy of X, Y and Z\n return Hxz + Hyz - Hxyz - Hz", "def mutual_information_from_table(P):\n P_nan = P.copy()\n P_nan[P_nan == 0] = np.nan\n\n marginals_p1 = np.nansum(P_nan, axis=1)\n marginals_p2 = np.nansum(P_nan, axis=0)\n\n return np.nansum(np.multiply(P_nan, np.log2(P_nan / (np.tensordot(marginals_p1, marginals_p2, axes=0)))))", "def information_gain(Y, attr):\n initial_gain = entropy(Y)\n\n temp_Y = Y.tolist()\n temp_attr = attr.tolist()\n\n temp_attr = list(np.unique(attr))\n\n for a in 
temp_attr:\n l = []\n count = 0\n for j in attr:\n if (j == a):\n l.append(temp_Y[count])\n count+=1\n initial_gain -= ((len(l) / len(temp_Y)) * entropy(pd.Series(l)))\n return initial_gain", "def calc_euclidian_dists(x, y):\n n = x.shape[0]\n m = y.shape[0]\n x = tf.tile(tf.expand_dims(x, 1), [1, m, 1])\n y = tf.tile(tf.expand_dims(y, 0), [n, 1, 1])\n return tf.reduce_mean(tf.math.pow(x - y, 2), 2)", "def informationGain2(data, attribute):\n \n split_data = splitBy(data, attribute) \n weighted_entropies = 0\n \n for set in split_data:\n weighted_entropies += len(set) / len(data) * entropy2(set) \n \n columnIG = entropy2(data) - weighted_entropies\n \n return columnIG", "def get_mi(x, y, k=1, normalize=None, norm=np.inf, estimator='ksg'):\n\n if normalize:\n x = normalize(x)\n y = normalize(y)\n\n # construct state array for the joint process:\n xy = np.c_[x,y]\n\n if estimator == 'naive':\n # compute individual entropies\n hx = get_h(x, k=k, norm=norm)\n hy = get_h(y, k=k, norm=norm)\n hxy = get_h(xy, k=k, norm=norm)\n\n # compute mi\n mi = hx + hy - hxy\n\n elif estimator == 'ksg':\n\n # store data pts in kd-trees for efficient nearest neighbour computations\n # TODO: choose a better leaf size\n x_tree = cKDTree(x)\n y_tree = cKDTree(y)\n xy_tree = cKDTree(xy)\n\n # kth nearest neighbour distances for every state\n # query with k=k+1 to return the nearest neighbour, not counting the data point itself\n # dist, idx = xy_tree.query(xy, k=k+1, p=norm)\n dist, idx = xy_tree.query(xy, k=k+1, p=np.inf)\n epsilon = dist[:, -1]\n\n # for each point, count the number of neighbours\n # whose distance in the x-subspace is strictly < epsilon\n # repeat for the y subspace\n n = len(x)\n nx = np.empty(n, dtype=np.int)\n ny = np.empty(n, dtype=np.int)\n for ii in range(n):\n # nx[ii] = len(x_tree.query_ball_point(x_tree.data[ii], r=epsilon[ii], p=norm)) - 1\n # ny[ii] = len(y_tree.query_ball_point(y_tree.data[ii], r=epsilon[ii], p=norm)) - 1\n nx[ii] = len(x_tree.query_ball_point(x_tree.data[ii], r=epsilon[ii], p=np.inf)) - 1\n ny[ii] = len(y_tree.query_ball_point(y_tree.data[ii], r=epsilon[ii], p=np.inf)) - 1\n\n mi = digamma(k) - np.mean(digamma(nx+1) + digamma(ny+1)) + digamma(n) # version (1)\n # mi = digamma(k) -1./k -np.mean(digamma(nx) + digamma(ny)) + digamma(n) # version (2)\n\n elif estimator == 'lnc':\n # TODO: (only if you can find some decent explanation on how to set alpha!)\n raise NotImplementedError(\"Estimator is one of 'naive', 'ksg'; currently: {}\".format(estimator))\n\n else:\n raise NotImplementedError(\"Estimator is one of 'naive', 'ksg'; currently: {}\".format(estimator))\n\n return mi", "def _basic_data_info(X, y):\n num_samples, num_feats = X.shape # start with X properties\n\n # Compute distribution\n classes, counts, percs = _class_distribution(y)\n num_classes = classes.size\n\n # Return data info dictionary\n output_dic = {\n \"Num_samples\": num_samples,\n \"Num_feats\": num_feats,\n \"Num_classes\": num_classes,\n \"classes\": classes,\n \"counts\": counts,\n \"percs\": percs\n }\n\n return output_dic", "def cofactors(self,x,y):\r\n return self.factorset(x) & self.factorset(y)", "def mutual_information(pred, true):\n \n #for now , only for univariate forecasting. 
So reshapes entire batch of K timesteps into vector as if single feature\n MI = mutual_info_regression(true.detach().numpy().flatten().reshape(-1,1), pred.detach().numpy().flatten())[0]\n return torch.tensor(MI)", "def marginalize(self, axis):\n \n dist = {}\n\n # -------------------------------------------------------------------------\n # YOUR CODE GOES HERE\n #\n \n # get relevant data based on the given random variable\n for i in self._table:\n if axis == 0:\n if i[0] in dist:\n dist[i[0]] += self._table[i]\n else:\n dist[i[0]] = self._table[i]\n else:\n if i[1] in dist:\n dist[i[1]] += self._table[i]\n else:\n dist[i[1]] = self._table[i]\n\n\n #\n # END OF YOUR CODE\n # ------------------------------------------------------------------------- \n\n return dist", "def mmd2(K_xx, K_yy, K_xy, w_x=None, w_y=None):\n n, m = K_xy.shape\n\n assert n == K_xx.shape[0], \"Shapes must conform between K_xx and K_xy, K_xx.shape == {}, K_xy.shape == {}\".format(\n K_xx.shape, K_xy.shape)\n assert m == K_yy.shape[0], \"Shapes must conform between K_yy and K_xy, K_yy.shape == {}, K_xy.shape == {}\".format(\n K_yy.shape, K_xy.shape)\n\n if isinstance(w_x, np.ndarray) and isinstance(w_y, np.ndarray):\n assert np.isclose(w_x.sum(), 1) and np.isclose(\n w_y.sum(), 1), \"w_x and w_y must sum to 1\"\n assert w_x.shape == (n, 1) and w_y.shape == (\n m, 1), \"w_x and w_y must conform to K_xx and K_yy, have w_x.shape == {}, w_y.shape == {} and K_xx.shape == {}, K_yy == {}\".format(w_x.shape, w_y.shape, K_xx.shape, K_yy.shape)\n assert (w_x >= 0).all(), \"All entries of w_x should be greater than zero\"\n assert (w_y >= 0).all(), \"All entries of w_y should be greater than zero\"\n mmd2 = w_x.T @ K_xx @ w_x - 2 * w_x.T @ K_xy @ w_y + w_y.T @ K_yy @ w_y\n else:\n mmd2 = (K_xx.sum() / (n**2)) + (K_yy.sum() / (m**2)) - \\\n 2 * (K_xy.sum() / (m*n))\n\n # Had problem with negative values on order of machine epsilon\n mmd2 += 2 * np.finfo(float).eps\n assert mmd2 > 0.0, \"mmd2 should be non-negative, is {}\".format(mmd2)\n\n return mmd2", "def prob(self, x, y):\n p = self.tag_prob(y)\n for i in range(len(y)):\n p *= self.out_prob(x[i], y[i])\n\n return p", "def marginal_2D(self, wrt_x, wrt_y, figure_name=None):\n res = self.marginalize_wrt_x_y(wrt_x, wrt_y)\n tagx= np.array([tup[0] for tup in res], dtype=np.int16)\n tagy= np.array([tup[1] for tup in res], dtype=np.int16)\n z = np.array([tup[2] for tup in res], dtype=np.float32)\n\n if wrt_x == 'logD':\n x = tagx[:]\n else:\n x = self.convert_tags_to_features(tagx, wrt_x)\n if wrt_y == 'logD':\n y = tagy[:]\n else:\n y = self.convert_tags_to_features(tagy, wrt_y)\n\n nx = ny = 101\n xi = np.linspace(min(x), max(x), nx)\n yi = np.linspace(min(y), max(y), ny)\n xi_ = xi[None,:]\n yi_ = yi[:,None]\n # zi0 = mlab.griddata(x, y, z, xi, yi, interp='linear')\n zi = interpolate.griddata((x, y), z, (xi_, yi_), method='cubic')\n zi[np.isnan(zi)] = 0.0\n max_zi = np.max(zi)\n zi_low = max_zi / 100.0\n ind_zero = zi <= zi_low\n zi[zi <= ind_zero] = 0\n\n if figure_name is None: return (x, y, xi, yi, zi)\n\n # Continue making the figure\n fig, ax = plt.subplots(1, figsize=(4,4), dpi=100, tight_layout=True)\n\n lev = marg_contour_levels\n # c = ax.contour(xi, yi, zi, lev, linestyle='dotted', linewidth=0.5, color='k')\n cf = ax.contourf(xi, yi, zi, lev, zorder=1, cmap=plt.get_cmap('Greys'),\n norm=plt.Normalize(vmin=0, vmax=abs(zi).max())) \n ax.scatter(x, y, marker=',', facecolor='grey', edgecolor='grey', s=1, zorder=2)\n cb = fig.colorbar(cf, ax=ax, shrink=1.00)\n\n 
ax.set_xlabel(utils.feature_name_in_latex(wrt_x))\n ax.set_ylabel(utils.feature_name_in_latex(wrt_y))\n\n if wrt_x == 'logD': \n ax.xaxis.set_ticks(range(5))\n ax.set_xticklabels(logD_ticks, rotation=45, fontsize='small')\n\n if wrt_y == 'logD': \n ax.yaxis.set_ticks(range(5))\n ax.set_yticklabels(logD_ticks, rotation=45, fontsize='small')\n\n if figure_name is not None:\n plt.savefig(figure_name)\n logger.info('marginal_2D: saved {0}'.format(figure_name))\n plt.close()\n\n return (x, y, xi, yi, zi)", "def get_pmi_mvn(x, y, z):\n\n d = x.shape[1]\n hz = 0.5 * log((2 * np.pi * np.e)**d * det(np.cov(z.T)))\n hxz = 0.5 * log((2 * np.pi * np.e)**(2*d) * det(np.cov(x.T, y=z.T)))\n hyz = 0.5 * log((2 * np.pi * np.e)**(2*d) * det(np.cov(y.T, y=z.T)))\n hxyz = 0.5 * log((2 * np.pi * np.e)**(3*d) * det(np.cov(np.c_[x,y,z].T)))\n\n pmi = hxz + hyz - hxyz - hz\n return pmi", "def _mutual_info(self, object_focus, premise_focus, object_cp):\n\n # match up change points with known object's\n assert object_focus.shape[0] == premise_focus.shape[0]\n n_focus = object_focus.shape[0]\n object_cp_mask = util.list_to_mask(object_cp, n_focus)\n\n # filter out those timestamp when object's far from premise\n prox_mask = self._get_prox_mask(object_focus, premise_focus)\n match_mask = object_cp_mask & prox_mask\n diffs_mask = object_cp_mask ^ prox_mask\n if self.verbose:\n logger.info('prox filter valid= %d: match= %3d, diff= %3d'%(\n np.sum(prox_mask), np.sum(match_mask), np.sum(diffs_mask)))\n\n # probability of proximal conditioned on changepoint\n frame_shape = self.frame_source.get_shape()[-2:]\n frame_shape = (frame_shape[0] // 8, frame_shape[1] // 8)\n is_object_cp = set(object_cp)\n tp, fp, fn, tn = 1e-10, 1e-10, 1e-10, 1e-10\n cp_cnt = np.zeros(frame_shape)\n prox_cnt = np.zeros(frame_shape)\n cp_prox_cnt = np.zeros(frame_shape)\n total_cnt = np.zeros(frame_shape)\n for i, obj_fx in enumerate(object_focus):\n obj_x = (obj_fx * frame_shape).astype(int)\n total_cnt[obj_x[0], obj_x[1]] += 1\n if prox_mask[i]:\n prox_cnt[obj_x[0], obj_x[1]] += 1\n if i in is_object_cp:\n tp += 1\n else:\n fp += 1\n else:\n if i in is_object_cp:\n fn += 1\n else:\n tn += 1\n for cpi in object_cp:\n obj_x = (object_focus[cpi] * frame_shape).astype(int)\n premise_x = (premise_focus[cpi] * frame_shape).astype(int)\n cp_cnt[obj_x[0], obj_x[1]] += 1\n if prox_mask[cpi]:\n cp_prox_cnt[obj_x[0], obj_x[1]] += 1\n none_cnt = total_cnt - cp_cnt - prox_cnt + cp_prox_cnt\n cnt_valid = np.logical_and(cp_prox_cnt > 0.0, \n prox_cnt - cp_prox_cnt > 0.0)\n cndcp_score = np.mean(2 * cp_prox_cnt[cnt_valid]\n / (cp_cnt[cnt_valid] + prox_cnt[cnt_valid])) \\\n if np.sum(cnt_valid) > 0.0 else 0.0\n # util.confuse_metrics(cp_prox_cnt, prox_cnt-cp_prox_cnt, \n # cp_cnt-cp_prox_cnt, none_cnt)\n \n # print('tp', int(tp), 'fp', int(fp), 'fn', int(fn), 'tn', int(tn))\n # util.confuse_metrics(tp, fp, fn, tn)\n\n return np.sum(match_mask)/n_focus, \\\n np.sum(diffs_mask)/n_focus, \\\n np.sum(prox_mask)/n_focus, \\\n cndcp_score", "def _mutual_info(self, focus, object_cp, premise_cp):\n\n # match up change points with known object's\n n_focus = focus.shape[0]\n match_mask, diffs_mask = util.match_diffs(object_cp, premise_cp, n_focus)\n\n return np.sum(match_mask)/n_focus, np.sum(diffs_mask)/n_focus, n_focus", "def posteriorDistribution(x,z,beta,sigma2):\r\n ### TODO: Write your code here\r\n mu = 0\r\n Cov = 0\r\n\r\n x_s = []\r\n for i in np.linspace(-1 , 1 , 150):\r\n for j in np.linspace(-1 , 1 , 150):\r\n x_s.append([i,j])\r\n x_s = 
np.array(x_s)\r\n\r\n X = []\r\n for i in x:\r\n j = [1,i[0]]\r\n X.append(j)\r\n X = np.array(X)\r\n\r\n common = np.matmul( X.T , X) + np.identity(2) * sigma2/beta\r\n common = np.linalg.inv(common)\r\n Cov = common * sigma2\r\n mu = np.matmul(common , np.matmul (X.T , z) )\r\n mu = mu.flatten()\r\n print(\"X.shape: \" , X.shape)\r\n print(\"z.shape: \",z.shape)\r\n print(\"Cov.shape\" , Cov.shape)\r\n print(\"mu.shape: \",mu.shape)\r\n density = util.density_Gaussian(mu , Cov , x_s).reshape(150 , 150 ).T\r\n print(\"density.shape\",density.shape)\r\n X,Y = np.meshgrid( np.linspace(-1,1,150) , np.linspace(-1,1,150) )\r\n\r\n \r\n\r\n plt.contour( X , Y , np.reshape(density , (150, 150 )))\r\n plt.plot(-0.1 , -0.5 , marker = 'o' , MarkerSize = 10 , label = 'point a')\r\n plt.xlabel('a0 ')\r\n plt.ylabel(' a1 ')\r\n plt.legend()\r\n plt.xlim = (-1,1)\r\n plt.ylim = (-1,1)\r\n plt.title('p(a|x1,z1....xn,zn) for '+ str(len(x)) +' samples')\r\n plt.show() \r\n print('-x-x-x-x-x-x-x-x-x')\r\n\r\n return (mu,Cov)", "def cross_entropy(x, y, bins, xy_probabilities=False):\n # calculate probabilities if probabilities == False\n if xy_probabilities:\n # same bins for x and y -> same length of x and y if xy_probabilities == True\n assert len(x) == len(y)\n\n # if x does not sum up to 1, raise an error\n if not np.isclose(sum(x),1,atol=0.0001):\n raise ValueError('Probabilities in vector x do not sum up to 1.')\n # if y does not sum up to 1, raise an error\n if not np.isclose(sum(y),1,atol=0.0001):\n raise ValueError('Probabilities in vector y do not sum up to 1.')\n\n # add a small number to all probabilities if zero occurs\n if x.any(0):\n px = x + 1e-15\n py = y + 1e-15\n else:\n px = x\n py = y\n else:\n # get the bins, joint bins for x and y (same_bins=True)\n bins = get_2D_bins(x, y, bins, same_bins=True)\n\n # calculate unconditioned histograms\n hist_x = np.histogram(x, bins=bins[0])[0]\n hist_y = np.histogram(y, bins=bins[1])[0]\n\n px = (hist_x / np.sum(hist_x)) + 1e-15\n py = (hist_y / np.sum(hist_y)) + 1e-15\n\n return - px.dot(np.log2(py))", "def obtain_consistent_marginals(self, priv_marginal_config, priv_split_method) -> Marginals:\n\n # generate_all_pub_marginals() generates all the one and two way marginals of the public set which is implemented in DataLoader.py\n if self.data.pub_ref:\n pub_marginals = self.data.generate_all_pub_marginals()\n \n # get_noisy_marginals() is in synthesizer.py\n # which first calls generate_..._by_config(), and computes on priv_data to return marginal_sets, epss\n # (note that 'marginal_key' could be 'priv_all_one_way' or 'priv_all_two_way')\n # later it calls anonymize() which add noises to marginals\n # (what decides noises is 'priv_split_method') \n # priv_split_method[set_key]='lap' or....\n # Step 1: generate noisy marginals\n noisy_marginals = self.get_noisy_marginals(priv_marginal_config, priv_split_method)\n\n # since calculated on noisy marginals\n # we use mean function to estimate the number of synthesized records\n num_synthesize_records = np.mean([np.sum(x.values) for _, x in noisy_marginals.items()]).round().astype(np.int)\n print(\"------------------------> now we get the estimate of records' num by averaging from nosiy marginals:\", num_synthesize_records)\n \n \n \n # the list of all attributes' name(str) except the identifier attribute\n self.attr_list = self.data.obtain_attrs()\n # domain_list is an array recording the count of each attribute's candidate values\n self.domain_list = np.array([len(self.data.encode_schema[att]) for att 
in self.attr_list])\n \n # map the attribute str to its index in attr_list, for possible use\n # use enumerate to return Tuple(index, element) \n self.attr_index_map = {att: att_i for att_i, att in enumerate(self.attr_list)}\n\n\n # views are wrappers of marginals with additional functions for consistency\n # if there exist public dataset to refer to\n if self.data.pub_ref:\n pub_onehot_view_dict, pub_attr_view_dict = self.construct_views(pub_marginals)\n # Step 2: create some data structures\n noisy_onehot_view_dict, noisy_attr_view_dict = self.construct_views(noisy_marginals)\n \n # all_views is one-hot to view dict, views_dict is attribute to view dict\n # they have different format to satisfy the needs of consistenter and synthesiser\n # to fit in code when we do not have public things to utilize \n if not self.data.pub_ref:\n pub_onehot_view_dict = noisy_onehot_view_dict\n pub_attr_view_dict = noisy_attr_view_dict\n\n self.onehot_view_dict, self.attrs_view_dict = self.normalize_views(\n pub_onehot_view_dict,\n pub_attr_view_dict,\n noisy_attr_view_dict,\n self.attr_index_map,\n num_synthesize_records)\n\n # consist the noisy marginals to submit to some rules\n consistenter = Consistenter(self.onehot_view_dict, self.domain_list)\n consistenter.consist_views()\n\n # consistenter uses unnormalized counts;\n # after consistency, synthesizer uses normalized counts\n for _, view in self.onehot_view_dict.items():\n view.count /= sum(view.count)\n\n return noisy_marginals, num_synthesize_records", "def make_frequency_table(x, y, X, Y):\n freq = dict()\n\n for i in range(len(X)):\n freq[X[i]] = [0, 0]\n\n # merging the two to get a matrix\n\n M = np.array([[x[i], y[i]] for i in range(len(x))])\n\n for i in range(len(M)):\n if M[i][1] == Y[0]:\n freq[M[i][0]][0] += 1\n else:\n freq[M[i][0]][1] += 1\n\n return freq", "def _update_info_and_n(self, y_i, h_tilde, phi_p, msr_cov):\n h_i = np.matmul(h_tilde, phi_p)\n # update fisher_info\n L = np.matmul(np.transpose(h_i), np.matmul(msr_cov, h_i)) # placeholder matrix for computations\n self.fisher_info.append(np.add(self.fisher_info[-1], L))\n # update N\n M = np.matmul(np.transpose(h_i), np.matmul(msr_cov, np.transpose(y_i))) #placeholder matrix for computations\n self.N.append(np.add(self.N[-1], M))", "def _impurity(y, y1, y2, sample_weights=None):\n # YOUR CODE HERE\n # begin answer\n weight_1=len(y1)\n weight_2=len(y2)\n meal_1=np.sum(y1)/float(weight_1)\n meal_2=np.sum(y2)/float(weight_2)\n diff=meal_1-meal_2\n sum_var=weight_1*weight_2*diff*diff/(weight_1+weight_2)\n # end answer\n return sum_var", "def get_mutual_information(filename):\n categories = {} #{category: speakers of this category}\n features = {} #{feat: speakers who use this feature}\n pos_categories_features = {} #{category: {feat: speakers of category who use this feat}}\n neg_categories_features = {} #{category: {feat: speakers of category who do not use this feat}}\n users = set() #set of all users in data\n \n for line in open(filename):\n userid, c, date, statusid, rawtweet, toktweet, tagtweet = line.split('\\t')\n users.add(userid)\n \n if c not in categories:\n categories[c] = set()\n pos_categories_features[c] = {}\n categories[c].add(userid)\n \n feats = set(toktweet.lower().split()) #lowercase tweet and split into words\n\n for feat in feats:\n if feat not in pos_categories_features[c]:\n pos_categories_features[c][feat] = set()\n pos_categories_features[c][feat].add(userid)\n \n if feat not in features:\n features[feat] = set()\n features[feat].add(userid)\n\n print 
\"Parsed data\"\n\n numfeats = len(features) #num of features\n print numfeats, \"features\"\n numusers = len(users) #num of users \n print numusers, \"users\"\n\n #keep sizes of sets, not sets themselves\n for feat in features:\n features[feat] = len(features[feat])\n for c in categories:\n categories[c] = len(categories[c])\n for c in pos_categories_features:\n for feat in features:\n if feat in pos_categories_features[c]:\n pos_categories_features[c][feat] = len(pos_categories_features[c][feat])\n else:\n pos_categories_features[c][feat] = 0\n\n for c in categories:\n print c, categories[c], \"users\"\n\n print \"Computed counts\"\n \n mi = {}\n for feat in features:\n mi[feat] = 0.0\n for c in categories:\n #print c, feat, features[feat], pos_categories_features[c][feat]\n \n catprob = categories[c]/numusers\n\n #prob of speakers of category c using feat\n featprob = features[feat]/numusers\n jointprob = pos_categories_features[c][feat]/numusers\n if jointprob > 0 and featprob > 0:\n mi[feat] += jointprob * log2(jointprob/(catprob * featprob))\n \n #prob of speakers of category c NOT using feat\n featprob = 1 - featprob\n jointprob = (categories[c] - pos_categories_features[c][feat])/numusers\n if jointprob > 0 and featprob > 0:\n mi[feat] += jointprob * log2(jointprob/(catprob * featprob))\n\n print \"Computed mutual information\"\n\n feature_scores = sorted(mi.items(), key=lambda x:x[1], reverse=True)\n refcat = categories.keys()[0] #pick one of the categories\n print 'Feature\\tMI\\tP({0}|Feature)\\tNum. users'.format(refcat)\n for feat, score in feature_scores[:200]:\n prob = pos_categories_features[refcat][feat]/features[feat]\n print '{0}\\t{1:.3f}\\t{2:.3f}\\t{3}'.format(feat, score, prob, features[feat])", "def C(self, y, x):\n return self.minor(y,x).det()*(-1.0)**(y+x+2.0)", "def mmi_norm(self, x, y, tuples):\r\n P_ = {x: self.P(x, tuples), y: self.P(y, tuples)}\r\n P_xy = self.condP(x, y, tuples)\r\n return - P_[x] * log2(P_[x]) - P_[y] * (-P_xy * log2(P_xy))", "def information_reduction(\n X: np.ndarray, Y: np.ndarray, uni_entropy: Callable, tol_dims: int, p: float = 0.25,\n) -> float:\n # calculate the marginal entropy\n hx = jax.vmap(uni_entropy)(X.T)\n hy = jax.vmap(uni_entropy)(Y.T)\n\n # Information content\n delta_info = np.sum(hy) - np.sum(hx)\n tol_info = np.sqrt(np.sum((hy - hx) ** 2))\n\n # get tolerance\n n_dimensions = X.shape[1]\n\n # conditional\n cond = np.logical_or(\n tol_info < np.sqrt(n_dimensions * p * tol_dims ** 2), delta_info < 0\n )\n return np.array(np.where(cond, 0.0, delta_info))" ]
[ "0.8168408", "0.78968424", "0.74779177", "0.73788935", "0.72612417", "0.71773905", "0.7003771", "0.66112494", "0.63962644", "0.6395062", "0.6346539", "0.6204402", "0.61775285", "0.61775285", "0.6012243", "0.59877944", "0.58945143", "0.588451", "0.5869148", "0.5841374", "0.5809743", "0.5729805", "0.5727414", "0.56662536", "0.5636081", "0.56304675", "0.5628946", "0.5613597", "0.5563033", "0.55395275", "0.5447546", "0.5433021", "0.543216", "0.54252166", "0.5414748", "0.54035026", "0.53933233", "0.5380384", "0.5380088", "0.53800124", "0.5378057", "0.53614", "0.53254634", "0.527559", "0.52741164", "0.52741164", "0.5272789", "0.527138", "0.5267276", "0.5230705", "0.5224003", "0.522376", "0.5207498", "0.51915324", "0.5176436", "0.5167963", "0.51548064", "0.5146139", "0.5128395", "0.51241577", "0.5120819", "0.5117544", "0.51168674", "0.5107121", "0.51004153", "0.5099705", "0.5099515", "0.50876856", "0.5086105", "0.50697327", "0.5061687", "0.5059447", "0.505844", "0.5046841", "0.50450414", "0.5036962", "0.50272846", "0.50199515", "0.501416", "0.50084525", "0.49955836", "0.49914473", "0.4981081", "0.4976352", "0.4972747", "0.49637088", "0.49626595", "0.49565917", "0.49551675", "0.49524516", "0.49500343", "0.49450034", "0.4944269", "0.494324", "0.49373305", "0.4935795", "0.49297708", "0.49266875", "0.49172008", "0.49082807" ]
0.7191145
5
Cross Entropy: calculates the cross entropy of two discrete distributions x and y.
def cross_entropy(x, y, bins, xy_probabilities=False):
    # calculate probabilities if probabilities == False
    if xy_probabilities:
        # same bins for x and y -> same length of x and y if xy_probabilities == True
        assert len(x) == len(y)

        # if x does not sum up to 1, raise an error
        if not np.isclose(sum(x),1,atol=0.0001):
            raise ValueError('Probabilities in vector x do not sum up to 1.')
        # if y does not sum up to 1, raise an error
        if not np.isclose(sum(y),1,atol=0.0001):
            raise ValueError('Probabilities in vector y do not sum up to 1.')

        # add a small number to all probabilities if zero occurs
        if x.any(0):
            px = x + 1e-15
            py = y + 1e-15
        else:
            px = x
            py = y
    else:
        # get the bins, joint bins for x and y (same_bins=True)
        bins = get_2D_bins(x, y, bins, same_bins=True)

        # calculate unconditioned histograms
        hist_x = np.histogram(x, bins=bins[0])[0]
        hist_y = np.histogram(y, bins=bins[1])[0]

        px = (hist_x / np.sum(hist_x)) + 1e-15
        py = (hist_y / np.sum(hist_y)) + 1e-15

    return - px.dot(np.log2(py))
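For reference, a minimal usage sketch of the cross_entropy function above, assuming numpy is imported as np; the vectors p and q are illustrative stand-ins for two discrete probability distributions, so the histogram branch (and its get_2D_bins helper) is not exercised here.

import numpy as np

# cross entropy H(p, q) = -sum(p * log2(q)); p and q are already
# probability vectors, so pass xy_probabilities=True and bins is unused
p = np.array([0.25, 0.25, 0.25, 0.25])
q = np.array([0.70, 0.10, 0.10, 0.10])
h_pq = cross_entropy(p, q, bins=None, xy_probabilities=True)  # roughly 2.62 bits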
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cross_entropy(x, y):\n\n if len(y.shape) == 1:\n return F.cross_entropy(x, y)\n if y.shape[1] == 1:\n y = y.squeeze(1)\n return F.cross_entropy(x, y)\n\n return torch.mean(\n torch.div(\n F.binary_cross_entropy_with_logits(x, y, reduction=\"none\"),\n torch.sum(y, dim=1),\n )\n )", "def cEntropy(Y, X):\n return jEntropy(Y, X) - entropy(X)", "def cross_entropy(X, y):\n return lambda theta: -y * np.log(logistic_hypothesis(theta)(X) + 1e-9) - (\n 1 - y\n ) * np.log(1 - logistic_hypothesis(theta)(X) + 1e-9)", "def cross_entropy_error(self, x, y):\n return -1 * sum([y[i] * np.log(self.logistic_function(self.weights.dot(x[i]))) + (1-y[i]) * np.log(1-self.logistic_function(self.weights.dot(x[i]))) for i in range(len(y))])", "def crossEntropy(p_m1):\n p_m2 = 1 - p_m1\n D = - p_m1*math.log(p_m1) - p_m2*math.log(p_m2)\n return D", "def cross_entropy(y_prob,y):\n from numpy import log, sum\n m = y.shape[0]\n p = y_prob\n log_likelihood = -log(p[range(m),y])\n loss = sum(log_likelihood) / m\n return loss", "def cross_entropy(y, y_hat):\n return -tf.math.log(\n tf.gather_nd(y_hat, tf.reshape(y, (-1, 1)), batch_dims=1)\n )", "def mutual_information(x, y):\r\n\r\n # INSERT YOUR CODE HERE\r\n xvalue, xcount = np.unique(x,return_counts = True)\r\n probx = xcount.astype(float)/len(x)\r\n Hyx = 0.0\r\n for pxval,xval in zip(probx,xvalue):\r\n Hyx += (pxval)*entropy(y[x==xval])\r\n \r\n Ixy = entropy(y) - Hyx\r\n return Ixy\r\n raise Exception('Function not yet implemented!')", "def cross_entropy(self, yhat):\n n = len(self._y)\n c = 0.0\n for i in range(0, n):\n c += self._y[i] * log(\n yhat[i]) + (1 - self._y[i]) * log(1 - yhat[i])\n\n return c", "def alt_cohen_d(x_arr, y_arr):\n delta = np.mean(x_arr) - np.mean(y_arr)\n pooled_std = np.sqrt((np.std(x_arr, ddof=1) ** 2 +\n np.std(y_arr, ddof=1) ** 2) / 2.0)\n return delta / pooled_std", "def cross_entropy(y_observed, p):\n\n pass", "def entropy(y):\r\n\r\n # INSERT YOUR CODE HERE\r\n value, count = np.unique(y,return_counts = True)\r\n Hy = 0.0\r\n prob = count.astype(float)/len(y)\r\n for p in prob:\r\n Hy += -(p)*(np.log2(p))\r\n return Hy\r\n raise Exception('Function not yet implemented!')", "def cohen_d(x_arr, y_arr):\n delta = np.mean(x_arr) - np.mean(y_arr)\n pooled_std = np.sqrt(\n (\n (len(x_arr) - 1) * np.std(x_arr, ddof=1) ** 2 +\n (len(y_arr) - 1) * np.std(y_arr, ddof=1) ** 2\n ) / (len(x_arr) + len(y_arr))\n )\n return delta / pooled_std", "def cohens_d(x, y):\n nx, ny = len(x), len(y)\n pooled_variance = ((nx - 1) * np.std(x, ddof=1) ** 2 +\n (ny - 1) * np.std(y, ddof=1) ** 2) / \\\n ((nx - 1) + (ny - 1))\n return (np.mean(x) - np.mean(y)) / np.sqrt(pooled_variance)", "def transfer_entropy(X, Y):\n coords = Counter(zip(Y[1:], X[:-1], Y[:-1]))\n\n p_dist = np.zeros((config.NUM_STATES, config.NUM_STATES, config.NUM_STATES))\n for y_f, x_p, y_p in coords.keys():\n p_dist[y_p, y_f, x_p] = coords[(y_f, x_p, y_p)] / (len(X) - 1)\n\n p_yp = p_dist.sum(axis=2).sum(axis=1)\n p_joint_cond_yp = p_dist / p_yp[:, None, None]\n p_yf_cond_yp = p_dist.sum(axis=2) / p_yp[:, None]\n p_xp_cond_yp = p_dist.sum(axis=1) / p_yp[:, None]\n\n denominator = np.multiply(p_yf_cond_yp, p_xp_cond_yp)\n denominator[denominator == 0] = np.nan\n\n division = np.divide(p_joint_cond_yp, denominator[:, :, None])\n division[division == 0] = np.nan\n\n log = np.log2(division)\n\n return np.nansum(np.multiply(p_dist, log))", "def cross_entropy(t,y):\r\n #print(-1*t*np.log(y))\r\n #print(np.shape(np.log(y)))\r\n #print(np.shape(t))\r\n return t*np.log(y)*(-1)", "def 
cross_entropy(y_pred,y):\n \n epsilon = 0.001 # To prevent overflow and ensure numerical stability\n return sum(-y*np.log(y_pred+epsilon))", "def crossEntropy(obs, actual, offset=1e-7):\n # (tf.Tensor, tf.Tensor, float) -> tf.Tensor\n # bound by clipping to avoid nan\n obs_ = tf.clip_by_value(obs, offset, 1 - offset)\n return -tf.reduce_sum(actual * tf.log(obs_) +\n (1 - actual) * tf.log(1 - obs_), 1)", "def conditional_entropy(f1, f2):\n\n ce = ee.entropyd(f1) - ee.midd(f1, f2)\n return ce", "def compute_cross_entropy(probs, target):\n avg_probs_per_sample = probs.mean(\n -1)\n xe = torch.nn.CrossEntropyLoss(reduction='none')\n return xe(avg_probs_per_sample, target).detach().cpu().numpy()", "def cross_entropy(self):\n return self._cross_entropy_func", "def J(W1, b1, W2, b2, x, y):\n yhat = forwardPropagate(W1, b1, W2, b2, x) # OLD: yhat = softmax(x.dot(w))\n return crossEntropy(y, yhat)", "def _entropy(self, y):\n # Get size\n n = y.shape[0]\n summation = 0\n\n # Summatory\n for c_i in np.unique(y):\n prob = sum(y == c_i) / float(n)\n summation += prob * np.log2(prob)\n\n return -summation", "def calculate_entropy(y):\n\tlog2 = lambda x: math.log(x) / math.log(2)\n\tunique_labels = np.unique(y)\n\tentropy = 0\n\tfor label in unique_labels:\n\t\tcount = len(y[y == label])\n\t\tp = count / len(y)\n\t\tentropy += -p * log2(p)\n\treturn entropy", "def cross_entropy(predictions, targets):\n likelihood = targets * np.log(predictions)\n return -np.sum(likelihood) / predictions.shape[0]", "def cross_entropy(X, y, using_onehot=True):\n\tM = y.shape[0]\n\tif using_onehot :\n\t\tlog_likelihood = -np.log(np.max(X * y, -1))\n\telse:\n\t\tlog_likelihood = -np.log(X[range(M), y]) # 找到y对应的那个类别所对应的logit\n\tloss = np.sum(log_likelihood) / M\n\treturn loss", "def entropyCategorical(attr, X, y):\n uniques = X[attr].unique().tolist()\n idxLists = []\n entropies = []\n weights = []\n for u in uniques:\n idxLists.append(X.index[X[attr] == u].tolist())\n entropies.append(entropy(y, idxLists[-1]))\n weights.append(len(idxLists[-1]))\n\n entropies = np.array(entropies).reshape(1, -1)\n weights = np.array(weights).reshape(-1, 1).astype(np.float32)\n weights /= np.sum(weights)\n\n return (uniques, idxLists, (entropies @ weights)[0, 0])", "def entropy(Y):\n\n temp = np.unique(Y, return_counts=True)\n uniq_Y = list(temp[0])\n Y_count = list(temp[1])\n \n total = sum(Y_count)\n\n ent = 0\n for elem in uniq_Y:\n prob = Y_count[uniq_Y.index(elem)] / total\n # print(\"prob:\", prob)\n ent -= (prob * (math.log2(prob)))\n # print(\"ent:\",ent)\n\n return ent", "def crossEntropyPredict(YPredict):\n YPredict = np.atleast_2d(YPredict)\n return np.argmax(YPredict, axis=1)", "def cross_entropy_loss(self, logits, labels):\n return F.cross_entropy(logits, labels)", "def dist_calc(self, x, y):\n p_xy = self.d2_bin(x, y)\n p_x = np.sum(p_xy, axis=1)\n p_y = np.sum(p_xy, axis=0)\n\n p_x_times_p_y = np.tensordot(p_x, p_y, axes = 0)\n info = np.sum(p_xy * np.ma.log(np.ma.divide(p_xy, p_x_times_p_y)))\n entropy = np.sum(-1 * p_xy * np.ma.log(p_xy))\n\n output = max(0.0, (1 - (info / entropy)))\n return output", "def entropy(y):\n EPS = 0.0005\n\n # YOUR CODE HERE\n if len(y) == 0:\n return 0.\n \n pk = np.mean(y, axis=0)\n \n return - np.sum(pk * np.log(pk + EPS))", "def joint_entropy(x, y, bins):\n # assert array length\n assert len(x) == len(y)\n\n # get the bins, x and y get their own bins in case of joint entropy\n bins = get_2D_bins(x, y, bins)\n\n # get the joint histogram\n joint_hist = np.histogram2d(x, y, bins)[0]\n\n # 
calculate the joint probability and add a small number\n joint_p = (joint_hist / np.sum(joint_hist)) + 1e-15\n\n # calculate and return the joint entropy\n return - np.sum(joint_p * np.log2(joint_p))", "def crossEntropyLoss(YPredict, YTrueOneHot):\n if YPredict.shape != YTrueOneHot.shape:\n YTrueOneHot = YTrueOneHot.reshape(YPredict.shape)\n return -np.sum(np.multiply(np.log(YPredict), YTrueOneHot))", "def entropy(y):\n return -1 * sum(\n [\n pipe(np.sum(y == value) / len(y), lambda ratio: ratio * np.log(ratio))\n for value in set(y)\n ]\n )", "def entropy(self, y):\n n = y.size\n if n <= 1:\n return 0\n\n labels, counts = unique(y, return_counts=True)\n\n if counts.size <= 1:\n return 0\n\n probs = counts / n\n entropy = -sum([p * log(p, 2) for p in probs])\n return entropy", "def compute_dists(x, y):\r\n \r\n return (x - y.permute(0, 2, 1)) ** 2", "def entropy(Y):\n unique, count = np.unique(Y, return_counts=True, axis=0)\n prob = count/len(Y)\n en = np.sum((-1)*prob*np.log2(prob))\n return en", "def logistic_loss(x, y):\n N = x.shape[0]\n x = np.squeeze(x)\n y_prime = (y + 1)/2\n h = 1 /(1 + np.exp(-x))\n loss = np.sum(-np.log( (h**y_prime) * ((1-h)**(1-y_prime)) ))/N\n dx = np.exp(-y*x)*(-y)/(1+np.exp(-y*x))/N\n return loss, dx", "def entropy(y):\n p = _proba(y)\n return (-p * np.log2(p)).sum()", "def cross_entropy(self, sents):\n return (-1) * self.log_prob(sents) / len(self._count)", "def cross_entropy(input: Tensor, target: Tensor) -> Tensor:\n norm_log = log_softmax(input, 1)\n\n np_one_hot = np.eye(input.shape[1])[target.data]\n tensor_one_hot = tensor(np_one_hot, 'one-hot', False, True)\n\n mask = -norm_log * tensor_one_hot\n mask_sum = sum(mask, 1)\n loss = sum(mask_sum, 0)\n\n return loss / input.shape[0]", "def cross_entropy_loss(self, y, y_hat):\n if y.ndim == 1:\n batch_size = 1\n else:\n batch_size = y.shape[0]\n delta = 1e-7\n return -np.sum(y * np.log(y_hat + delta)) / batch_size", "def cross_entropy_loss():\n return nn.CrossEntropyLoss()", "def NCC_loss(x, y):\n len_x = torch.sqrt(torch.sum(x ** 2))\n len_y = torch.sqrt(torch.sum(y ** 2))\n return torch.sqrt(torch.sum((x / len_x - y / len_y) ** 2))", "def entropy(y):\n total = y.size\n value_counts = np.bincount(y).astype(\"float\")\n proportions = value_counts / y.size\n\n return sum(-i * np.log(i) for i in proportions if i)", "def categorical_crossentropy(self, y_hat, y):\n y_hat[y_hat == 0] = 10 ** -10\n return -np.sum(y * np.log(y_hat))", "def cross_entropy_four_hot(x: float, y: float, width: int, height: int) -> torch.Tensor:\n t = torch.zeros(height, width)\n\n from_left = (x + 1.0) / 2.0 * (width - 1)\n from_top = (y + 1.0) / 2.0 * (height - 1)\n\n width_idx = math.floor(from_left)\n height_idx = math.floor(from_top)\n\n left_frac = from_left - width_idx\n top_frac = from_top - height_idx\n\n t[height_idx + 0, width_idx + 0] = (1 - top_frac) * (1 - left_frac)\n\n if width_idx + 1 < width:\n t[height_idx + 0, width_idx + 1] = (1 - top_frac) * left_frac\n\n if height_idx + 1 < height:\n t[height_idx + 1, width_idx + 0] = top_frac * (1 - left_frac)\n\n if width_idx + 1 < width and height_idx + 1 < height:\n t[height_idx + 1, width_idx + 1] = top_frac * left_frac\n\n return t", "def cross_entropy(targets, predictions, epsilon=1e-12):\n\n predictions = np.clip(predictions, epsilon, 1. 
- epsilon)\n\n N = predictions.shape[0]\n\n ce = -np.sum(np.sum(targets*np.log(predictions+1e-9)))/N\n\n return ce", "def cross_entropy(\n **kwargs\n) -> Callable:\n return categorical_crossentropy", "def cross(x, y):\n x = x.reshape(3)\n y = y.reshape(3)\n z = np.cross(x, y)\n z = z.reshape((3, 1))\n return z", "def cross_covariance(\n self, kernel: Kernel, x: Float[Array, \"N D\"], y: Float[Array, \"M D\"]\n ) -> Float[Array, \"N M\"]:\n cross_cov = vmap(lambda x: vmap(lambda y: kernel(x, y))(y))(x)\n return cross_cov", "def euclidean_dist(X, y):\n return np.sqrt(np.sum((X - y) ** 2, 1)) # broadcasted calculations", "def cid(x, y):\n assert(len(x.shape) == 2 and x.shape == y.shape) # time series must have same length and dimensionality\n ce_x = np.sqrt(np.sum(np.square(np.diff(x, axis=0)), axis=0) + 1e-9)\n ce_y = np.sqrt(np.sum(np.square(np.diff(y, axis=0)), axis=0) + 1e-9)\n d = np.sqrt(np.sum(np.square(x - y), axis=0)) * np.divide(np.maximum(ce_x, ce_y), np.minimum(ce_x, ce_y))\n return np.sum(d)", "def calc_euclidian_dists(x, y):\n n = x.shape[0]\n m = y.shape[0]\n x = tf.tile(tf.expand_dims(x, 1), [1, m, 1])\n y = tf.tile(tf.expand_dims(y, 0), [n, 1, 1])\n return tf.reduce_mean(tf.math.pow(x - y, 2), 2)", "def entropy(a, b):\n with mp.extradps(5):\n a, b = _validate_a_b(a, b)\n return (_fun.logbeta(a, b)\n - (a - 1)*mp.psi(0, a)\n - (b - 1)*mp.psi(0, b)\n + (a + b - 2)*mp.psi(0, a + b))", "def get_cross_entropy(self):\n assert (self.dataset is not None) and (self.labels is not None), 'Logistic Regression requires a dataset and labels.'\n potential = 0.0\n logits = self.dataset @ self.parameters[:self.dataset.shape[1]]\n max_logits = torch.max(torch.zeros(logits.shape[0]),logits)\n potential = (-logits @ self.labels.t() + torch.sum(max_logits) + torch.sum(\n torch.log(torch.exp(-max_logits)+torch.exp(logits - max_logits))))# * n.reciprocal())\n return potential", "def cv_with_entropy(X, Y):\n\t# Decision tree with entropy\n\tclf_entropy = decision_tree_clf()\n\n\t# Returns score\n\tresult = cross_val_score(\n\t\tclf_entropy, X, Y, \n\t\tscoring='f1_macro', \n\t\tcv=StratifiedKFold(n_splits=5, shuffle=True, random_state=seed)\n\t\t)\n\treturn result", "def cross_entropy(predicted, target):\n batch_size, num_classes = predicted.shape\n\n # Tip: You can implement XELoss all here, without creating a new subclass of Function.\n # However, if you'd prefer to implement a Function subclass you're free to.\n # Just be sure that nn.loss.CrossEntropyLoss calls it properly.\n\n # Tip 2: Remember to divide the loss by batch_size; this is equivalent\n # to reduction='mean' in PyTorch's nn.CrossEntropyLoss\n\n e_x = predicted.exp()\n log_e_x = e_x.log()\n a = log_sum_x_trick(predicted)\n x_n_offset = predicted - a\n\n exp_xn_offset = x_n_offset.exp()\n\n sum_exp_xn_offset = exp_xn_offset.sum(axis=1, keepdims=True)\n log_sum_exp_xn_offset = sum_exp_xn_offset.log()\n denominator = a + log_sum_exp_xn_offset\n log_softmax = log_e_x - denominator\n\n labels = to_one_hot(target, num_classes)\n prod = log_softmax*labels\n total = prod.sum()\n batch_size = tensor.Tensor(-batch_size)\n\n total = total / batch_size\n\n return total", "def cross_entropy(Y, Y_hat, epsilon=1e-8):\n \n m = Y.shape[0]\n \n # make data safe\n Y_hat = np.clip(Y_hat, a_min=epsilon, a_max=(1 - epsilon))\n \n # calc cost\n cost = (-1 / m) * np.nansum(Y * np.log(Y_hat))\n cost = np.squeeze(cost)\n \n # calc gradient\n dY_hat = -Y / Y_hat\n \n return cost, dY_hat", "def associate_comp(x, y):\n return torch.cat([x[:1] * y[:1] - x[1:] * 
y[1:], x[:1] * y[1:] + x[1:] * y[:1]])", "def cross(x, y, target=utils.CCE):\n utils.elemwise_shape_check(get_shape(y), get_shape(x))\n utils.elemwise_dtype_check(\n y.dtype, x.dtype,\n (utils.DtypeForDavinci.ALL_FLOAT) if product_is_mini() \\\n else (utils.DtypeForDavinci.FLOAT16,\n utils.DtypeForDavinci.FLOAT32,\n utils.DtypeForDavinci.INT32,\n utils.DtypeForDavinci.INT8, utils.DtypeForDavinci.UINT8))\n\n shape = get_shape(x)\n\n if shape[0] != 3:\n raise RuntimeError(\n \"The first axis of input must be 3, actual input is %d\" % shape[0])\n\n inp_dtype = x.dtype\n need_type_convert = inp_dtype in (\"int8\", \"uint8\")\n\n shape = get_shape(x)\n shp = shape[1:]\n\n if need_type_convert:\n x = cast(x, \"float16\", target=utils.CCE)\n y = cast(y, \"float16\", target=utils.CCE)\n\n a0b1 = tvm.compute(shp, lambda *i: x(0, *i) * y(1, *i), name=\"a0b1\")\n a0b2 = tvm.compute(shp, lambda *i: x(0, *i) * y(2, *i), name=\"a0b2\")\n a1b0 = tvm.compute(shp, lambda *i: x(1, *i) * y(0, *i), name=\"a1b0\")\n a1b2 = tvm.compute(shp, lambda *i: x(1, *i) * y(2, *i), name=\"a1b2\")\n a2b0 = tvm.compute(shp, lambda *i: x(2, *i) * y(0, *i), name=\"a2b0\")\n a2b1 = tvm.compute(shp, lambda *i: x(2, *i) * y(1, *i), name=\"a2b1\")\n\n res0 = tvm.compute(shp, lambda *i: a1b2(*i) - a2b1(*i), name=\"res0\")\n res1 = tvm.compute(shp, lambda *i: a2b0(*i) - a0b2(*i), name=\"res1\")\n res2 = tvm.compute(shp, lambda *i: a0b1(*i) - a1b0(*i), name=\"res2\")\n\n res = tvm.compute(\n shape,\n lambda *i:\n tvm.expr.Select(\n i[0] == 0,\n res0(*i[1:]),\n tvm.expr.Select(i[0] == 1, res1(*i[1:]), res2(*i[1:]))),\n name='res')\n\n if need_type_convert:\n res = cast(res, inp_dtype, target=utils.CCE)\n\n return res", "def calculate_cross_entropy(self, output, flat_labels): #completed, expensive, should be compiled\n return -np.sum(np.log(np.clip(output, a_min=1E-12, a_max=1.0))[np.arange(flat_labels.shape[0]), flat_labels[:,1]])", "def _cross_valid_h(h, x, y):\n n = x.shape[0]\n # allocate output\n out = np.empty(n)\n # Loop through each regression point\n for i in range(n):\n # all-1 points\n xx = np.delete(x, i, axis=0)\n yy = np.delete(y, i, axis=0)\n z = (xx - x[i, :]) / h\n out[i] = _nadaraya_watson(z, yy)\n cv = np.sum((y - out)**2) / float(n)\n\n return cv", "def d_cross(a, b):\n d_cross = np.zeros((3, 3), dtype=float)\n for i in range(3):\n ei = np.zeros(3, dtype=float)\n ei[i] = 1.0\n d_cross[i] = np.cross(ei, b)\n return d_cross", "def crossentropy_fn(args: StepFunctionArgs) -> SingleScorePerStepTensor:\n return -torch.log2(probability_fn(args))", "def entropy(dist):\n #dist = array([max(d,1e-100) for d in dist])\n dist = dist + 1e-20\n return dot(dist,(log(1.0/dist) * (1.0/log(2.0))).T)", "def gain(Y, X):\n return entropy(Y) - cEntropy(Y,X)", "def cross_cov(a, b): \n da = a - np.mean(a, axis=0)\n db = b - np.mean(b, axis=0)\n\n return np.matmul(da.T, db) / a.shape[0]", "def get_cross_entropy(actual, expected):\n\n\t# use try except to handle overflow in the log\n\ttry:\t\n\t\t# formula -(y*log(a) + (1-y)log(1-a))\n\t\tones = np.ones(actual.shape) # used to subtract array from one\n\t\tcross_entropy_vec = np.multiply(expected, np.log(actual)) + np.multiply(ones - expected, np.log(ones - actual))\n\t\tcross_entropy = np.sum(cross_entropy_vec)\n\texcept:\n\t\treturn sys.maxsize\n\n\treturn -1.0*cross_entropy", "def cross_entropy(self, true_values, predicted_values):\n\n testing_set_size = len(true_values)\n running_sum = 0\n for i in range(len(true_values)):\n true_set = true_values[i]\n predicted_set = 
predicted_values[i]\n running_sum += sum([(true_set[j] * math.log(predicted_set[j])) for j in range(len(true_set))])\n\n #print(f\"Average cross entropy:\\t{-running_sum / testing_set_size}\")\n self.performance += (-running_sum / testing_set_size)\n return -running_sum / testing_set_size", "def chl_entropy(y, base=2):\n p,bins = histogram(y, bins=unique(y)) # don't use 'Normed' feature, since that includes the bin-width!\n p = p[p!=0]/float(len(y))\n S = -1.0*sum(p*log(p))/log(base)\n return S", "def evaluate(self, X, y):\n y_pred = self.predict(X)\n return self.cross_entropy_loss(y, y_pred)", "def cross_covariance(\n self, kernel: Kernel, x: Float[Array, \"N D\"], y: Float[Array, \"M D\"]\n ) -> Float[Array, \"N M\"]:\n # TODO: This is currently a dense implementation. We should implement\n # a sparse LinearOperator for non-square cross-covariance matrices.\n cross_cov = vmap(lambda x: vmap(lambda y: kernel(x, y))(y))(x)\n return cross_cov", "def cross_entropy(p, q, base=2):\n q = ma.array(q, mask=(q == 0))\n return - np.vdot(p, ma.log(q)) / np.log(base)", "def _calculate_probs_and_entropy_y(self):\n #calculate y probabilities and H(Y)\n #H(Y) = Sum(y € Y)(-P(Y=y) * log(P(Y=y)))\n self.lab_entropy = 0\n s = sum(self.lab_counts.values())\n for label, count in self.lab_counts.items():\n self.lab_probs[label] = count / s\n self.lab_entropy -= self.lab_probs[label] * self.log(self.lab_probs[label])", "def entropy(y,w):\r\n\r\n\t# my original entropy function commented below is not working as desired. The below implementation is based on from Sai Ram Chappidi's explanation\r\n\r\n # y_partition = partition(y)\r\n # elements,counts = np.unique(y,return_counts = True)\r\n # entropy=0\r\n\r\n # for i in range(len(elements)):\r\n # entropy += ((-(np.sum(w[y_partition[i]])))/np.sum(w))*np.log2(np.sum(w[y_partition[i]])/np.sum(w))\r\n # return entropy\r\n\r\n entropy = 0\r\n # two hypothesis cases 0,1\r\n h = {0: 0, 1: 0}\r\n leny = len(y)\r\n for i in range(leny):\r\n # if y is 0 add 0 to the weight\r\n if y[i] == 0:\r\n h[0] += w[i]\r\n # if y is 1 add 1 to the weight\r\n elif y[i] == 1:\r\n h[1] += + w[i]\r\n # summing all the weighted values \r\n val_sum = h[0] + h[1]\r\n\r\n # entropy calculation\r\n for j in range(len(h)):\r\n h[j] = h[j]/val_sum\r\n # to prevent divide by zero\r\n if h[j] != 0:\r\n entropy += h[j] * np.log2(h[j])\r\n entropy = -(entropy)\r\n return entropy", "def ucross(a, b):\n ev = a / np.linalg.norm(a)\n return np.cross(ev, b)", "def cross_entropy(U, V):\n return -np.sum(U * np.log(V))", "def crossentropy_loss(y_true, y_pred):\n ce = tf.keras.losses.categorical_crossentropy(y_true, y_pred, from_logits=True) \n return ce", "def __test_cross_entropy_loss():\n y = np.array([[0, 1], [1, 0], [1, 0]])\n yhat = np.array([[.5, .5], [.5, .5], [.5, .5]])\n\n test1 = cross_entropy_loss(tf.constant(y, dtype=tf.int32), tf.constant(yhat, dtype=tf.float32))\n with tf.Session() as sess:\n test1 = sess.run(test1)\n expected = -3 * np.log(.5)\n __test_all_close(\"Cross-entropy test 1\", test1, expected)\n\n print(\"Basic (non-exhaustive) cross-entropy tests pass\")", "def entropy(x):\n nz = np.nonzero(x)[0]\n return -np.sum(x[nz]*np.log2(x[nz]))", "def cross(a, b):\n return np.array([a[1]*b[2] - a[2]*b[1],\n a[2]*b[0] - a[0]*b[2],\n a[0]*b[1] - a[1]*b[0]])", "def test_cross_entropy(self):\n loss = losses.cross_entropy_with_logits(\n logits=self.logits, targets=self.targets)\n kl = loss.mean()\n\n expected_loss = -(jnp.log(0.9) + jnp.log(0.1) + jnp.log(0.9)) / 3\n\n 
self.assertAlmostEqual(kl, expected_loss)", "def _entropy2(labels, base=None):\n\n n_labels = len(labels)\n\n if n_labels <= 1:\n return 0\n\n value,counts = np.unique(labels, return_counts=True)\n probs = counts / n_labels\n n_classes = np.count_nonzero(probs)\n\n if n_classes <= 1:\n return 0\n\n ent = 0.\n\n # Compute entropy\n base = e if base is None else base\n for i in probs:\n ent -= i * log(i, base)\n\n # quick observation shows ent between 0.0 and 4.0.\n return ent", "def loss_fn(outputs, labels):\n return nn.CrossEntropyLoss()(outputs, labels)", "def get_entropy(*labels):\n entropies = [] #list of entropy values from each subset\n total = 0 #total number of datapoints\n for subset in labels:\n n = len(subset)\n total += n\n counts = np.unique(subset, return_counts=True)[1] #frequency of unique values\n entropy = np.sum([-(i/n) * np.log2(i/n) for i in counts]) #subset entropy calcuation\n entropies.append((entropy, n))\n return np.sum([(n/total) * ent for n, ent in iter(entropies)])", "def xCrossProd(self, other):\n return other.y * self.z - other.z * self.y", "def __init__(self):\n super(MetricCrossEntropy, self).__init__()", "def cross_entropy_with_probs(pred_probs, target_probs, name=None):\n with tf.name_scope(name, \"SoftmaxCrossEntropyWithProb\", [pred_probs, target_probs]):\n crossent_loss = -tf.reduce_sum(target_probs * tf.log(pred_probs), reduction_indices=[1], keep_dims=False)\n return crossent_loss", "def cross_entropy(y_pred, y_true, normalize=True, eps=1e-15):\n if type(y_pred) != np.ndarray:\n raise TypeError(\"Require np.ndarray type,{} checked\".format(type(y_pred)))\n if type(y_true) != np.ndarray:\n raise TypeError(\"Require np.ndarray type,{} checked\".format(type(y_true)))\n # clip = np.vectorize(lambda x: max(eps, min(1 - eps, x)))\n # y_pred = clip(y_pred)\n y_pred = np.array(list(map(lambda x: max(eps, min(1 - eps, x)), y_pred)))\n l = np.multiply(y_true, np.log(y_pred)) + np.multiply(1 - y_true, np.log(1 - y_pred))\n loss = -1 * np.sum(l).item()\n if normalize:\n loss = loss / len(y_pred)\n return loss", "def entropyDistributed(distribution):\n return -sum(map(lambda p : p * log(p, 2), distribution))", "def train_batch(x, y, clf, opt, args):\n opt.zero_grad()\n criterion = nn.CrossEntropyLoss()\n if args.cuda:\n x = x.cuda()\n y = y.cuda()\n\n res = clf(x)\n ncorrect = 0\n for i in range(y.size(0)):\n _, ind = res[i].max(0)\n\n if torch.equal(y[i], ind):\n ncorrect +=1\n\n loss = criterion(res, y)\n loss.backward()\n opt.step()\n\n return loss, ncorrect", "def euclidean(x, y):\n return np.sqrt(np.sum((x - y) ** 2))", "def getCrossEntropy(visible, PDF):\n\n\tndocs = visible.shape[0]\n\tCrossEnt = numpy.zeros((ndocs,))\n\tfor doc in xrange(ndocs):\n\t\tCrossEnt[doc] = numpy.dot(visible[doc], numpy.log(PDF[doc]))\n\n\treturn CrossEnt.mean()", "def cross_entropy(true, pred, axis=-1, epsilon=1e-7):\n pred = ivy.clip(pred, epsilon, 1 - epsilon)\n log_pred = ivy.log(pred)\n # noinspection PyUnresolvedReferences\n return -ivy.reduce_sum(log_pred * true, axis)", "def cross(a,b):\n \n return [ a[1]*b[2] - a[2]*b[1],\n a[2]*b[0] - a[0]*b[2],\n a[0]*b[1] - a[1]*b[0],\n 1.0 ]", "def cross(v1, v2):\n return np.cross(v1, v2)", "def cross_entropy(predicted, target):\n batch_size, num_classes = predicted.shape\n\n e_x = predicted.exp()\n log_e_x = e_x.log()\n a = log_sum_x_trick(predicted)\n x_n_offset = predicted - a\n\n exp_xn_offset = x_n_offset.exp()\n\n sum_exp_xn_offset = exp_xn_offset.sum(axis=1, keepdims=True)\n log_sum_exp_xn_offset = sum_exp_xn_offset.log()\n 
denominator = a + log_sum_exp_xn_offset\n log_softmax = log_e_x - denominator\n\n labels = to_one_hot(target, num_classes)\n prod = log_softmax*labels\n total = prod.sum()\n batch_size = tensor.Tensor(-batch_size)\n\n total = total / batch_size\n\n return total", "def categorical_crossentropy(y_pred, y_true):\n with tf.name_scope(\"Crossentropy\"):\n y_pred /= tf.reduce_sum(y_pred,\n reduction_indices=len(y_pred.get_shape())-1,\n keep_dims=True)\n # manual computation of crossentropy\n y_pred = tf.clip_by_value(y_pred, tf.cast(_EPSILON, dtype=_FLOATX),\n tf.cast(1.-_EPSILON, dtype=_FLOATX))\n cross_entropy = - tf.reduce_sum(y_true * tf.log(y_pred),\n reduction_indices=len(y_pred.get_shape())-1)\n return tf.reduce_mean(cross_entropy)" ]
[ "0.7399793", "0.7246357", "0.71943384", "0.70277774", "0.6669306", "0.6637702", "0.6602384", "0.65894943", "0.65266645", "0.6473503", "0.6437216", "0.642684", "0.63894004", "0.6365542", "0.6353329", "0.63220906", "0.628778", "0.6279644", "0.62760943", "0.6269529", "0.62406224", "0.62195385", "0.61654633", "0.6152873", "0.6143035", "0.61429644", "0.6135727", "0.6133718", "0.60981774", "0.60756135", "0.6072393", "0.60690266", "0.60622185", "0.6049972", "0.60315335", "0.60290545", "0.6018554", "0.60074216", "0.5996101", "0.59799945", "0.5966888", "0.59644103", "0.59559864", "0.5950494", "0.59398454", "0.5934339", "0.59282357", "0.5917388", "0.59104455", "0.58902115", "0.58900064", "0.5873779", "0.5868822", "0.5856024", "0.5849674", "0.58225507", "0.5822494", "0.5809818", "0.58094764", "0.5807639", "0.5807032", "0.58044803", "0.580027", "0.5770744", "0.5748133", "0.57479316", "0.5746499", "0.5740157", "0.5736116", "0.57275593", "0.5718761", "0.5712834", "0.5708433", "0.57054484", "0.57036364", "0.56999415", "0.569966", "0.5696662", "0.56869596", "0.567938", "0.5666363", "0.56654423", "0.56379944", "0.5637788", "0.5631408", "0.56308454", "0.5612492", "0.5610317", "0.5607675", "0.5601812", "0.55823976", "0.55810976", "0.55792344", "0.55780286", "0.55705804", "0.5565569", "0.55633736", "0.55602086", "0.55582935", "0.55575573" ]
0.7348612
1
r"""Joint Entropy Calculates the joint entropy of two discrete distributions x and y. This is the combined Entropy of X added to the conditional Entropy of x given y.
def joint_entropy(x, y, bins):
    # assert array length
    assert len(x) == len(y)

    # get the bins, x and y get their own bins in case of joint entropy
    bins = get_2D_bins(x, y, bins)

    # get the joint histogram
    joint_hist = np.histogram2d(x, y, bins)[0]

    # calculate the joint probability and add a small number
    joint_p = (joint_hist / np.sum(joint_hist)) + 1e-15

    # calculate and return the joint entropy
    return - np.sum(joint_p * np.log2(joint_p))
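A minimal, self-contained usage sketch of the same histogram-based estimate (not part of the module above; the random sample, the bin count of 20 and the variable names are illustrative assumptions). It computes H(X), H(Y) and H(X, Y) in bit and shows the chain-rule relation numerically:

import numpy as np

rng = np.random.default_rng(42)
x = rng.normal(size=10_000)
y = rng.normal(size=10_000)   # independent of x in this toy example

# 2D histogram -> joint probabilities (small constant avoids log2(0))
joint_hist, _, _ = np.histogram2d(x, y, bins=20)
joint_p = joint_hist / np.sum(joint_hist) + 1e-15

# marginals and entropies in bit
px = joint_p.sum(axis=1)
py = joint_p.sum(axis=0)
h_x = -np.sum(px * np.log2(px))
h_y = -np.sum(py * np.log2(py))
h_xy = -np.sum(joint_p * np.log2(joint_p))

# chain rule: H(X,Y) = H(X) + H(Y|X); for independent samples H(X,Y) is close to H(X) + H(Y)
print(f"H(X)={h_x:.3f}  H(Y)={h_y:.3f}  H(X,Y)={h_xy:.3f}  H(X)+H(Y)={h_x + h_y:.3f}")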
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mutual_information(x, y):\r\n\r\n # INSERT YOUR CODE HERE\r\n xvalue, xcount = np.unique(x,return_counts = True)\r\n probx = xcount.astype(float)/len(x)\r\n Hyx = 0.0\r\n for pxval,xval in zip(probx,xvalue):\r\n Hyx += (pxval)*entropy(y[x==xval])\r\n \r\n Ixy = entropy(y) - Hyx\r\n return Ixy\r\n raise Exception('Function not yet implemented!')", "def joint_entropy(column_i, column_j):\n\tfreq_ij = dict()\n\ttotal = len(column_i)\n\tentropy = 0\n\tfor index in range(total):\n\t\ti = column_i[index]\n\t\tj = column_j[index]\n\t\tif i+j in freq_ij:\n\t\t\tfreq_ij[i+j] +=1\n\t\telse:\n\t\t\tfreq_ij[i+j] = 1\n\n\tfor key in freq_ij:\n\t\tfreq_ij[key] /= total\n\t\tentropy += freq_ij[key]*math.log(freq_ij[key], 2)\n\treturn -entropy", "def joint_entropy(column_i, column_j):\n\tif len(column_i) != len(column_j):\n\t\traise IndexError(\"The two MSA should have the same number of related sequences (same species)\")\n\tfreq_ij = dict()\n\ttotal = len(column_i)\n\tentropy = 0\n\tfor index in range(total):\n\t\ti = column_i[index]\n\t\tj = column_j[index]\n\t\tif i+j in freq_ij:\n\t\t\tfreq_ij[i+j] +=1\n\t\telse:\n\t\t\tfreq_ij[i+j] = 1\n\n\tfor key in freq_ij:\n\t\tfreq_ij[key] /= total\n\t\tentropy += freq_ij[key]*math.log(freq_ij[key], 2)\n\tif entropy != 0.0:\n\t\treturn -entropy\n\telse:\n\t\treturn entropy", "def joint_pdf(self, x1, x2 = None):\n return np.exp(self.joint_logpdf(x1, x2))", "def transfer_entropy(X, Y):\n coords = Counter(zip(Y[1:], X[:-1], Y[:-1]))\n\n p_dist = np.zeros((config.NUM_STATES, config.NUM_STATES, config.NUM_STATES))\n for y_f, x_p, y_p in coords.keys():\n p_dist[y_p, y_f, x_p] = coords[(y_f, x_p, y_p)] / (len(X) - 1)\n\n p_yp = p_dist.sum(axis=2).sum(axis=1)\n p_joint_cond_yp = p_dist / p_yp[:, None, None]\n p_yf_cond_yp = p_dist.sum(axis=2) / p_yp[:, None]\n p_xp_cond_yp = p_dist.sum(axis=1) / p_yp[:, None]\n\n denominator = np.multiply(p_yf_cond_yp, p_xp_cond_yp)\n denominator[denominator == 0] = np.nan\n\n division = np.divide(p_joint_cond_yp, denominator[:, :, None])\n division[division == 0] = np.nan\n\n log = np.log2(division)\n\n return np.nansum(np.multiply(p_dist, log))", "def J(W1, b1, W2, b2, x, y):\n yhat = forwardPropagate(W1, b1, W2, b2, x) # OLD: yhat = softmax(x.dot(w))\n return crossEntropy(y, yhat)", "def cross_entropy(x, y, bins, xy_probabilities=False):\n # calculate probabilities if probabilities == False\n if xy_probabilities:\n # same bins for x and y -> same length of x and y if xy_probabilities == True\n assert len(x) == len(y)\n\n # if x does not sum up to 1, raise an error\n if not np.isclose(sum(x),1,atol=0.0001):\n raise ValueError('Probabilities in vector x do not sum up to 1.')\n # if y does not sum up to 1, raise an error\n if not np.isclose(sum(y),1,atol=0.0001):\n raise ValueError('Probabilities in vector y do not sum up to 1.')\n\n # add a small number to all probabilities if zero occurs\n if x.any(0):\n px = x + 1e-15\n py = y + 1e-15\n else:\n px = x\n py = y\n else:\n # get the bins, joint bins for x and y (same_bins=True)\n bins = get_2D_bins(x, y, bins, same_bins=True)\n\n # calculate unconditioned histograms\n hist_x = np.histogram(x, bins=bins[0])[0]\n hist_y = np.histogram(y, bins=bins[1])[0]\n\n px = (hist_x / np.sum(hist_x)) + 1e-15\n py = (hist_y / np.sum(hist_y)) + 1e-15\n\n return - px.dot(np.log2(py))", "def entropy(y):\r\n\r\n # INSERT YOUR CODE HERE\r\n value, count = np.unique(y,return_counts = True)\r\n Hy = 0.0\r\n prob = count.astype(float)/len(y)\r\n for p in prob:\r\n Hy += -(p)*(np.log2(p))\r\n return 
Hy\r\n raise Exception('Function not yet implemented!')", "def entropy(y,w):\r\n\r\n\t# my original entropy function commented below is not working as desired. The below implementation is based on from Sai Ram Chappidi's explanation\r\n\r\n # y_partition = partition(y)\r\n # elements,counts = np.unique(y,return_counts = True)\r\n # entropy=0\r\n\r\n # for i in range(len(elements)):\r\n # entropy += ((-(np.sum(w[y_partition[i]])))/np.sum(w))*np.log2(np.sum(w[y_partition[i]])/np.sum(w))\r\n # return entropy\r\n\r\n entropy = 0\r\n # two hypothesis cases 0,1\r\n h = {0: 0, 1: 0}\r\n leny = len(y)\r\n for i in range(leny):\r\n # if y is 0 add 0 to the weight\r\n if y[i] == 0:\r\n h[0] += w[i]\r\n # if y is 1 add 1 to the weight\r\n elif y[i] == 1:\r\n h[1] += + w[i]\r\n # summing all the weighted values \r\n val_sum = h[0] + h[1]\r\n\r\n # entropy calculation\r\n for j in range(len(h)):\r\n h[j] = h[j]/val_sum\r\n # to prevent divide by zero\r\n if h[j] != 0:\r\n entropy += h[j] * np.log2(h[j])\r\n entropy = -(entropy)\r\n return entropy", "def dist_calc(self, x, y):\n p_xy = self.d2_bin(x, y)\n p_x = np.sum(p_xy, axis=1)\n p_y = np.sum(p_xy, axis=0)\n\n p_x_times_p_y = np.tensordot(p_x, p_y, axes = 0)\n info = np.sum(p_xy * np.ma.log(np.ma.divide(p_xy, p_x_times_p_y)))\n entropy = np.sum(-1 * p_xy * np.ma.log(p_xy))\n\n output = max(0.0, (1 - (info / entropy)))\n return output", "def joint_logpdf(self, x1, x2 = None):\n dists = self.conditionalMVNs\n joint_pdfs = np.array([d.joint_pdf(x1, x2) for d in dists])\n return np.log(np.sum(self.weights * joint_pdfs))", "def joint_feature(self, x, y):\n self._check_size_x(x)\n features, edges = self._get_features(x), self._get_edges(x)\n n_nodes = features.shape[0]\n\n if isinstance(y, tuple):\n # y is result of relaxation, tuple of unary and pairwise marginals\n unary_marginals, pw = y\n unary_marginals = unary_marginals.reshape(n_nodes, self.n_states)\n # accumulate pairwise\n pw = pw.reshape(-1, self.n_states, self.n_states).sum(axis=0)\n else:\n y = y.reshape(n_nodes)\n gx = np.ogrid[:n_nodes]\n\n #make one hot encoding\n unary_marginals = np.zeros((n_nodes, self.n_states), dtype=np.int)\n gx = np.ogrid[:n_nodes]\n unary_marginals[gx, y] = 1\n\n ##accumulated pairwise\n pw = np.dot(unary_marginals[edges[:, 0]].T,\n unary_marginals[edges[:, 1]])\n unaries_acc = np.dot(unary_marginals.T, features)\n if self.directed:\n pw = pw.ravel()\n else:\n pw = compress_sym(pw)\n joint_feature_vector = np.hstack([unaries_acc.ravel(), pw])\n return joint_feature_vector", "def joint_entropy(P):\n P_nan = P.copy()\n P_nan[P_nan == 0] = np.nan\n return np.nansum(np.multiply(P_nan, np.log2(1 / P_nan)))", "def calculateLogJointProbabilities(self, datum):\n\tlogJoint = util.Counter()\n\t#want to calculate log(P(y)) + log(sum(P(fi|y)))\n\t#where y is a label\n\tfor label in self.legalLabels:\n\t\tlogJoint[label] = math.log(self.prior_distribution_prob[label])\n\t\tfor feature, value in datum.items():\n\t\t\tcp = self.conditional_prob[label][feature][value]\n\t\t\tif cp > 0: #condition check for values < 0 because log(0) is undefined and math domain error occurs\n\t\t\t\tlogJoint[label] += math.log(cp) #summing up\n\t\t\t\t\n\treturn logJoint", "def cross_entropy(x, y):\n\n if len(y.shape) == 1:\n return F.cross_entropy(x, y)\n if y.shape[1] == 1:\n y = y.squeeze(1)\n return F.cross_entropy(x, y)\n\n return torch.mean(\n torch.div(\n F.binary_cross_entropy_with_logits(x, y, reduction=\"none\"),\n torch.sum(y, dim=1),\n )\n )", "def mutual_info(l1, l2):\n return 
entropy(l1) + entropy(l2) - entropy(joint_dataset(l1, l2))", "def cEntropy(Y, X):\n return jEntropy(Y, X) - entropy(X)", "def calculateLogJointProbabilities(self, datum):\n logJoint = util.Counter()\n \"*** YOUR CODE HERE ***\"\n\t#Adds log(P(y)) to calculate P(y|f1,f2...)\n for label in self.legalLabels:\n\t\tlogJoint[label] += math.log(self.prior[label])\n\t#Adds log(P(f1|y)), log(P(f2|y))... to calculate P(y|f1, f2...)\n for key in datum:\n\t\t#if key == (7, 3):\n\t\t\t#print self.condprobs[key, 0]\n\t\tfor label in self.legalLabels:\n\t\t\t#print str(key) + str(datum[key])\n\t\t\tlogJoint[label] += math.log(self.condprobs[key, label][datum[key]])\n return logJoint", "def cond_entropy(joint_prob, cond_prob):\n # Computing log2(P cond)\n log2_p = (np.ma.log2(cond_prob)).filled(0)\n # Multipling element wise the arrays\n prod_entropy = np.multiply(joint_prob, log2_p)\n # Getting the - sum of the resulting array.\n H = -( np.sum(prod_entropy))\n return H", "def joint_dataset(l1, l2):\n N = np.max(l1) + 1\n return l2 * N + l1", "def _calculate_probs_and_entropy_y(self):\n #calculate y probabilities and H(Y)\n #H(Y) = Sum(y € Y)(-P(Y=y) * log(P(Y=y)))\n self.lab_entropy = 0\n s = sum(self.lab_counts.values())\n for label, count in self.lab_counts.items():\n self.lab_probs[label] = count / s\n self.lab_entropy -= self.lab_probs[label] * self.log(self.lab_probs[label])", "def _entropy(self, y):\n # Get size\n n = y.shape[0]\n summation = 0\n\n # Summatory\n for c_i in np.unique(y):\n prob = sum(y == c_i) / float(n)\n summation += prob * np.log2(prob)\n\n return -summation", "def logistic_loss(x, y):\n N = x.shape[0]\n x = np.squeeze(x)\n y_prime = (y + 1)/2\n h = 1 /(1 + np.exp(-x))\n loss = np.sum(-np.log( (h**y_prime) * ((1-h)**(1-y_prime)) ))/N\n dx = np.exp(-y*x)*(-y)/(1+np.exp(-y*x))/N\n return loss, dx", "def entropy(y):\n return -1 * sum(\n [\n pipe(np.sum(y == value) / len(y), lambda ratio: ratio * np.log(ratio))\n for value in set(y)\n ]\n )", "def log_prob(self, x, y):\n p = self.tag_log_prob(y)\n for i in range(len(y)):\n if self.out_prob(x[i], y[i]) == 0:\n return -math.inf\n\n p += math.log2(self.out_prob(x[i], y[i]))\n\n return p", "def logq_joint(self, x, h, return_mu=False):\n logph = distributions.Normal(0, 1).log_prob(h).sum(1)\n gmu = self.g(h)\n px_given_h = distributions.Normal(gmu, self.logsigma.exp())\n logpx_given_h = px_given_h.log_prob(x).flatten(start_dim=1).sum(1)\n if return_mu:\n return logpx_given_h + logph, gmu\n else:\n return logpx_given_h + logph", "def entropy(Y):\n\n temp = np.unique(Y, return_counts=True)\n uniq_Y = list(temp[0])\n Y_count = list(temp[1])\n \n total = sum(Y_count)\n\n ent = 0\n for elem in uniq_Y:\n prob = Y_count[uniq_Y.index(elem)] / total\n # print(\"prob:\", prob)\n ent -= (prob * (math.log2(prob)))\n # print(\"ent:\",ent)\n\n return ent", "def _cal_igr(x, y):\n return (_cal_entropy(y) - _cal_conditionalEnt(x, y)) / _cal_conditionalEnt(x, y)", "def entropy(Y):\n unique, count = np.unique(Y, return_counts=True, axis=0)\n prob = count/len(Y)\n en = np.sum((-1)*prob*np.log2(prob))\n return en", "def prob(self, x, y):\n p = self.tag_prob(y)\n for i in range(len(y)):\n p *= self.out_prob(x[i], y[i])\n\n return p", "def entropy(y):\n p = _proba(y)\n return (-p * np.log2(p)).sum()", "def mutual_info_fast(l1, l2, l1_entropy, l2_entropy):\n return l1_entropy + l2_entropy - entropy(joint_dataset(l1, l2))", "def conditional_entropy(f1, f2):\n\n ce = ee.entropyd(f1) - ee.midd(f1, f2)\n return ce", "def calculate_entropy(y):\n\tlog2 = lambda x: 
math.log(x) / math.log(2)\n\tunique_labels = np.unique(y)\n\tentropy = 0\n\tfor label in unique_labels:\n\t\tcount = len(y[y == label])\n\t\tp = count / len(y)\n\t\tentropy += -p * log2(p)\n\treturn entropy", "def print_entropies(independent_joint_probabilities, conditional_joint_probabilities):\n indepndent_entropy = entropy_from_probability_matrix(independent_joint_probabilities)\n conditional_entropy = entropy_from_probability_matrix(conditional_joint_probabilities)\n\n print 'Independent H(X,Y) = {h}'.format(h=indepndent_entropy)\n print 'Conditional H(X,Y) = {h}'.format(h=conditional_entropy)\n print 'D_KL(Independent, Conditional) = {d_kl}' \\\n .format(d_kl=kullback_leibler_divergence(independent_joint_probabilities, conditional_joint_probabilities))\n print 'D_KL(Conditional, Independent) = {d_kl}' \\\n .format(d_kl=kullback_leibler_divergence(conditional_joint_probabilities, independent_joint_probabilities))\n\n return indepndent_entropy, conditional_entropy", "def _entropy_filter(self, prob1, prob2):\n\n\n # calculate merged prob.\n prob_merged = (prob1 + prob2)/2\n # Compute entropy for each prob.\n H1 = -prob1 * math.log(prob1) - (1-prob1) * math.log(1-prob1)\n H2 = -prob2 * math.log(prob2) - (1-prob2) * math.log(1-prob2)\n Hm = -prob_merged * math.log(prob_merged) - (1-prob_merged) * math.log(1-prob_merged)\n\n H_min = min(H1, H2, Hm)\n\n if H_min == H1:\n return prob1\n elif H_min == H2:\n return prob2\n else:\n return prob_merged", "def entropy(y):\n EPS = 0.0005\n\n # YOUR CODE HERE\n if len(y) == 0:\n return 0.\n \n pk = np.mean(y, axis=0)\n \n return - np.sum(pk * np.log(pk + EPS))", "def empirical_bottleneck(x,y,numuniquex=0,numuniquey=0,**kw):\n \n # Marginal, joint and conditional distributions required to calculate the IB\n pxy_j = p_joint(x,y)\n px = pxy_j.sum(axis=1)\n py = pxy_j.sum(axis=0)\n pyx_c = pxy_j.T / px\n #Calculate the information bottleneck for different values of beta\n i_p,i_f,beta = IB(px,py,pyx_c,**kw)\n # Return array of ipasts and ifutures for array of different values of beta - mi should correspond to the saturation point\n mi = mi_x1x2_c(py, px, pyx_c)\n return i_p,i_f,beta,mi,entropy(px,base=2),entropy(py,base=2)", "def calculateLogJointProbabilities(self, datum):\n logJoint = util.Counter()\n \n \"*** YOUR CODE HERE ***\"\n \n # -- OUR CODE HERE\n \n \n import math\n for label in self.legalLabels:\n sumThing = 0.0\n for pixel in self.conditionalProb[label]:\n if datum[pixel] is 1:\n #assert self.conditionalProb[label][pixel] < 1.0 # -- sanity check that the probability is valid\n sumThing += math.log((self.conditionalProb[label][pixel]*1.0))\n else:\n sumThing+=math.log(1-self.conditionalProb[label][pixel]*1.0)\n logJoint[label] = math.log(self.prior[label]*1.0) + sumThing*1.0\n \n\n \n \n import time\n #print \"logJoint is :: \", logJoint\n #time.sleep(2)\n \n \n # -- uses the conditional probability tables computed in the current iteration\n # -- in train and tune\n \n return logJoint", "def symbolic_transfer_entropy(symX, symY):\n\n if len(symX) != len(symY):\n raise ValueError('All arrays must have same length')\n \n symX = np.array(symX)\n symY = np.array(symY)\n \n cp = symbolic_conditional_probabilities_consecutive(symX)\n cp2 = symbolic_conditional_probabilities_consecutive_external(symX, symY)\n jp = symbolic_joint_probabilities_consecutive_external(symX, symY)\n \n TE = 0\n \n for yi, xi, xii in list(jp.keys()):\n try:\n a = cp[xi,xii]\n b = cp2[yi,xi,xii]\n c = jp[yi,xi,xii]\n TE += c * np.log(b / a) / np.log(2.)\n except 
KeyError:\n continue\n except:\n print(\"Unexpected Error\")\n raise\n del cp\n del cp2\n del jp\n \n return TE", "def get_information_y_hat(x_pub, x_priv, x_joint, y_hat, num_of_bins_y):\n\tprint('Start calculating the information for y_hat...')\n\ty_hat = np.array(y_hat).astype(np.float)\n\tpys_hat, unique_inverse_y, y_hat = extract_probs_label(y_hat,num_of_bins_y)\n\tp_y_given_x_pub, b1_pub, b_pub, unique_a_pub, unique_inverse_x_pub, pxs_pub = extract_probs(y_hat, x_pub)\n\tp_y_given_x_priv, b1_priv, b_priv, unique_a_priv, unique_inverse_x_priv, pxs_priv = extract_probs(y_hat, x_priv)\n\tp_y_given_x_joint, b1_joint, b_joint, unique_a_joint, unique_inverse_x_joint, pxs_joint = extract_probs(y_hat, x_joint)\n\t# Shannon Entropy over label\n\tH2Y_hat = -np.sum(pys_hat * np.log2(pys_hat))\n\t# mutual Information between secret layer and label\n\tMI_pri_y_hat = calc_information_for_inp_out(pxs_priv,pys_hat,y_hat,unique_inverse_x_priv)\n\t# mutual Information between secret layer and label\n\tMI_pub_y_hat = calc_information_for_inp_out(pxs_pub,pys_hat,y_hat,unique_inverse_x_pub)\n\treturn H2Y_hat, MI_pri_y_hat, MI_pub_y_hat", "def __call__(self, y1: torch.Tensor, y2: torch.Tensor) -> torch.Tensor:\n return torch.log(1 + torch.exp(-y1 * y2))", "def cross_entropy(X, y):\n return lambda theta: -y * np.log(logistic_hypothesis(theta)(X) + 1e-9) - (\n 1 - y\n ) * np.log(1 - logistic_hypothesis(theta)(X) + 1e-9)", "def entropy(self, y):\n n = y.size\n if n <= 1:\n return 0\n\n labels, counts = unique(y, return_counts=True)\n\n if counts.size <= 1:\n return 0\n\n probs = counts / n\n entropy = -sum([p * log(p, 2) for p in probs])\n return entropy", "def js_divergence(dist1, dist2):\n mean_dist = (dist1 + dist2) / 2.0\n js = (\n scipy.stats.entropy(dist1, mean_dist) + scipy.stats.entropy(dist2, mean_dist)\n ) / 2.0\n return js", "def entropy(a, b):\n with mp.extradps(5):\n a, b = _validate_a_b(a, b)\n return (_fun.logbeta(a, b)\n - (a - 1)*mp.psi(0, a)\n - (b - 1)*mp.psi(0, b)\n + (a + b - 2)*mp.psi(0, a + b))", "def associate_comp(x, y):\n return torch.cat([x[:1] * y[:1] - x[1:] * y[1:], x[:1] * y[1:] + x[1:] * y[:1]])", "def mutual_information(x, y, w):\r\n \r\n\r\n total_entropy = entropy(y,w)\r\n\r\n partitioned_x = partition(x)\r\n weighted_entropy = 0\r\n # calculate the weighted entropy over the partition of x\r\n vals,counts= np.unique(x,return_counts=True)\r\n for key in partitioned_x:\r\n weighted_entropy += np.sum([(np.sum(w[partitioned_x[key]])/np.sum(w)) * entropy(y[partitioned_x[key]],w[partitioned_x[key]])])\r\n\r\n information_gain = total_entropy - weighted_entropy\r\n return information_gain", "def entropy(y):\n total = y.size\n value_counts = np.bincount(y).astype(\"float\")\n proportions = value_counts / y.size\n\n return sum(-i * np.log(i) for i in proportions if i)", "def logistic_loss(x, y):\n x = x.reshape((-1,))\n y = y.reshape((-1,))\n \n N, = x.shape\n \n y_p = np.where(y == 1,1,0)\n\n p = sigmoid(x)\n loss = -(y_p*np.log(p) + (1-y_p)*np.log(1-p))\n loss = np.sum(loss)/N\n\n dx = (1/N)*(p - y_p)\n \n return loss, dx", "def calculateLogJointProbabilities(self, datum):\n logJoint = util.Counter()\n for cls in self.classes:\n class_probability = self.prior_prob[cls]\n for key, value in datum.items():\n relative_feature_values = self.likelihoods[cls][key]\n class_probability += math.log(relative_feature_values.get(datum[key], 0.01))\n\n logJoint[cls] = class_probability\n\n return logJoint", "def symbolic_joint_probabilities(symX, symY):\n\n if len(symX) != len(symY):\n raise 
ValueError('All arrays must have same length')\n \n symX = np.array(symX)\n symY = np.array(symY)\n \n # initialize\n jp = defaultdict(float)\n n = len(symX)\n\n for yi, xi in zip(symY,symX):\n jp[yi, xi] += 1.0 / n\n\n return dict(jp)", "def _compute_prob_y_given_x(self, _x, _y):\n normalisation_constant = sum([\n math.exp(sum([self.weights[_feature] *\n self.feature_funcs[_feature, cls](_feature, cls)\n for _feature in _x]))\n for cls in self.classes])\n\n return math.exp(sum([\n self.weights[_feature] *\n self.feature_funcs[_feature, _y](_feature, _y)\n for _feature in _x])) / normalisation_constant", "def Pdist2(x, y):\r\n x_norm = (x ** 2).sum(1).view(-1, 1)\r\n if y is not None:\r\n y_norm = (y ** 2).sum(1).view(1, -1)\r\n else:\r\n y = x\r\n y_norm = x_norm.view(1, -1)\r\n Pdist = x_norm + y_norm - 2.0 * torch.mm(x, torch.transpose(y, 0, 1))\r\n Pdist[Pdist<0]=0\r\n return Pdist", "def prob1():\n x, y = sy.symbols('x, y')\n return sy.Rational(2,5) * sy.exp(x**2 - y) * sy.cosh(x + y) + \\\n sy.Rational(3,7) * sy.log(x*y + 1)", "def conditional_entropy(x, y, bins, normalize=False):\n \n # get the bins\n bins = get_2D_bins(x, y, bins)\n \n # calculate H(x,y) and H(y)\n hjoint = joint_entropy(x,y,bins)\n hy = entropy(y, bins[1])\n\n if normalize:\n normalizer = entropy(x, bins[0])\n conditional_entropy = hjoint - hy\n\n # check if conditional entropy and normalizer are very small\n if conditional_entropy < 1e-4 and normalizer < 1e-4:\n # return zero to prevent very high values of normalized conditional entropy\n # e.g. conditional entropy = -1.3e-12, normalizer = -1.6e-12 \n # -> normalized conditional entropy = 812.5\n return 0\n else:\n return conditional_entropy / normalizer\n else:\n return hjoint - hy", "def cross_entropy_error(self, x, y):\n return -1 * sum([y[i] * np.log(self.logistic_function(self.weights.dot(x[i]))) + (1-y[i]) * np.log(1-self.logistic_function(self.weights.dot(x[i]))) for i in range(len(y))])", "def chl_entropy(y, base=2):\n p,bins = histogram(y, bins=unique(y)) # don't use 'Normed' feature, since that includes the bin-width!\n p = p[p!=0]/float(len(y))\n S = -1.0*sum(p*log(p))/log(base)\n return S", "def dist(x, y):\n dx = x[0] - y[0]\n dy = x[1] - y[1]\n ans = dx**2 + dy**2\n ans = ans**(0.5)\n return ans", "def prob_2_entropy(prob):\r\n n, c, h, w = prob.size()\r\n return -torch.mul(prob, torch.log2(prob + 1e-30)) / np.log2(c)", "def dist(x, y):\n dx = x[0] - y[0]\n dy = x[1] - y[1]\n ans = dx**2 + dy**2\n ans = ans**(0.5)\n return ans", "def compute_dists(x, y):\r\n \r\n return (x - y.permute(0, 2, 1)) ** 2", "def entropy(x):\n nz = np.nonzero(x)[0]\n return -np.sum(x[nz]*np.log2(x[nz]))", "def logistic_loss(x, y):\n N = x.shape[0]\n x_flat = np.squeeze(x)\n ex = np.exp(x_flat)\n loss = np.sum(-y*x_flat+np.log(1+ex))/N\n dx = (-y+ex/(1+ex))/N\n # dx = np.reshape(dx,(len(dx),1))\n return loss, dx", "def weight(self, y, xn, xo):\n\n return self._model.log_prob(y, xn) + self._model.h_weight(xn, xo) - self._kernel.log_prob(xn)", "def logits_and(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:\n t = (x + y) / 2\n f = logaddexp(logaddexp((x - y) / 2, (y - x) / 2), -t)\n return t - f", "def gain(Y, X):\n return entropy(Y) - cEntropy(Y,X)", "def entropy_obj(self, x, h, num_samples_posterior=20, return_score=False, learn_post_sigma=True):\n inf_dist = distributions.Normal(h, self.post_logsigma.detach().exp())\n h_given_x = inf_dist.sample((num_samples_posterior,))\n if len(x.size()) == 4:\n inf_logprob = inf_dist.log_prob(h_given_x).sum(2)\n xr = 
x[None].repeat(num_samples_posterior, 1, 1, 1, 1)\n xr = xr.view(x.size(0) * num_samples_posterior, x.size(1), x.size(2), x.size(3))\n logq, mean_output = self.logq_joint(xr, h_given_x.view(-1, h.size(1)), return_mu=True)\n mean_output = mean_output.view(num_samples_posterior, x.size(0), x.size(1), x.size(2), x.size(3))\n logq = logq.view(num_samples_posterior, x.size(0))\n w = (logq - inf_logprob).softmax(dim=0)\n fvals = (x[None] - mean_output) / (self.logsigma.exp() ** 2)\n weighted_fvals = (fvals * w[:, :, None, None, None]).sum(0).detach()\n c = weighted_fvals\n else:\n inf_logprob = inf_dist.log_prob(h_given_x).sum(2)\n xr = x[None].repeat(num_samples_posterior, 1, 1)\n xr = xr.view(x.size(0) * num_samples_posterior, x.size(1))\n logq, mean_output = self.logq_joint(xr, h_given_x.view(-1, h.size(1)), return_mu=True)\n mean_output = mean_output.view(num_samples_posterior, x.size(0), x.size(1))\n logq = logq.view(num_samples_posterior, x.size(0))\n w = (logq - inf_logprob).softmax(dim=0)\n fvals = (x[None] - mean_output) / (self.logsigma.exp() ** 2)\n weighted_fvals = (fvals * w[:, :, None]).sum(0).detach()\n c = weighted_fvals\n\n mgn = c.norm(2, 1).mean()\n g_error_entropy = torch.mul(c, x).mean(0).sum()\n\n post = distributions.Normal(h.detach(), self.post_logsigma.exp())\n h_g_post = post.rsample()\n joint = self.logq_joint(x.detach(), h_g_post)\n post_ent = post.entropy().sum(1)\n\n elbo = joint + post_ent\n post_loss = -elbo.mean()\n\n if learn_post_sigma:\n self.post_optimizer.zero_grad()\n post_loss.backward()\n self.post_optimizer.step()\n\n if return_score:\n return g_error_entropy, mgn, c\n else:\n return g_error_entropy, mgn", "def entropy(self):\n n = len(self.y)\n sum_ = 0\n for i in np.unique(self.y):\n v = len(self.y[self.y == i])\n sum_ += -((v/n) * log2(v/n))\n return sum_", "def hyperboloidDist(point1, point2):\n return np.arccosh(-minkowskiDot(point1, point2))", "def log_joint(self):\n return sum([\n self.log_marg_like(self.gamma, self.gamma0, self.lamb, self.nu),\n self._gamma0_distribution.logpdf(self.gamma0),\n self._nu_distribution.logpdf(self.nu),\n self._lambda_distribution.logpdf(self.lamb),\n self.probit_distribution(self.xi).logpdf(self.gamma),\n self._xi_distribution.logpdf(self.xi) if self.sample_xi else 0.0\n ])", "def entropy(dist):\n #dist = array([max(d,1e-100) for d in dist])\n dist = dist + 1e-20\n return dot(dist,(log(1.0/dist) * (1.0/log(2.0))).T)", "def _calculate_probs_and_entropy_x(self, columns):\n #calculate x probabilities and H(Xi)\n #H(Xi) = Sum(x € Xi)(-P(Xi=x) * log(P(Xi=x)))\n for col in columns:\n self.cat_entropies[col] = 0\n xsum = 0\n for val in self.categories[col]:\n self.cat_probs[col][val] = 0\n for label in self.labels:\n self.cat_probs[col][val] += self.cat_counts[col][label][val]\n xsum += self.cat_probs[col][val]\n for val in self.categories[col]:\n self.cat_probs[col][val] /= xsum\n self.cat_entropies[col] -= self.cat_probs[col][val] * self.log(self.cat_probs[col][val])", "def entropy(self, X):\n if isinstance(X, np.ndarray):\n X = pd.DataFrame(X, index=[str(i) for i in range(len(X))])\n K = self._posterior_covariance(X)\n L = np.linalg.cholesky(K)\n D = len(X)\n return np.sum(np.log(np.diag(L))) + 0.5 * D * np.log(2*np.pi*np.exp(1))", "def joint_proba(self, X):\n return self.weights * self._bernoulli(X)", "def test_get_xentropy_components_one_time_neigh(self):\n\n this_numerator, this_denominator = (\n learning_curves._get_xentropy_components_one_time(\n actual_target_matrix=ACTUAL_TARGET_MATRIX,\n 
probability_matrix=PROBABILITY_MATRIX,\n eval_mask_matrix=MASK_MATRIX,\n matching_distance_px=NEIGH_DISTANCE_PX\n )\n )\n\n self.assertTrue(numpy.isclose(\n this_numerator, NEIGH_XENTROPY_NUMERATOR, atol=TOLERANCE\n ))\n self.assertTrue(numpy.isclose(\n this_denominator, NEIGH_XENTROPY_DENOM, atol=TOLERANCE\n ))", "def max_entangled(h1, h2):\n\n for h in (h1, h2):\n h.assert_ket_space()\n\n field = h1.base_field\n\n d = h1.dim()\n if h2.dim() != d:\n raise HilbertError('spaces must be of the same dimension')\n\n return (h1.H * h2).eye().transpose(h1) / field.sqrt(d)", "def logits_or(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:\n f = -(x + y) / 2\n t = logaddexp(logaddexp((x - y) / 2, (y - x) / 2), -f)\n return t - f", "def cross_entropy(self, yhat):\n n = len(self._y)\n c = 0.0\n for i in range(0, n):\n c += self._y[i] * log(\n yhat[i]) + (1 - self._y[i]) * log(1 - yhat[i])\n\n return c", "def prob_larger_continuous(distr1, distr2):\n\n return distr1.expect(distr2.cdf)", "def _jsd(prob_dist_p, prob_dist_q):\n prob_dist_p = check_numpy_param('prob_dist_p', prob_dist_p)\n prob_dist_q = check_numpy_param('prob_dist_q', prob_dist_q)\n norm_dist_p = prob_dist_p / (np.linalg.norm(prob_dist_p, ord=1) + 1e-12)\n norm_dist_q = prob_dist_q / (np.linalg.norm(prob_dist_q, ord=1) + 1e-12)\n norm_mean = 0.5*(norm_dist_p + norm_dist_q)\n return 0.5*(stats.entropy(norm_dist_p, norm_mean)\n + stats.entropy(norm_dist_q, norm_mean))", "def enumerate_joint_ask(X, e, P):\n Q = ProbDist(X) ## A probability distribution for X, initially empty\n Y = [v for v in P.variables if v != X and v not in e]\n for xi in P.values(X):\n Q[xi] = enumerate_joint(Y, extend(e, X, xi), P)\n return Q.normalize()", "def _calculate_probs_and_entropies(self):\n self._calculate_probs_and_entropy_y()\n self._calculate_probs_and_entropy_x(self.cat_cols)", "def Dist(p1,p2):\n x1, y1 = p1\n x2, y2 = p2\n return (((x1-x2)*(x1-x2)) + ((y1-y2)*(y1-y2)))**0.5", "def forward(self, x, y):\n distance = (x - y).norm(2, dim=-1, keepdim=True)\n z = (distance - self.mu) / self.sigma\n prob = torch.exp(-0.5 * z * z)\n return prob", "def get_probability(self, x, y, base_x, base_y):\n delta_x = x - base_x \n delta_y = y - base_y\n prob_x = self.nx_predictor.get_probability(delta_x)\n prob_y = self.ny_predictor.get_probability(delta_y)\n prob_xy = self.prob_alpha_x*prob_x*self.prob_alpha_y*prob_y\n return prob_x, prob_y, prob_xy", "def mutual_information(x, y, logfunc=np.log2, nperms=1e4):\n def entropy(freqDict):\n return -np.array([p*logFunc(p) for p in freqDict.values()]).sum()\n freqx = objhist(x)\n freqy = objhist(y)\n \n Hx = freqx.entropy()\n Hy = freqy.entropy()\n Hxy = objhist(zip(x,y)).entropy()\n M = Hx + Hy - Hxy\n Mstar = 2*M / (Hx+Hy)\n\n if len(freqx)==1 or len(freqy)==1:\n p = 1\n elif np.all([xi==yi for xi,yi in zip(x,y)]):\n p = 0\n else:\n Mperms = np.array([Hx + Hy - objhist(zip(permutation(x),y)).entropy() for i in np.arange(nperms)])\n p = (Mperms >= M).sum() / nperms\n\n return M, Mstar, p, Hx, Hy, Hxy", "def joint_likelihood_combine(x2LL, alpha1 = 0.32, alpha2 = 0.05, return_full=False):\n\n # Combine log-likelihood ratios by summing them\n x2LL_sum = np.sum([x2LL[item]['llr'] for item in x2LL], axis=0)\n\n # x-axis values (must be the same for all datasets, take the first one)\n r0_value = [x2LL[item]['val'] for item in x2LL][0]\n \n # Find minimum \n x2LL_min = np.min(x2LL_sum)\n minind = np.argmin(x2LL_sum)\n IFR = r0_value[minind]\n\n # Find first interval\n chi2 = stats.chi2.ppf(q=1-alpha1, df=1)\n ind = 
(np.where(x2LL_sum - x2LL_min <= chi2))[0] # Note <= not <\n CI68 = np.array([r0_value[ind[0]], r0_value[ind[-1]]])\n \n # Find second interval\n chi2 = stats.chi2.ppf(q=1-alpha2, df=1)\n ind = (np.where(x2LL_sum - x2LL_min <= chi2))[0] # Note <= not <\n CI95 = np.array([r0_value[ind[0]], r0_value[ind[-1]]])\n\n CI_val = np.array([CI95[0], CI68[0], CI68[1], CI95[1]])\n\n if not return_full:\n return IFR, CI_val\n else:\n return IFR, CI_val, x2LL_sum, r0_value", "def cross_entropy(y_prob,y):\n from numpy import log, sum\n m = y.shape[0]\n p = y_prob\n log_likelihood = -log(p[range(m),y])\n loss = sum(log_likelihood) / m\n return loss", "def dist(self, X, Y):\n raise NotImplementedError", "def add_joint(joint: str, x1: int, y1: int, x2: int, y2: int) -> str:\n return joint", "def product(self, other: \"DiscreteFactorTable\"):\n\n #Conjunction with null distribution is null table\n if (len(self.support) == 0) or (len(other.support) == 0):\n return DiscreteFactorTable([])\n\n # NOTE: can this be relaxed?\n assert type(self.support[0]) == type(other.support[0])\n\n jsupport = []\n jlogits = []\n # HACK: this should throw a warning or something if the distributions have different headers\n # i.e., dictionary keys interact in a weird way\n for si, oi in product(self.support, other.support):\n if isinstance(si, (dict, frozendict)) and isinstance(oi, (dict, frozendict)):\n if dict_match(si, oi): #not efficient if the cartesian product is large\n soi = dict_merge(si, oi)\n if soi in jsupport:\n continue\n logit = self.logit(si) + other.logit(oi)\n if logit == -np.inf:\n continue\n jsupport.append(soi)\n jlogits.append(logit)\n else:\n jsupport.append((si, oi))\n jlogits.append(self.logit(si) + other.logit(oi))\n if len(jlogits) > 0:\n logger.debug(\"Product distribution has no non-zero support\")\n return DiscreteFactorTable(support=jsupport, logits=jlogits)", "def dtw(x, y, dist):\n assert len(x)\n assert len(y)\n r, c = len(x), len(y)\n D0 = zeros((r + 1, c + 1))\n D0[0, 1:] = inf\n D0[1:, 0] = inf\n D1 = D0[1:, 1:] # view\n for i in range(r):\n for j in range(c):\n D1[i, j] = dist(x[i], y[j])\n C = D1.copy()\n for i in range(r):\n for j in range(c):\n D1[i, j] += min(D0[i, j], D0[i, j+1], D0[i+1, j])\n if len(x)==1:\n path = zeros(len(y)), range(len(y))\n elif len(y) == 1:\n path = range(len(x)), zeros(len(x))\n else:\n path = _traceback(D0)\n return D1[-1, -1] / sum(D1.shape), C, D1, path", "def join_cinfo(cooccur, percents):\n import math\n\n word1 = cooccur[0][0]\n word2 = cooccur[0][1]\n try:\n word1_percent = percents[word1]\n weight1 = 1 / word1_percent\n word2_percent = percents[word2]\n weight2 = 1 / word2_percent\n return (cooccur[0], cooccur[1], cooccur[1] *\n math.log(min(weight1, weight2)))\n except:\n return 0", "def _entropy2(labels, base=None):\n\n n_labels = len(labels)\n\n if n_labels <= 1:\n return 0\n\n value,counts = np.unique(labels, return_counts=True)\n probs = counts / n_labels\n n_classes = np.count_nonzero(probs)\n\n if n_classes <= 1:\n return 0\n\n ent = 0.\n\n # Compute entropy\n base = e if base is None else base\n for i in probs:\n ent -= i * log(i, base)\n\n # quick observation shows ent between 0.0 and 4.0.\n return ent", "def probabilities(self, x, y):\n return self.feed_and_return(x, y, self.network.a)", "def entropy(self):\r\n return 1/2 * (self.dim * (_LOG_2PI + 1) + self._log_det_cov)", "def theils_u(x,\n y,\n nan_strategy=_REPLACE,\n nan_replace_value=_DEFAULT_REPLACE_VALUE):\n\n print(x.name + ' to ' + y.name + ' with Theils U')\n\n if nan_strategy == 
_REPLACE:\n x, y = replace_nan_with_value(x, y, nan_replace_value)\n elif nan_strategy == _DROP:\n x, y = remove_incomplete_samples(x, y)\n\n contingency = pd.crosstab(x, y)\n c, p, dof, expected = ss.chi2_contingency(contingency)\n\n s_xy = conditional_entropy(x, y)\n x_counter = Counter(x)\n total_occurrences = sum(x_counter.values())\n p_x = list(map(lambda n: n / total_occurrences, x_counter.values()))\n s_x = ss.entropy(p_x)\n if s_x == 0:\n return 1, 0\n else:\n return (s_x - s_xy) / s_x, p, r'$U$'", "def entropyCategorical(attr, X, y):\n uniques = X[attr].unique().tolist()\n idxLists = []\n entropies = []\n weights = []\n for u in uniques:\n idxLists.append(X.index[X[attr] == u].tolist())\n entropies.append(entropy(y, idxLists[-1]))\n weights.append(len(idxLists[-1]))\n\n entropies = np.array(entropies).reshape(1, -1)\n weights = np.array(weights).reshape(-1, 1).astype(np.float32)\n weights /= np.sum(weights)\n\n return (uniques, idxLists, (entropies @ weights)[0, 0])", "def d2_bin(self, x, y):\n \n KD = KernelDensity(bandwidth=self.bandwidth,kernel=self.kernel)\n KD.fit(np.column_stack((x,y)))\n grid1 = np.linspace(np.min(x),np.max(x),self.bins)\n grid2 = np.linspace(np.min(y),np.max(y),self.bins)\n mesh = np.meshgrid(grid1,grid2)\n data = np.column_stack((mesh[0].reshape(-1,1),mesh[1].reshape(-1,1)))\n samp = KD.score_samples(data)\n samp = samp.reshape(self.bins,self.bins)\n p = np.exp(samp)/np.sum(np.exp(samp))\n\n return p" ]
[ "0.6370943", "0.63690835", "0.63631195", "0.6334478", "0.6209416", "0.6186507", "0.60127246", "0.5941165", "0.58904195", "0.5855695", "0.5840326", "0.5787013", "0.5770392", "0.57627195", "0.5750125", "0.5739848", "0.5735412", "0.56943065", "0.56919396", "0.5672802", "0.5654701", "0.5654116", "0.5653314", "0.55887705", "0.5579684", "0.55605185", "0.55579716", "0.5546054", "0.55091", "0.55074996", "0.5497917", "0.54649127", "0.54270536", "0.5422829", "0.54179776", "0.5417916", "0.5415684", "0.5415396", "0.5408408", "0.5403634", "0.53971446", "0.5395479", "0.5389612", "0.5354745", "0.5353193", "0.533573", "0.53214014", "0.53104955", "0.53034663", "0.5293938", "0.5278855", "0.52698445", "0.52678806", "0.5254893", "0.5224478", "0.5222314", "0.5217103", "0.5204719", "0.5194968", "0.5192214", "0.51861906", "0.51808023", "0.51387227", "0.51316315", "0.51276195", "0.51250756", "0.5117498", "0.5103547", "0.5097237", "0.50930595", "0.50733805", "0.5056223", "0.5049023", "0.50472176", "0.5039295", "0.50384253", "0.5034905", "0.5029633", "0.502199", "0.5016586", "0.5012152", "0.5009688", "0.5008646", "0.5006071", "0.5001007", "0.49994475", "0.49986508", "0.49882424", "0.49779013", "0.49739307", "0.49614325", "0.4954653", "0.4954478", "0.49493438", "0.4948907", "0.49487337", "0.49457723", "0.49456576", "0.49426118", "0.49263316" ]
0.7735844
0
r"""KullbackLeibler Divergence Calculates the KullbackLeibler Divergence between two discrete distributions x and y. X is considered to be an empirical discrete distribution while y is considered to be the real discrete distribution of the underlying population.
def kullback_leibler(x, y, bins, xy_probabilities=False):
    if xy_probabilities:
        # if x does not sum up to 1, raise an error
        if not np.isclose(sum(x), 1, atol=0.0001):
            raise ValueError('Probabilities in vector x do not sum up to 1.')
        # if y does not sum up to 1, raise an error
        if not np.isclose(sum(y), 1, atol=0.0001):
            raise ValueError('Probabilities in vector y do not sum up to 1.')

        # add a small number to all probabilities if zero occurs
        if (x == 0).any() or (y == 0).any():
            px = x + 1e-15
            py = y + 1e-15
        else:
            px = x
            py = y
    else:
        # get the bins, joint bins for x and y (same_bins=True)
        bins = get_2D_bins(x, y, bins, same_bins=True)

        # calculate unconditioned histograms
        hist_x = np.histogram(x, bins=bins[0])[0]
        hist_y = np.histogram(y, bins=bins[1])[0]

        # calculate probabilities
        px = (hist_x / np.sum(hist_x))
        py = (hist_y / np.sum(hist_y))

    # calculate the cross entropy of x and y, and the unconditioned entropy of x
    hcross = cross_entropy(px, py, bins, xy_probabilities=True)
    hx = entropy(px, bins, xy_probabilities=True)

    # Kullback-Leibler divergence: cross entropy minus entropy of x
    return hcross - hx
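A minimal, self-contained sketch of the identity used above, D_KL(P || Q) = H_cross(P, Q) - H(P) = sum(p * log2(p / q)), estimated on shared histogram bins (independent of the module code; the samples, the 30 bins and the names are assumptions for illustration):

import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(loc=0.0, scale=1.0, size=10_000)   # empirical sample (P)
y = rng.normal(loc=0.5, scale=1.2, size=10_000)   # "population" sample (Q)

# shared bin edges so that both histograms describe the same support
edges = np.histogram_bin_edges(np.concatenate([x, y]), bins=30)
px = np.histogram(x, bins=edges)[0] / x.size + 1e-15
py = np.histogram(y, bins=edges)[0] / y.size + 1e-15

h_cross = -np.sum(px * np.log2(py))   # cross entropy of P and Q
h_p = -np.sum(px * np.log2(px))       # entropy of P

d_kl = h_cross - h_p                  # same as np.sum(px * np.log2(px / py))
print(f"D_KL(P || Q) ~ {d_kl:.3f} bit")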
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def kl_bern(x, y):\n x = min(max(x, eps), 1-eps)\n y = min(max(y, eps), 1-eps)\n return x*log(x/y) + (1-x)*log((1-x)/(1-y))", "def kl_divergence(x,y):\n\tassert (isinstance(x, BayesNet) and isinstance(y, BayesNet)), 'Must pass in BayesNet objects.'\n\tassert (x==y), 'Passed-in BayesNet objects are not structurally equal.'\n\n\tdistance = np.sum( x.flat_cpt() * np.log( x.flat_cpt() / y.flat_cpt() ) )\n\treturn distance", "def kl_divergence(x, y, thresholded=True, symmetrized=True, normalize=True):\n assert (x.dtype == np.float64 and y.dtype == np.float64) or (\n x.dtype == np.float32 and y.dtype == np.float32)\n # assert (np.all(x.sum(1) != 0.) and np.all(y.sum(1) != 0.))\n if thresholded:\n normalize = True\n if normalize:\n x /= x.sum(1).reshape(x.shape[0], 1)\n y /= y.sum(1).reshape(y.shape[0], 1)\n if thresholded:\n eps = np.finfo(x.dtype).eps\n x = x + eps\n y = y + eps\n x /= x.sum(1).reshape(x.shape[0], 1)\n y /= y.sum(1).reshape(y.shape[0], 1)\n res = __kl_divergence(x, y)\n\n if symmetrized:\n res = 0.5 * res + 0.5 * __kl_divergence(y, x).transpose()\n\n return np.float64(res).reshape(res.shape)", "def kullback_leibler_divergence_loss(self, y_true=None, y_pred=None, decimal=5, **kwargs):\n y_true, y_pred, binary, representor, decimal = self.get_processed_data2(y_true, y_pred, decimal)\n y_pred = np.clip(y_pred, self.EPSILON, 1 - self.EPSILON) # Clip predicted probabilities\n if binary:\n y_true = np.clip(y_true, self.EPSILON, 1 - self.EPSILON) # Clip true labels\n res = y_true * np.log(y_true / y_pred) + (1 - y_true) * np.log((1 - y_true) / (1 - y_pred))\n res = np.mean(res)\n else:\n # Convert y_true to one-hot encoded array\n num_classes = len(np.unique(y_true))\n y_true = np.eye(num_classes)[y_true]\n y_true = np.clip(y_true, self.EPSILON, 1 - self.EPSILON) # Clip true labels\n res = np.sum(y_true * np.log(y_true / y_pred), axis=1)\n res = np.mean(res)\n return np.round(res, decimal)", "def kl_divergence(y_true, y_pred):\n y_pred = tensor_conversion.convert_to_tensor_v2_with_dispatch(y_pred)\n y_true = math_ops.cast(y_true, y_pred.dtype)\n y_true = backend.clip(y_true, backend.epsilon(), 1)\n y_pred = backend.clip(y_pred, backend.epsilon(), 1)\n return math_ops.reduce_sum(y_true * math_ops.log(y_true / y_pred), axis=-1)", "def kl_divergence(p_dist, q_dist, n_samples_per_axis=30, n_axis=2):\r\n global COUNTER\r\n if n_axis == 2:\r\n x = np.linspace(-1.0, 1.0, n_samples_per_axis)\r\n y = np.linspace(-1.0, 1.0, n_samples_per_axis)\r\n grids = np.meshgrid(x, y)\r\n elif n_axis == 3:\r\n x = np.linspace(-1.0, 1.0, n_samples_per_axis)\r\n y = np.linspace(-1.0, 1.0, n_samples_per_axis)\r\n z = np.linspace(-1.0, 1.0, n_samples_per_axis)\r\n grids = np.meshgrid(x, y, z)\r\n elif n_axis == 1:\r\n grids = np.linspace(-1.1, 1.1, 120)\r\n print(\"Grid complete!\")\r\n if n_axis != 1:\r\n grid = np.vstack(grids).reshape((n_axis, n_samples_per_axis**n_axis)).T\r\n else:\r\n grid = grids\r\n grid = np.reshape(grid, (grid.shape[0], 1))\r\n probs_p = np.exp(p_dist.score_samples(grid))\r\n probs_q = np.exp(q_dist.score_samples(grid))\r\n print(\"prob_calc_complete\")\r\n kl = entropy(probs_p, probs_q)\r\n return kl", "def kl_divergence_from_logits(self, logits_a, logits_b):\n distribution1 = tf.contrib.distributions.Categorical(logits=logits_a)\n distribution2 = tf.contrib.distributions.Categorical(logits=logits_b)\n return tf.contrib.distributions.kl_divergence(distribution1, distribution2)", "def KL_divergence(model_1, model_2, samples):\n posterior_1 = create_posterior_object(model_1, 
samples)\n posterior_2 = create_posterior_object(model_2, samples)\n return posterior_1.KL(posterior_2)", "def KL_divergence(xs,ys,pdf_x=None,pdf_y=None,data_range=None):\n if data_range is None:\n data_range = list(set(xs)) + list(set(ys))\n if pdf_x is None:\n pdf_x = prob_density_func(xs,norm=True,data_range=data_range)\n if pdf_y is None:\n pdf_y = prob_density_func(ys,norm=True,data_range=data_range)\n keys = set(pdf_x.keys()+pdf_y.keys())\n PQ = []\n for k in keys:\n if k in pdf_x and k in pdf_y:\n PQ.append((pdf_x[k],pdf_y[k]))\n return np.sum([p*np.log(float(p)/float(q)) for (p,q) in PQ if q>0 and p>0])", "def _graph_fn_kl_divergence(distribution_a, distribution_b):\n if get_backend() == \"tf\":\n return tf.no_op()\n # TODO: never tested. tf throws error: NotImplementedError: No KL(distribution_a || distribution_b) registered for distribution_a type Bernoulli and distribution_b type ndarray\n #return tf.distributions.kl_divergence(\n # distribution_a=distribution_a,\n # distribution_b=distribution_b,\n # allow_nan_stats=True,\n # name=None\n #)", "def __init__(self, name='backward_kl_divergence', **kwargs):\n\n super(BackwardKLDivergence, self).__init__(name=name, **kwargs)\n\n return", "def test_divergences_to_kl2(dists, divergence):\n for dist1, dist2 in combinations(dists, 2):\n assert divergence(dist1, dist2, alpha=1) == pytest.approx(kullback_leibler_divergence(dist1, dist2))", "def get_KL_divergence(self):\n KL_loss_W = Vil.get_KL_divergence_Samples(self.mu_weight, Vil.softplus(self.rho_weight), self.weight, self.prior)\n KL_loss_b = 0\n if self.bias is not None:\n KL_loss_b = Vil.get_KL_divergence_Samples(self.mu_bias, Vil.softplus(self.rho_bias), self.bias, self.prior)\n \n KL_loss = KL_loss_W + KL_loss_b\n \n return KL_loss", "def elbo(self, x, y):\n self.forward(x, y)\n\n # prior-posterior divergence\n kl_loss = kl.kl_divergence(\n self.prior_latent_distribution, self.posterior_latent_distribution).mean()\n\n # reconstruction loss\n if not self.training:\n # resample output based on prior, not posterior\n self.forward(x)\n reconstruction_loss = self.recon_loss_fun(self.y_hat_raw, y[:, 0])\n\n # training loss\n loss = reconstruction_loss + self.beta * kl_loss\n\n # statictics about prior and posterior\n mu_prior = self.prior_latent_distribution.mean\n mu_posterior = self.posterior_latent_distribution.mean\n mu_dist = torch.norm(mu_prior - mu_posterior, dim=-1).mean()\n std_prior = self.prior_latent_distribution.stddev\n std_prior = torch.norm(std_prior - mu_posterior, dim=-1).mean()\n std_posterior = self.posterior_latent_distribution.stddev\n std_posterior = torch.norm(std_posterior - mu_posterior, dim=-1).mean()\n\n return loss, reconstruction_loss, kl_loss, mu_dist, std_prior, std_posterior", "def kl_divergence(a, b, normalize=True):\n a, b = np.array(a), np.array(b)\n\n x = np.linspace(\n min(a.min(), b.min()) - 1,\n max(a.max(), b.max()) + 1,\n 100\n )\n\n p = gaussian_kde(a)(x)\n q = gaussian_kde(b)(x)\n\n if normalize:\n p = p/np.sum(p)\n q = q/np.sum(q)\n\n return np.sum(np.where(p != 0, (p) * np.log(p / q), 0))", "def kde2D(x, y, bandwidth, xbins=100j, ybins=100j, **kwargs):\n\n # create grid of sample locations (default: 100x100)\n xx, yy = np.mgrid[x.min():x.max():xbins, \n y.min():y.max():ybins]\n\n xy_sample = np.vstack([yy.ravel(), xx.ravel()]).T\n xy_train = np.vstack([y, x]).T\n\n kde_skl = KernelDensity(bandwidth=bandwidth, **kwargs)\n kde_skl.fit(xy_train)\n\n # score_samples() returns the log-likelihood of the samples\n z = 
np.exp(kde_skl.score_samples(xy_sample))\n return xx, yy, np.reshape(z, xx.shape)", "def zkl_divergence(x, y, gamma):\n return np.sum([p_i*np.log(p_i/q_i) if q_i > 0 and p_i > 0 else p_i*gamma for (p_i, q_i) in zip(x, y)])", "def find_knee(x,y):\n\n # find ranges\n if len(x) != len(y):\n raise Exception(\"bad data\")\n tot_len = len(x)\n \n \n \n # fit strait lines to both\n\n # find intercept\n knee_r = (f_top.beta[1] - f_bottom.beta[1])/(-f_top.beta[0] + f_bottom.beta[0])", "def kl_gauss(x, y, sig2=1.):\n return (x - y) ** 2 / (2 * sig2)", "def kl_div_prior_gradient(self, posterior_logits, posterior_binary_samples):\n #DVAE Eq11 - gradient of prior\n #gradient of the KLD between posterior and prior wrt to prior\n #parameters theta, i.e. generative model parameters.\n #logits to probabilities\n posterior_probs=torch.sigmoid(posterior_logits)\n positive_probs=posterior_probs.detach()\n \n #samples from posterior are labelled positive\n positive_samples=posterior_binary_samples.detach()\n\n n_split=positive_samples.size()[1]//2\n positive_samples_left,positive_samples_right=torch.split(positive_samples,split_size_or_sections=int(n_split),dim=1)\n \n #-z_left^t J z_right\n pos_first_term=torch.matmul(positive_samples_left,self.prior.get_weights())*positive_samples_right\n \n rbm_bias_left=self.prior.get_visible_bias()\n rbm_bias_right=self.prior.get_hidden_bias()\n rbm_bias=torch.cat([rbm_bias_left,rbm_bias_right])#self._h\n \n #this gives [42,400] size\n #- z^t h\n #TODO this uses positive probs. Should it not use positive samples?\n # FIXME an indication are the negative ones where samples are used! On\n #other hand this is the only place this this used\n pos_sec_term=positive_probs*rbm_bias\n # pos_sec_term=positive_samples*rbm_bias\n\n # Energy = -z_left^t J z_right - z^t h\n pos_kld_per_sample=-(torch.sum(pos_first_term,axis=1)+torch.sum(pos_sec_term,axis=1))\n #samples from rbm are labelled negative\n\n #rbm_samples Tensor(\"zeros:0\", shape=(200, 200), dtype=float32)\n #this returns the full RBM set: left and right nodes concatenated\n\n #TODO What are these samples here?\n #TODO what's the impact of doing gibbs sampling here? does this make\n #sense?\n rbm_samples=self.prior.get_samples_kld(approx_post_samples=positive_samples_left,n_gibbs_sampling_steps=1)\n negative_samples=rbm_samples.detach()\n\n # print(self.prior.get_weights())\n n_split=negative_samples.size()[1]//2\n negative_samples_left,negative_samples_right=torch.split(negative_samples,split_size_or_sections=int(n_split),dim=1)\n neg_first_term=torch.matmul(negative_samples_left,self.prior.get_weights())*negative_samples_right\n \n #FIXME see above, the positive case looks different. 
Why?\n neg_sec_term=negative_samples*rbm_bias\n neg_kld_per_sample=(torch.sum(neg_first_term,axis=1)+torch.sum(neg_sec_term,axis=1))\n \n kld_per_sample=pos_kld_per_sample+neg_kld_per_sample\n\n return kld_per_sample", "def kl_divergence(self, samples):\n # Check size of input\n if not len(samples.shape) == 2:\n raise ValueError('Given samples list must be n x 2.')\n if samples.shape[1] != self._n_parameters:\n raise ValueError(\n 'Given samples must have length ' + str(self._n_parameters))\n\n best_mode = np.zeros(samples.shape[0])\n for i in range(samples.shape[0]):\n a_sample = samples[i, :]\n a_log_pdf = -np.inf\n a_max_index = -1\n for j, var in enumerate(self._vars):\n a_test_log_pdf = var.logpdf(a_sample)\n if a_test_log_pdf > a_log_pdf:\n a_log_pdf = a_test_log_pdf\n a_max_index = j\n best_mode[i] = a_max_index\n\n kl = np.zeros(len(self._vars))\n for i in range(len(self._vars)):\n y = np.array(samples[best_mode == i, :], copy=True)\n # when a mode has no points use all samples\n if y.shape[0] == 0:\n y = np.array(samples, copy=True)\n m0 = np.mean(y, axis=0)\n s0 = np.cov(y.T)\n s1 = self._covs[i]\n m1 = self._modes[i]\n s1_inv = np.linalg.inv(s1)\n if len(np.atleast_1d(s0)) > 1:\n kl[i] = 0.5 * (\n np.trace(np.matmul(s1_inv, s0)) +\n np.matmul(np.matmul(m1 - m0, s1_inv), m1 - m0) -\n np.log(np.linalg.det(s0)) +\n np.log(np.linalg.det(s1)) -\n self._n_parameters)\n else:\n kl[i] = 0.5 * (\n np.sum(s1_inv * s0) +\n (m1 - m0) * s1_inv * (m1 - m0) -\n np.log(s0) +\n np.log(s1) -\n 1)\n return kl", "def kl_divergence(dist1, dist2, symmetrized=True):\n if symmetrized == True:\n kl = (\n scipy.stats.entropy(dist1, dist2) + scipy.stats.entropy(dist2, dist1)\n ) / 2.0\n return kl\n else:\n kl = scipy.stats.entropy(dist1, dist2)\n return kl", "def MyKLD(X,Y): \n mu1,mu2 = tuple(np.mean(X,axis=0))\n sigma1,sigma2 = tuple(np.std(X,axis=0))\n m1,m2 = tuple(np.mean(X,axis=0))\n s1,s2 = tuple(np.std(X,axis=0))\n rho = np.corrcoef(X,rowvar=False)[0,1]\n r = np.corrcoef(Y,rowvar=False)[0,1]\n \n return (\n ((mu1-m1)**2/s1**2 - 2*r*(mu1-m1)*(mu2-m2)/(s1*s2) + (mu2-m2)**2/s2**2) /\n (2 * (1 - r**2)) +\n ((sigma1**2-s1**2)/s1**2 - 2*r*(rho*sigma1*sigma2-r*s1*s2)/(s1*s2) + \n (sigma2**2-s2**2)/s2**2) /\n (2 * (1 - r**2)) +\n np.log((s1**2 * s2**2 * (1-r**2)) / (sigma1**2 * sigma2**2 * (1-rho**2))) / 2\n )", "def kl_divergence(self) -> Tensor:\n return torch.tensor(0.0)", "def maxkl_strategy(self):\n # TODO: rewrite to update only distribution from sampled bucket\n # Instead of computing everything again every iteration\n\n # Label model distributions\n lm_posteriors = self.bucket_probs.clip(1e-5, 1-1e-5)\n\n # Sample distributions\n # D_KL(LM distribution||Sample distribution)\n rel_entropy = np.zeros(len(lm_posteriors))\n sample_posteriors = np.zeros(lm_posteriors.shape)\n\n # Iterate over buckets\n for i in range(len(lm_posteriors)):\n # Collect points in bucket\n bucket_items = self.ground_truth_labels[np.where(self.unique_inverse == i)[0]]\n # Collect labeled points in bucket\n bucket_gt = list(bucket_items[bucket_items != -1])\n # Add initial labeled point\n if not bucket_gt:\n bucket_gt.append(\n int(np.round(\n self.probs[\"bucket_labels_train\"][0][i].clip(0, 1)\n )))\n bucket_gt = np.array(bucket_gt)\n\n # Bucket distribution, clip to avoid D_KL undefined\n eps = 1e-2 / (len(bucket_gt))\n sample_posteriors[i, 1] = bucket_gt.mean().clip(eps, 1 - eps)\n sample_posteriors[i, 0] = 1 - sample_posteriors[i, 1]\n\n # KL divergence\n rel_entropy[i] = entropy(lm_posteriors[i, :], sample_posteriors[i, 
:])\n self.bucket_values = rel_entropy\n\n # Select buckets with highest KL divergence\n return np.where(\n np.logical_and(\n rel_entropy == np.max(rel_entropy[self.is_valid_bucket]), self.is_valid_bucket\n )\n )[0]", "def test_divergences_to_kl(dists):\n for dist1, dist2 in combinations(dists, 2):\n assert alpha_divergence(dist1, dist2, alpha=-1) == pytest.approx(kullback_leibler_divergence(dist2, dist1))\n\n assert alpha_divergence(dist1, dist2, alpha=0) != pytest.approx(kullback_leibler_divergence(dist2, dist1))\n assert alpha_divergence(dist1, dist2, alpha=2) != pytest.approx(kullback_leibler_divergence(dist2, dist1))", "def Kernel(x, y):\n\n Result = (np.dot(x_train[x, :], x_train[y, :])+1)**5 # Polynomial\n #Result = (np.dot(x_train[x, :], x_train[y, :])+1) # Linear\n #Gaussian\n \"\"\"\n sigma = 1\n if np.ndim(x_train[x, :]) == 1 and np.ndim(x_train[y, :]) == 1:\n Result = np.exp(- (np.linalg.norm(x_train[x, :] - x_train[y, :], 2)) ** 2 / (2 * sigma ** 2))\n elif (np.ndim(x_train[x, :]) > 1 and np.ndim(x_train[y, :]) == 1) or (np.ndim(x_train[x, :]) == 1 and np.ndim(x_train[y, :]) > 1):\n Result = np.exp(- (np.linalg.norm(x_train[x, :] - x_train[y, :], 2, axis=1) ** 2) / (2 * sigma ** 2))\n elif np.ndim(x_train[x, :]) > 1 and np.ndim(x_train[y, :]) > 1:\n Result = np.exp(- (np.linalg.norm(x[:, np.newaxis] - y[np.newaxis, :], 2, axis=2) ** 2) / (2 * sigma ** 2))\n \"\"\"\n return Result", "def kl_poisson(x, y):\n x = max(x, eps)\n y = max(y, eps)\n return y-x+x*log(x/y)", "def test_kl_divergence(get_distributions):\n for i, dist_a in enumerate(get_distributions):\n for j, dist_b in enumerate(get_distributions):\n kl = kl_divergence(dist_a, dist_b)\n if i == j:\n assert pytest.approx(kl, 0.0001) == 0.0\n else:\n assert kl > 0", "def kl_divergence(self) -> Tensor:\n return self.variational_strategy.kl_divergence().sum(dim=1).mean()", "def kl_divergence(self):\n return self._kl_divergence_func", "def kullback_leibler_divergence(p, q):\n null = 1e-10\n return sum(p.get(key, null) * math.log(p.get(key, null) / q.get(key, null))\n for key in set(chain(p.keys(), q.keys())))", "def kl_divergence_bern_bern(z_pres_logits, prior_pres_prob, eps=1e-15):\n z_pres_probs = torch.sigmoid(z_pres_logits)\n kl = z_pres_probs * (torch.log(z_pres_probs + eps) - torch.log(prior_pres_prob + eps)) + \\\n (1 - z_pres_probs) * (torch.log(1 - z_pres_probs + eps) - torch.log(1 - prior_pres_prob + eps))\n\n return kl", "def kl_div_posterior_gradient(self, posterior_logits, posterior_binary_samples):\n #DVAE Eq12\n #gradient of the KLD between posterior and prior wrt to posterior\n #parameters phi\n \n logger.debug(\"kl_div_posterior_gradient\")\n posterior_upper_bound = 0.999*torch.ones_like(posterior_logits)\n #logits to probabilities\n posterior_probs=torch.min(posterior_upper_bound, torch.sigmoid(posterior_logits))\n \n n_split=int(posterior_binary_samples.size()[1]//2)\n #binarised samples from posterior to RBM layers\n rbm_samples_left,rbm_samples_right=torch.split(posterior_binary_samples,split_size_or_sections=n_split,dim=1)\n\n #the following prepares the variables in the calculation in tehir format\n rbm_bias_left=self.prior.get_visible_bias()\n rbm_bias_right=self.prior.get_hidden_bias()\n\n rbm_bias=torch.cat([rbm_bias_left,rbm_bias_right])#self._h\n rbm_weight=self.prior.get_weights()#self._J\n\n # this is transposed, so we multiply what we call \"right hand\" (\"hidden layer\")\n # samples with right rbm nodes\n # rbm_weight_t=torch.transpose(rbm_weight,0,1)#self._J\n \n 
rbm_activation_right=torch.matmul(rbm_samples_right,rbm_weight.t())\n rbm_activation_left=torch.matmul(rbm_samples_left,rbm_weight)\n\n #corresponds to samples_times_J\n rbm_activation=torch.cat([rbm_activation_right,rbm_activation_left],1)\n \n #TODO what is this scaling factor?\n #[400,400] \n hierarchy_scaling= (1.0 - posterior_binary_samples) / (1.0 - posterior_probs)\n hierarchy_scaling_left,hierarchy_scaling_right=torch.split(hierarchy_scaling, split_size_or_sections=int(n_split),dim=1)\n \n #TODO why does this happen? This seems to scale only the left side of\n #the RBM. Th right side is replaced with ones.\n hierarchy_scaling_with_ones=torch.cat([hierarchy_scaling_left,torch.ones(hierarchy_scaling_right.size())],axis=1)\n \n with torch.no_grad():\n undifferentiated_component=posterior_logits-rbm_bias-rbm_activation*hierarchy_scaling_with_ones\n undifferentiated_component=undifferentiated_component.detach()\n \n kld_per_sample = torch.sum(undifferentiated_component * posterior_probs, dim=1)\n\n return kld_per_sample", "def get_KL_divergence(self):\n KL_loss = 0\n if(self.Bayesian):\n for i in range(self.num_layers):\n KL_loss += getattr(self, 'LSTMCell%i'%(i+1)).get_KL_divergence()\n \n return KL_loss", "def kl_divergence(self, post_logits, post_samples, is_training=True):\n logger.debug(\"GumBolt::kl_divergence\")\n \n # Concatenate all hierarchy levels\n logits_q_z = torch.cat(post_logits, 1)\n post_zetas = torch.cat(post_samples, 1)\n \n # Compute cross-entropy b/w post_logits and post_samples\n cross_entropy = - self._bce_loss(logits_q_z, post_zetas)\n cross_entropy = torch.mean(torch.sum(cross_entropy, 1), 0)\n \n # Compute positive energy expval using hierarchical posterior samples\n \n # Number of hidden and visible variables on each side of the RBM\n num_var_rbm = (self.n_latent_hierarchy_lvls * self._latent_dimensions)//2\n \n # Compute positive energy contribution to the KL divergence\n post_zetas_vis, post_zetas_hid = post_zetas[:, :num_var_rbm], post_zetas[:, num_var_rbm:]\n pos_energy = self.energy_exp(post_zetas_vis, post_zetas_hid)\n \n # Compute gradient contribution of the logZ term\n rbm_visible_samples, rbm_hidden_samples = self.sampler.block_gibbs_sampling()\n rbm_vis, rbm_hid = rbm_visible_samples.detach(), rbm_hidden_samples.detach()\n neg_energy = - self.energy_exp(rbm_vis, rbm_hid)\n \n kl_loss = cross_entropy + pos_energy + neg_energy\n return kl_loss, cross_entropy, pos_energy, neg_energy", "def kl(self, other: \"Distribution\", **kwargs) -> TensorType:", "def kl(self, other: \"Distribution\", **kwargs) -> TensorType:", "def rbf_kernel(x_1, x_2, l):\n\n\tassert l > 0, \"The hyperparameter l must be > 0\"\n\tdist = euclidean_distances(x_1.reshape(-1,1), x_2.reshape(-1,1))\n\treturn np.exp(dist**2 / -(2*l**2))", "def kde_sklearn(x, x_grid, bandwidth=0.8, **kwargs):\n \n kde_skl = KernelDensity(bandwidth=bandwidth, **kwargs)\n #kde_skl = KernelDensity()\n kde_skl.fit(x[:, np.newaxis])\n # score_samples() returns the log-likelihood of the samples\n log_pdf = kde_skl.score_samples(x_grid[:, np.newaxis])\n \n pdf = np.exp(log_pdf)\n\n fig = plt.figure(figsize=(10,10))\n ax = fig.add_subplot(111)\n #ax.imshow((1,1), cmap=plt.cm.gist_earth_r,extent=[xmin, xmax])\n ax.plot(x_grid, pdf, '.', label=\"kernel = kde_sklearn gaussian\", markersize=2)\n ax.text(700, 0.0035, \"N={0} points\".format(x.shape[0]))\n ax.legend(loc='upper left')\n ax.set_xlim([min(x), max(x)])\n ax.set_ylim(-0.001, 0.006)\n plt.show()", "def kl(self):\n weights_logvar = self.weights_logvar\n 
kld_weights = self.prior_stdv.log() - weights_logvar.mul(0.5) + \\\n (weights_logvar.exp() + (self.weights.pow(2) - self.prior_mean)) / (\n 2 * self.prior_stdv.pow(2)) - 0.5\n kld_bias = self.prior_bias_stdv.log() - self.bias_logvar.mul(0.5) + \\\n (self.bias_logvar.exp() + (self.bias.pow(2) - self.prior_bias_mean)) / (\n 2 * self.prior_bias_stdv.pow(2)) \\\n - 0.5\n return kld_weights.sum() + kld_bias.sum()", "def _grid_search_wl_kernel(\n k: WeisfilerLehman,\n subtree_candidates,\n train_x: list,\n train_y: torch.Tensor,\n lik: float,\n subtree_prior=None, # pylint: disable=unused-argument\n lengthscales=None,\n lengthscales_prior=None, # pylint: disable=unused-argument\n):\n # lik = 1e-6\n assert len(train_x) == len(train_y)\n best_nlml = torch.tensor(np.inf)\n best_subtree_depth = None\n best_lengthscale = None\n best_K = None\n if lengthscales is not None and k.se is not None:\n candidates = [(h_, l_) for h_ in subtree_candidates for l_ in lengthscales]\n else:\n candidates = [(h_, None) for h_ in subtree_candidates]\n\n for i in candidates:\n if k.se is not None:\n k.change_se_params({\"lengthscale\": i[1]})\n k.change_kernel_params({\"h\": i[0]})\n K = k.fit_transform(train_x, rebuild_model=True, save_gram_matrix=True)\n # self.logger.debug(K)\n K_i, logDetK = compute_pd_inverse(K, lik)\n # self.logger.debug(train_y)\n nlml = -compute_log_marginal_likelihood(K_i, logDetK, train_y)\n # self.logger.debug(f\"{i} {nlml}\")\n if nlml < best_nlml:\n best_nlml = nlml\n best_subtree_depth, best_lengthscale = i\n best_K = torch.clone(K)\n # self.logger.debug(f\"h: {best_subtree_depth} theta: {best_lengthscale}\")\n # self.logger.debug(best_subtree_depth)\n k.change_kernel_params({\"h\": best_subtree_depth})\n if k.se is not None:\n k.change_se_params({\"lengthscale\": best_lengthscale})\n k._gram = best_K # pylint: disable=protected-access", "def gradient_descent(self, x, y):\n # Initialize weights vector\n self.weights = np.zeros(len(x[0]))\n\n # Storing number of training example in a variable \n n = len(x)\n\n # Initiate variables to keep track of the current and smallest loss recorded\n lowest_loss = sys.float_info.max\n current_loss = sys.float_info.max\n\n # Initiate variables to keep track of step sizes\n norm = sys.float_info.max\n smallest_norm = sys.float_info.max\n\n # Initiate list variable that stores all previous weights\n prev_weights = []\n\n # Initiate list that stores all the errors. 
\n errors = []\n \n # Variable to keep track of the number of iterations that returns a bigger loss than current loss\n k_loss_iteration = 1\n\n # Learning loop\n for i in range(self.max_iter):\n\n # Append current weights\n prev_weights.append(np.array(self.weights))\n \n # Minimizing Loss Function Error by adjusting weights using Gradient Descent\n self.weights += self.learning_rate * (sum([x[i] * (y[i] - self.logistic_function(self.weights.dot(x[i]))) for i in range(n)]) - 2 * self.l2 * self.weights)\n\n # Compute the error of the Cost Function and store it in a list\n current_loss = self.cost(x,y)\n\n if len(errors) > 1 and current_loss > errors[-1]:\n k_loss_iteration += 1\n else: \n k_loss_iteration = 1\n\n errors.append(current_loss)\n \n # Track smallest loss\n if current_loss < lowest_loss:\n lowest_loss = current_loss\n\n # Compute the L2 Norm of the difference between current weights and previous weights\n norm = np.linalg.norm(self.weights - prev_weights[-1])\n\n # Track smallest step size and set it as error threshold\n if norm < smallest_norm:\n smallest_norm = norm\n\n # If this L2 norm is smaller than the error_threshold it means that it converged, hence we can break. In other words, repeat until the step size is too small\n if self.error_threshold != None and norm < self.error_threshold:\n print(\"Converged after {} iterations!\".format(i))\n break\n\n # stop if error hasn't gone down in k iterations\n if k_loss_iteration >= 10:\n print(k_loss_iteration + \" iterations of loss not decreasing on {}th itertion.\".format(i))\n break\n\n # Log final weights\n print(\"Final norm: \" + str(norm) + \"\\nSmallest step size recorded: \" + str(smallest_norm) + \"\\nFinal error: \" + str(current_loss) + \"\\nLowest error recorded: \" + str(lowest_loss) + \"\\nNumber of epochs: \" + str(len(errors)) + \"\\nFinal weights: \" + str(self.weights))", "def kl_divergence(self):\r\n\r\n target_columns = list(self.origdst.columns[11:-3])\r\n target_columns.append(self.origdst.columns[1]) # channel\r\n target_columns.append(self.origdst.columns[2]) # program_title\r\n target_columns.append(self.origdst.columns[3]) # genre\r\n\r\n kl_dict = {}\r\n\r\n for col in target_columns:\r\n\r\n try:\r\n\r\n col_counts_orig = self.origdst[col].value_counts(normalize=True).sort_index(ascending=True)\r\n col_counts_synth = self.synthdst[col].value_counts(normalize=True).sort_index(ascending=True)\r\n\r\n kl = sum(rel_entr(col_counts_orig.tolist(), col_counts_synth.tolist()))\r\n\r\n kl_dict[col] = kl\r\n\r\n except:\r\n\r\n print('For the column ', col, ' you must generate the same unique values as the real dataset.')\r\n print('The number of unique values than you should generate for column ', col, 'is ',\r\n len(self.origdst[col].unique()))\r\n\r\n return kl_dict", "def kl_gamma(x, y, a=1):\n x = max(x, eps)\n y = max(y, eps)\n return a*(x/y - 1 - log(x/y))", "def kl_loss(y_true, y_pred):\n #y_true = tf.constant(y_true, dtype=tf.dtypes.float64)\n #y_pred = tf.constant(y_pred, dtype=tf.dtypes.float64)\n #y_true = tf.reshape(y_true, shape=[y_true.shape[0], y_true.shape[1] * y_true.shape[2]])\n #y_pred = tf.reshape(y_pred, shape=[y_pred.shape[0], y_pred.shape[1] * y_pred.shape[2]])\n loss = 0\n if y_true.shape[-1] is None:\n return 0.0\n for i in range(y_true.shape[-1]):\n hist_true = np.histogram(y_true[:,i], bins=10, density=True)\n hist_pred = np.histogram(y_pred[:,i], bins=10, density=True)\n loss += kl(hist_true[0], hist_pred[0])\n return loss", "def js_divergence(x,y):\n\tassert (isinstance(x, 
BayesNet) and isinstance(y, BayesNet)), 'Must pass in BayesNet objects.'\n\tassert (x==y), 'Passed-in BayesNet objects are not structurally equal.'\n\n\tz = 0.5 * ( x.flat_cpt() + y.flat_cpt() )\n\tdistance = 0.5 * kl_divergence(x,z) + 0.5 * kl_divergence(y,z)\n\treturn distance", "def _kl_divergence(p, p_logits, q):\n for tensor in [p, p_logits, q]:\n if not tensor.dtype.is_floating:\n raise ValueError('Input %s must be floating type.', tensor.name)\n p.shape.assert_has_rank(2)\n p_logits.shape.assert_has_rank(2)\n q.shape.assert_has_rank(1)\n return math_ops.reduce_sum(\n p * (nn_ops.log_softmax(p_logits) - math_ops.log(q)), axis=1)", "def kl_divergence(self, params_q, params_p):\n means_q = params_q[:, :, 0]\n log_std_q = params_q[:, :, 1]\n\n means_p = params_p[:, :, 0]\n log_std_p = params_p[:, :, 1]\n\n std_q = torch.exp(log_std_q)\n std_p = torch.exp(log_std_p)\n\n kl_div = log_std_p - log_std_q + (std_q ** 2 + (means_q - means_p) ** 2) / (2.0 * std_p ** 2) - 0.5\n\n return kl_div.sum(dim=-1)", "def kl_divergence(self, logits_q, logits_p):\n return (torch.exp(logits_q) * (logits_q - logits_p)).sum(1, keepdim=True)", "def kl_neg_bin(x, y, r=1):\n return r*log((r+x)/(r+y)) - x * log(y*(r+x)/(x*(r+y)))", "def kl_divergence_symmetric(a, b, lowlim=1e-10):\n return kl_divergence(a, b, lowlim) + kl_divergence(b, a, lowlim)", "def kl_dist_smoothing(distribution1: np.array, distribution2: np.array, epsilon: float) -> float:\n # Performs smoothing\n distributions = [distribution1, distribution2]\n smoothed_distributions = []\n for distribution in distributions:\n nonzeros = np.count_nonzero(distribution)\n zeros = len(distribution) - nonzeros\n smoothed_distributions.append([epsilon if prob == 0 else prob - zeros * epsilon / nonzeros\n for prob in distribution])\n\n return sum(kl_div(smoothed_distributions[0], smoothed_distributions[1]))", "def kernel_rbf(x, y,gamma):\r\n return np.exp(- gamma * np.linalg.norm(x- y)**2)", "def _k_lin_rbf(x, hyp, y=None, diag_only=False):\n prod_rbf_lengthscale = hyp[\"prod.rbf.lengthscale\"]\n prod_rbf_variance = hyp[\"prod.rbf.variance\"]\n prod_linear_variances = hyp[\"prod.linear.variances\"]\n linear_variances = hyp[\"linear.variances\"]\n\n x_rbf = cas_reshape(x[:, 1], (-1, 1))\n y_rbf = y\n if not y is None:\n y_rbf = cas_reshape(y[:, 1], (-1, 1))\n\n k_prod_rbf = _k_rbf(x_rbf, y_rbf, prod_rbf_variance, prod_rbf_lengthscale,\n diag_only)\n\n x_prod_lin = cas_reshape(x[:, 1], (-1, 1))\n y_prod_lin = y\n if not y is None:\n y_prod_lin = cas_reshape(y[:, 1], (-1, 1))\n\n k_prod_lin = _k_lin(x_prod_lin, y_prod_lin, prod_linear_variances, diag_only)\n\n k_linear = _k_lin(x, y, linear_variances, diag_only)\n\n return k_prod_lin * k_prod_rbf + k_linear", "def kl(self, other, xs, reversesd=False, **kwargs):\n raise NotImplementedError", "def kl_divergence(id1,id2,m1,m2,cache_val_preds,val_dl,device=\"cpu\"):\n if m1 == m2:\n return 0\n \n m1_preds = F.log_softmax(get_preds(m1,val_dl,device),dim=1) if cache_val_preds[id1] == None else cache_val_preds[id1]\n m2_preds = F.log_softmax(get_preds(m2,val_dl,device),dim=1) if cache_val_preds[id2] == None else cache_val_preds[id2]\n\n cache_val_preds[id1] = m1_preds\n cache_val_preds[id2] = m2_preds\n\n print(\"PRED SHAPE:\", m1_preds.shape)\n\n kl_loss = nn.KLDivLoss(reduction=\"batchmean\",log_target=True)\n\n result = kl_loss(m1_preds, m2_preds)\n print(\"\\t\",result,\"dtype:\",result.dtype)\n\n return result", "def kl_divergence(means: Tensor, logvars: Tensor) ->Tensor:\n kl_cost = -0.5 * (logvars - means ** 2 - 
torch.exp(logvars) + 1.0)\n kl_cost = torch.mean(kl_cost, 0)\n return torch.sum(kl_cost)", "def kullback_leibler(p: np.ndarray, q: np.ndarray) -> float:\n kl = 0\n for pi, qi in zip(p, q):\n if pi > 0:\n if qi > 0:\n kl += pi * np.log(pi/qi)\n else:\n kl = np.inf\n return kl", "def KernelTest(x, y):\n\n Result = (np.dot(x_test[x, :], x_train[y, :])+1)**5 # Polynomial\n # Result = (np.dot(x_train[x, :], x_train[y, :])+1) # Linear\n # Sum = DotProduct(x, y)\n #Sum = 0.0\n #for i in range(2):\n # Sum = Sum + x_train[x, i]*x_train[y, i]\n # Result = (Sum+1)**5\n \"\"\"\n #Gaussian\n sigma = 1\n if np.ndim(x_test[x, :]) == 1 and np.ndim(x_train[y, :]) == 1:\n Result = np.exp(- (np.linalg.norm(x_test[x, :] - x_train[y, :], 2)) ** 2 / (2 * sigma ** 2))\n elif (np.ndim(x_test[x, :]) > 1 and np.ndim(x_train[y, :]) == 1) or (np.ndim(x_test[x, :]) == 1 and np.ndim(x_train[y, :]) > 1):\n Result = np.exp(- (np.linalg.norm(x_test[x, :] - x_train[y, :], 2, axis=1) ** 2) / (2 * sigma ** 2))\n elif np.ndim(x_test[x, :]) > 1 and np.ndim(x_train[y, :]) > 1:\n Result = np.exp(- (np.linalg.norm(x[:, np.newaxis] - y[np.newaxis, :], 2, axis=2) ** 2) / (2 * sigma ** 2))\n \"\"\"\n return Result", "def klucb(x, d, div, upperbound, lowerbound=-float('inf'), precision=1e-6):\n low = max(x, lowerbound)\n up = upperbound\n while up-low > precision:\n m = (low+up)/2\n if div(x, m) > d:\n up = m\n else:\n low = m\n return (low+up)/2", "def fit(self, X: TwoDimArray, y: OneDimArray = None) -> 'KDE':\n\n self.check_params(X)\n\n if self.kde_params is None:\n kde_params = {}\n else:\n kde_params = self.kde_params\n\n self._kde = KernelDensity(**kde_params).fit(X)\n\n self.threshold_ = np.percentile(\n self.anomaly_score(), 100. * (1. - self.contamination)\n )\n\n return self", "def calculate_kld(p, q, limits, dx=0.01):\n if p.shape != q.shape:\n raise ValueError('Cannot calculate KLD between two ensembles with different shapes')\n\n # Make a grid from the limits and resolution\n grid = _calculate_grid_parameters(limits, dx)\n\n # Evaluate the functions on the grid and normalize\n pe = p.gridded(grid.grid_values)\n pn = pe[1]\n qe = q.gridded(grid.grid_values)\n qn = qe[1]\n\n # Calculate the KLD from q to p\n Dpq = array_metrics.quick_kld(pn, qn, grid.resolution)# np.dot(pn * logquotient, np.ones(len(grid)) * dx)\n\n if np.any(Dpq < 0.): #pragma: no cover\n print('broken KLD: '+str((Dpq, pn, qn, grid.resolution)))\n Dpq = epsilon*np.ones(Dpq.shape)\n return Dpq", "def kl_dirichlet(alpha, beta):\n alpha_0 = torch.sum(alpha, dim=-1, keepdim=True)\n beta_0 = torch.sum(beta, dim=-1, keepdim=True)\n t1 = torch.lgamma(alpha_0) - torch.sum(torch.lgamma(alpha), dim=-1, keepdim=True)\n t2 = torch.lgamma(beta_0) - torch.sum(torch.lgamma(beta), dim=-1, keepdim=True)\n t3 = torch.sum((alpha - beta) * (torch.digamma(alpha) - torch.digamma(alpha_0)), dim=-1, keepdim=True)\n return t1 - t2 + t3", "def kl_divergence_fn(\n args: StepFunctionArgs,\n contrast_sources: Optional[FeatureAttributionInput] = None,\n contrast_target_prefixes: Optional[FeatureAttributionInput] = None,\n contrast_targets: Optional[FeatureAttributionInput] = None,\n contrast_targets_alignments: Optional[List[List[Tuple[int, int]]]] = None,\n top_k: int = 0,\n top_p: float = 1.0,\n min_tokens_to_keep: int = 1,\n) -> SingleScorePerStepTensor:\n\n original_logits: torch.Tensor = args.attribution_model.output2logits(args.forward_output)\n contrast_output = _get_contrast_output(\n args=args,\n contrast_sources=contrast_sources,\n contrast_targets=contrast_targets,\n 
contrast_target_prefixes=contrast_target_prefixes,\n contrast_targets_alignments=contrast_targets_alignments,\n return_contrastive_target_ids=False,\n )\n contrast_logits: torch.Tensor = args.attribution_model.output2logits(contrast_output)\n return logits_kl_divergence(\n original_logits=original_logits,\n contrast_logits=contrast_logits,\n top_p=top_p,\n top_k=top_k,\n min_tokens_to_keep=min_tokens_to_keep,\n )", "def kernel_regression_h(x, y, silverman=False):\n xx = np.array(x)\n yy = np.array(y)\n # Check input\n assert xx.shape[0] == yy.size, (\n f'size(x, 0) != size(y): {xx.shape[0]} != {yy.size}' )\n if xx.ndim == 1: # deal with 1d-arrays\n xx = xx[:, np.newaxis]\n n = xx.shape[0]\n d = xx.shape[1]\n\n # Silverman (1986), Scott (1992), Bowman and Azzalini (1997)\n # Very similar to stats.gaussian_kde\n # h has dimension d\n h = ( (4. / float(d + 2) / float(n))**(1. / float(d + 4)) *\n np.std(xx, axis=0, ddof=1) )\n\n if not silverman:\n # Find the optimal h\n bounds = [(0.2*i, 5.0*i) for i in h]\n if n <= 100:\n res = opt.minimize(\n _cross_valid_h, h, args=(xx, yy), method='TNC', bounds=bounds,\n options={'ftol': 1e-10, 'xtol': 1e-10, 'maxfun': 1000})\n h = res.x\n else:\n res = opt.minimize(\n _boot_h, h, args=(xx, yy), method='TNC', bounds=bounds,\n options={'ftol': 1e-10, 'xtol': 1e-10, 'maxfun': 1000})\n h = res.x\n\n if len(h) == 1:\n h = h[0]\n\n return h", "def hellinger(x,y):\n\tassert (isinstance(x, BayesNet) and isinstance(y, BayesNet)), 'Must pass in BayesNet objects.'\n\tassert (x==y), 'Passed-in BayesNet objects are not structurally equal.'\n\n\tdistance = ( 1 / np.sqrt( 2 ) ) * np.sqrt( np.sum( ( np.sqrt( x.flat_cpt() ) - np.sqrt( y.flat_cpt() ) )**2) )\n\treturn distance", "def kld(cls,\n y_true,\n y_pred):\n y_pred /= np.sum(y_pred)\n y_true = np.clip(y_true, cls.eps, 1)\n y_pred = np.clip(y_pred, cls.eps, 1)\n print(y_pred)\n print(y_true)\n print(y_true * np.log(y_true / y_pred))\n _kld = np.sum(y_true * np.log(y_true / y_pred), axis=-1)\n print('KLD: ' + str(_kld))\n return _kld", "def get_Kl_divergence(model1, model2, collection, lam, missing_val = 0.0001):\n smoot_m2 = {key: (1-lam)*model2.get(key, 0) + lam*collection.get(key, missing_val) for key in model1}\n\n divergence = sum([model1[key]*math.log(model1[key]/smoot_m2[key]) for key in model1])\n return divergence", "def train(self, X, y):\n m = X.shape[0]\n D = np.empty(m)\n D.fill(1/(m*1.0))\n\n hs = 0\n for t in range(self.T):\n self.h[t] = self.WL(D,X,y)\n eps = 0\n\n for j in range(m): \n if (y[j]*(1.0) != self.h[t].predict(X)[j]):\n eps+=D[j]\n\n self.w[t] =0.5*math.log((1/eps)-1)\n\n Dtmp = []\n for j in range(m):\n Dtmp.append(D[j]*math.exp(-1*self.w[t]*y[j]*(self.h[t].predict(X)[j])))\n\n Dtmp = np.array(Dtmp) \n s = np.sum(Dtmp)\n\n for i in range(m):\n D[i] = (D[i]*math.exp(-1*self.w[t]*y[i]*self.h[t].predict(X)[i]))/s", "def train_LVQ2(self, x, y):\n while self.epsilon >= 0.01:\n rnd_i = np.random.randint(0, len(x))\n rnd_s = x[rnd_i]\n target_y = y[rnd_i]\n \n self.epsilon = self.epsilon - self.epsilon_dec_factor\n \n closest_pvector = self.find_closest(rnd_s)[1]\n second_closest_pvector = self.find_runnerup(rnd_s)\n compare_distance = np.linalg.norm(closest_pvector.p_vector - rnd_s)/np.linalg.norm(second_closest_pvector.p_vector - rnd_s)\n \n if target_y == second_closest_pvector.class_id and target_y != closest_pvector.class_id and compare_distance > 0.8 and compare_distance < 1.2:\n closest_pvector.update(rnd_s, False)\n second_closest_pvector.update(rnd_s)\n elif target_y == 
closest_pvector.class_id:\n closest_pvector.update(rnd_s)\n elif target_y != closest_pvector.class_id:\n closest_pvector.update(rnd_s, False)\n closest_pvector.epsilon = self.epsilon\n return self.p_vectors", "def kl_divergence(a, b, lowlim=1e-10):\n mask = np.logical_and(b > lowlim, a > lowlim)\n return np.sum(a[mask] * np.log(a[mask] / b[mask]))", "def bhhh_internal(\n criterion_and_derivative,\n x,\n convergence_absolute_gradient_tolerance,\n stopping_max_iterations,\n):\n criterion_accepted, gradient = criterion_and_derivative(x)\n x_accepted = x\n\n hessian_approx = np.dot(gradient.T, gradient)\n gradient_sum = np.sum(gradient, axis=0)\n direction = np.linalg.solve(hessian_approx, gradient_sum)\n gtol = np.dot(gradient_sum, direction)\n\n initial_step_size = 1\n step_size = initial_step_size\n\n niter = 1\n while niter < stopping_max_iterations:\n niter += 1\n\n x_candidate = x_accepted + step_size * direction\n criterion_candidate, gradient = criterion_and_derivative(x_candidate)\n\n # If previous step was accepted\n if step_size == initial_step_size:\n hessian_approx = np.dot(gradient.T, gradient)\n\n else:\n criterion_candidate, gradient = criterion_and_derivative(x_candidate)\n\n # Line search\n if np.sum(criterion_candidate) > np.sum(criterion_accepted):\n step_size /= 2\n\n if step_size <= 0.01:\n # Accept step\n x_accepted = x_candidate\n criterion_accepted = criterion_candidate\n\n # Reset step size\n step_size = initial_step_size\n\n # If decrease in likelihood, calculate new direction vector\n else:\n # Accept step\n x_accepted = x_candidate\n criterion_accepted = criterion_candidate\n\n gradient_sum = np.sum(gradient, axis=0)\n direction = np.linalg.solve(hessian_approx, gradient_sum)\n gtol = np.dot(gradient_sum, direction)\n\n if gtol < 0:\n hessian_approx = np.dot(gradient.T, gradient)\n direction = np.linalg.solve(hessian_approx, gradient_sum)\n\n # Reset stepsize\n step_size = initial_step_size\n\n if gtol < convergence_absolute_gradient_tolerance:\n break\n\n result_dict = {\n \"solution_x\": x_accepted,\n \"solution_criterion\": criterion_accepted,\n \"n_iterations\": niter,\n \"message\": \"Under develpment\",\n }\n\n return result_dict", "def my_kernel(X, Y):\n S = 0.84 # parameter from rhos\n\n if dset == 1:\n gamma = 0.0005\n else:\n gamma = 0.00087 # maximise variance of kernel matrix\n if np.array_equal(X, Y):\n N = X.shape[0]\n M = (1 - S) * np.ones((N, N)) + S * np.eye(N)\n else:\n M = 1\n\n pairwise_sq_dists = cdist(X, Y, 'sqeuclidean')\n K = exp(-gamma * pairwise_sq_dists) * M\n return K", "def _kernel(self, x1, x2, beta=1):\n d = (x1 - x2)**2\n return np.exp(-beta * d)", "def test_KL_divergence(generator, noise_dim, code_dim, validation_kernels, val_codes, training_noise_dict, n_samples_per_axis=40, n_axis=1):\r\n total_KL = 0\r\n for code in validation_kernels:\r\n print(code)\r\n fakes = generate_fake_samples_constant_code(generator, code_dim, noise_dim, code, n_samples_per_axis**n_axis, val_codes, training_noise_dict)\r\n print(\"Fake generation complete\")\r\n fakes_kernel = KernelDensity(kernel='gaussian', bandwidth=KD_BANDWIDTH).fit(fakes)\r\n print(\"Kernel complete\")\r\n total_KL += kl_divergence(validation_kernels[code], fakes_kernel, n_samples_per_axis, n_axis)\r\n return total_KL", "def LD(x, y, lodict={}):\n return needleman_wunsch(x, y, lodict=lodict, gop=-1, gep=-1)", "def calculate_convergence(v1, v2):\r\n\r\n return norm(v2 - v1, ord=1)", "def compute_kl(self, df):\n value_counts = [df[col].value_counts() for col in self.hist_cols]\n 
next_hists = self.value_counts_to_hists(value_counts)\n\n if self.prev_hists is None:\n self.prev_hists = next_hists\n return None\n\n output = []\n for prev_h, curr_h in zip(self.prev_hists, next_hists):\n for i in range(len(prev_h)):\n prev_h[i] = prev_h[i] if prev_h[i] != 0 else 1\n curr_h[i] = curr_h[i] if curr_h[i] != 0 else 1\n kl = entropy(prev_h, curr_h)\n output.append(kl)\n\n self.prev_hists = next_hists\n return output", "def d_bernoulli_kullback_leibler_dq(p: float, q: float) -> float:\n return (1 - p) / (1 - q) - p/q", "def add_kl_loss(self, posterior_dist, prior_dist):\n if self.kl_use_exact:\n self.add_loss(\n kl_lib.kl_divergence(\n posterior_dist, prior_dist\n ) * self.kl_weight * self.kl_anneal\n )\n else:\n self.add_loss(\n self._kl_approximation(\n posterior_dist, prior_dist\n ) * self.kl_weight * self.kl_anneal\n )", "def kl(mu1, mu2):\n return (mu2-mu1)**2/2", "def kl(mu1, mu2):\n return (mu2-mu1)**2/2", "def JS_divergence(xs,ys,pdf_x=None,pdf_y=None,data_range=None):\n if data_range is None:\n data_range = list(set(xs)) + list(set(ys))\n if pdf_x is None:\n pdf_x = prob_density_func(xs,norm=True,data_range=data_range)\n if pdf_y is None:\n pdf_y = prob_density_func(ys,norm=True,data_range=data_range)\n M = {}\n for i in pdf_x:\n if i not in pdf_y:\n pdf_y[i] = 0.0\n for i in pdf_y:\n if i not in pdf_x:\n pdf_y[i] = 0.0\n for i in pdf_x:\n M[i] = .5*(pdf_x[i] + pdf_y[i])\n return .5*KL_divergence(None,\n None,\n pdf_x=pdf_x,\n pdf_y=M,\n data_range=data_range) + \\\n .5*KL_divergence(None,\n None,\n pdf_x=pdf_y,\n pdf_y=M,\n data_range=data_range)", "def lkendalltau(x,y):\r\n n1 = 0\r\n n2 = 0\r\n iss = 0\r\n for j in range(len(x)-1):\r\n for k in range(j,len(y)):\r\n a1 = x[j] - x[k]\r\n a2 = y[j] - y[k]\r\n aa = a1 * a2\r\n if (aa): # neither list has a tie\r\n n1 = n1 + 1\r\n n2 = n2 + 1\r\n if aa > 0:\r\n iss = iss + 1\r\n else:\r\n iss = iss -1\r\n else:\r\n if (a1):\r\n n1 = n1 + 1\r\n else:\r\n n2 = n2 + 1\r\n tau = iss / math.sqrt(n1*n2)\r\n svar = (4.0*len(x)+10.0) / (9.0*len(x)*(len(x)-1))\r\n z = tau / math.sqrt(svar)\r\n prob = erfcc(abs(z)/1.4142136)\r\n return tau, prob", "def compute_kl(mu1, mu2, sigma1, sigma2):\n k = len(mu1)\n try:\n term1 = np.trace(np.matmul(np.linalg.inv(sigma2), sigma1))\n term2 = np.matmul(np.matmul((mu2 - mu1).T,\n np.linalg.inv(sigma2)),\n (mu2 - mu1))\n det_sigma1 = compute_determinant(sigma1)\n det_sigma2 = compute_determinant(sigma2)\n # term3 = np.log(np.linalg.det(sigma2)/np.linalg.det(sigma1))\n term3 = np.log(det_sigma2)\n term4 = np.log(det_sigma1)\n kl = (term1 + term2 - k + term3 - term4)/2.\n return kl\n except np.linalg.LinAlgError:\n return np.nan", "def kl(mu1, mu2):\n return (mu2 - mu1) ** 2 / 2", "def d2_bin(self, x, y):\n \n KD = KernelDensity(bandwidth=self.bandwidth,kernel=self.kernel)\n KD.fit(np.column_stack((x,y)))\n grid1 = np.linspace(np.min(x),np.max(x),self.bins)\n grid2 = np.linspace(np.min(y),np.max(y),self.bins)\n mesh = np.meshgrid(grid1,grid2)\n data = np.column_stack((mesh[0].reshape(-1,1),mesh[1].reshape(-1,1)))\n samp = KD.score_samples(data)\n samp = samp.reshape(self.bins,self.bins)\n p = np.exp(samp)/np.sum(np.exp(samp))\n\n return p", "def lpointbiserialr(x,y):\r\n TINY = 1e-30\r\n if len(x) <> len(y):\r\n raise ValueError, 'INPUT VALUES NOT PAIRED IN pointbiserialr. 
ABORTING.'\r\n data = pstats.abut(x,y)\r\n categories = pstats.unique(x)\r\n if len(categories) <> 2:\r\n raise ValueError, \"Exactly 2 categories required for pointbiserialr().\"\r\n else: # there are 2 categories, continue\r\n codemap = pstats.abut(categories,range(2))\r\n recoded = pstats.recode(data,codemap,0)\r\n x = pstats.linexand(data,0,categories[0])\r\n y = pstats.linexand(data,0,categories[1])\r\n xmean = mean(pstats.colex(x,1))\r\n ymean = mean(pstats.colex(y,1))\r\n n = len(data)\r\n adjust = math.sqrt((len(x)/float(n))*(len(y)/float(n)))\r\n rpb = (ymean - xmean)/samplestdev(pstats.colex(data,1))*adjust\r\n df = n-2\r\n t = rpb*math.sqrt(df/((1.0-rpb+TINY)*(1.0+rpb+TINY)))\r\n prob = betai(0.5*df,0.5,df/(df+t*t)) # t already a float\r\n return rpb, prob", "def klucb_bern(x, d, precision=1e-6):\n upperbound = min(1., klucb_gauss(x, d))\n return klucb(x, d, kl_bern, upperbound, precision)", "def bernoulli_kullback_leibler(p: float, q: float) -> float:\n kl1, kl2 = 0, np.infty\n if p > 0:\n if q > 0:\n kl1 = p*np.log(p/q)\n\n if q < 1:\n if p < 1:\n kl2 = (1 - p) * np.log((1 - p) / (1 - q))\n else:\n kl2 = 0\n return kl1 + kl2", "def KL_divergence(value_counts1, value_counts2):\n divergence = 0\n s1 = sum([value_counts1[value] for value in value_counts1])\n s2 = sum([value_counts2[value] for value in value_counts2])\n for value in set(value_counts1).union(value_counts2):\n assert(value in value_counts1 or value in value_counts2)\n if value not in value_counts1:\n s1 += KL_SMOOTHING\n if value not in value_counts2:\n s2 += KL_SMOOTHING\n for value in set(value_counts1).union(value_counts2):\n v1 = v2 = KL_SMOOTHING\n if value in value_counts1:\n v1 = value_counts1[value]\n if value in value_counts2:\n v2 = value_counts2[value]\n v1 = float(v1) / s1\n v2 = float(v2) / s2\n divergence += v1 * math.log(v1 / v2)\n if divergence > math.e:\n divergence = math.e\n return divergence", "def k(xs, ys, sigma=1, l=1):\n\n # Pairwise difference matrix.\n dx = np.expand_dims(xs, 1) - np.expand_dims(ys, 0)\n return (sigma ** 2) * np.exp(-((dx / l) ** 2) / 2)", "def distorted_data(X_train,y_train,X_test,y_test,dialation=10.):\n\n t,l1 = regularized_grad_descent(X_train,y_train,lambda_reg=1e-6)\n\n X_train[:,0] *= dialation\n X_test[:,0] *= dialation\n \n t,l2 = regularized_grad_descent(X_train,y_train,lambda_reg=1e-6)\n\n plt.plot(np.log(l1),'b--')\n plt.plot(np.log(l2),'r--')\n plt.show()\n plt.close()", "def KL(P,Q):\n epsilon = 0.00001\n \n #You may want to instead make copies to avoid changing the np arrays.\n P = P+epsilon\n Q = Q+epsilon\n \n divergence = np.sum(P*np.log(P/Q))\n return divergence", "def kl(\n self,\n x: Tensor,\n covariates: Tensor,\n use_temp: bool,\n ) -> Tuple[Tensor, Tensor]:\n log_probs, ldj_sum, _ = self.compute_probabilities(x, covariates, use_temp)\n\n return -(torch.logsumexp(log_probs, dim=1) + ldj_sum).mean()", "def kl(self, old_dist_info, new_dist_info):\n old_prob = old_dist_info[\"prob\"]\n new_prob = new_dist_info[\"prob\"]\n return np.sum(\n old_prob * (np.log(old_prob + TINY) - np.log(new_prob + TINY)),\n axis=2\n )", "def kde_2d_multiple_times(plume_x, plume_y, X, Y):\n Z = []\n positions = np.vstack([X.ravel(), Y.ravel()])\n for i in range(len(plume_x)):\n values = np.vstack([plume_x[i], plume_y[i]])\n kernel = stats.gaussian_kde(values)\n Z.append(np.reshape(kernel(positions).T, X.shape))\n return Z", "def kde_scipy(x, x_grid, bandwidth=0.2, **kwargs):\n # Note that scipy weights its bandwidth by the covariance of the\n # input data. 
To make the results comparable to the other methods,\n # we divide the bandwidth by the sample standard deviation here.\n #kde = gaussian_kde(x, bw_method=bandwidth / x.std(ddof=1), **kwargs)\n kde = gaussian_kde(x)\n return kde.evaluate(x_grid)", "def hellinger_distance(x, y):\n assert (x.dtype == np.float64 and y.dtype == np.float64) or (\n x.dtype == np.float32 and y.dtype == np.float32)\n assert (np.all(x.sum(1) != 0.) and np.all(y.sum(1) != 0.))\n x /= x.sum(1).reshape(x.shape[0], 1)\n y /= y.sum(1).reshape(y.shape[0], 1)\n x = np.sqrt(x)\n y = np.sqrt(y)\n # x (120, 40), y (100, 40), H(x,y) (120, 100)\n xx = np.tile(x, (y.shape[0], 1, 1)).transpose((1, 0, 2))\n yy = np.tile(y, (x.shape[0], 1, 1))\n xx_yy = xx - yy\n res = np.sqrt(np.sum(xx_yy ** 2, axis=-1))\n return np.float64((1. / np.sqrt(2)) * res)" ]
[ "0.70139825", "0.6948134", "0.6635383", "0.6421419", "0.63527554", "0.6301051", "0.62800944", "0.62187314", "0.6194453", "0.6156323", "0.6144021", "0.61299276", "0.61106426", "0.6105154", "0.6101856", "0.6100986", "0.6093369", "0.6083329", "0.60363257", "0.6034415", "0.6016642", "0.6007839", "0.6007176", "0.59686935", "0.5943885", "0.59380877", "0.5933738", "0.59239966", "0.58543617", "0.58409435", "0.57862157", "0.5784366", "0.57784563", "0.577789", "0.5705405", "0.56565136", "0.5646857", "0.5646857", "0.563422", "0.56067544", "0.5599206", "0.55910003", "0.55864877", "0.55722225", "0.5553416", "0.55391544", "0.55368483", "0.551811", "0.55085313", "0.55073214", "0.5505932", "0.5498376", "0.5488649", "0.5481236", "0.54645663", "0.54550767", "0.54545015", "0.54290396", "0.5420997", "0.5420184", "0.5410348", "0.5398924", "0.53917265", "0.53898317", "0.5379334", "0.5372688", "0.53723264", "0.53687805", "0.5364838", "0.53628284", "0.53588635", "0.535875", "0.53586864", "0.53429836", "0.5340839", "0.5339657", "0.5337506", "0.5334773", "0.5331853", "0.53246516", "0.53225935", "0.53181326", "0.53181326", "0.531505", "0.5303839", "0.5295617", "0.52868265", "0.52845466", "0.5266413", "0.52574503", "0.52497154", "0.52229965", "0.5222858", "0.5222764", "0.51995504", "0.51986456", "0.5198613", "0.5196632", "0.51900464", "0.5185609" ]
0.7132836
0
r"""JensenShannon Divergence Calculates the JensenShannon Divergence (JSD) between two discrete distributions x and y. JSD quantifies the difference (or similarity) between two probability distributions and uses the KL divergence to calculate a smoothed normalized score [0, 1] that is symmetrical.
def jensen_shannon(x, y, bins, calc_distance=False, xy_probabilities=False):
    # assert array length
    assert len(x) == len(y)

    if xy_probabilities:
        # if x does not sum up to 1, raise an error
        if not np.isclose(sum(x), 1, atol=0.0001):
            raise ValueError('Probabilities in vector x do not sum up to 1.')
        # if y does not sum up to 1, raise an error
        if not np.isclose(sum(y), 1, atol=0.0001):
            raise ValueError('Probabilities in vector y do not sum up to 1.')

        # add a small number to all probabilities if zero occurs
        if x.any(0):
            px = x + 1e-15
            py = y + 1e-15
        else:
            px = x
            py = y
    else:
        # get the bins, joint bins for x and y (same_bins=True)
        bins = get_2D_bins(x, y, bins, same_bins=True)

        # calculate unconditioned histograms
        hist_x = np.histogram(x, bins=bins[0])[0]
        hist_y = np.histogram(y, bins=bins[1])[0]

        # calculate probabilities
        px = (hist_x / np.sum(hist_x)) + 1e-15
        py = (hist_y / np.sum(hist_y)) + 1e-15

    # calculate m
    pm = 0.5 * (px + py)

    # calculate kullback-leibler divergence between px and pm & py and pm
    kl_xm = kullback_leibler(px, pm, bins=bins, xy_probabilities=True)
    kl_ym = kullback_leibler(py, pm, bins=bins, xy_probabilities=True)

    if calc_distance:
        return (0.5 * kl_xm + 0.5 * kl_ym)**0.5
    else:
        return (0.5 * kl_xm + 0.5 * kl_ym)
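A minimal usage sketch, assuming NumPy and SciPy are available; it reproduces the xy_probabilities=True branch with scipy.stats.entropy in place of the module's own kullback_leibler helper, as an independent check rather than the function's actual API. The input vectors are hypothetical.

import numpy as np
from scipy.stats import entropy  # entropy(p, q) computes the KL divergence D(p || q)

# two example discrete distributions, already normalized
px = np.array([0.1, 0.4, 0.5])
py = np.array([0.3, 0.3, 0.4])

pm = 0.5 * (px + py)                                  # mixture distribution
jsd = 0.5 * entropy(px, pm) + 0.5 * entropy(py, pm)   # divergence, natural-log base
js_distance = jsd ** 0.5                              # matches calc_distance=True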
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_js_divergence(df_1, df_2, n_bins=30):\n a = np.concatenate((df_1, df_2), axis=0)\n e, p = prob_mass_fun(df_1, n = n_bins, range = (a.min(), a.max()))\n _, q = prob_mass_fun(df_2, n = e, range = (a.min(), a.max()))\n\n return scipy.spatial.distance.jensenshannon(p, q)", "def js_divergence(dist1, dist2):\n mean_dist = (dist1 + dist2) / 2.0\n js = (\n scipy.stats.entropy(dist1, mean_dist) + scipy.stats.entropy(dist2, mean_dist)\n ) / 2.0\n return js", "def js_divergence(x,y):\n\tassert (isinstance(x, BayesNet) and isinstance(y, BayesNet)), 'Must pass in BayesNet objects.'\n\tassert (x==y), 'Passed-in BayesNet objects are not structurally equal.'\n\n\tz = 0.5 * ( x.flat_cpt() + y.flat_cpt() )\n\tdistance = 0.5 * kl_divergence(x,z) + 0.5 * kl_divergence(y,z)\n\treturn distance", "def JS_divergence(xs,ys,pdf_x=None,pdf_y=None,data_range=None):\n if data_range is None:\n data_range = list(set(xs)) + list(set(ys))\n if pdf_x is None:\n pdf_x = prob_density_func(xs,norm=True,data_range=data_range)\n if pdf_y is None:\n pdf_y = prob_density_func(ys,norm=True,data_range=data_range)\n M = {}\n for i in pdf_x:\n if i not in pdf_y:\n pdf_y[i] = 0.0\n for i in pdf_y:\n if i not in pdf_x:\n pdf_y[i] = 0.0\n for i in pdf_x:\n M[i] = .5*(pdf_x[i] + pdf_y[i])\n return .5*KL_divergence(None,\n None,\n pdf_x=pdf_x,\n pdf_y=M,\n data_range=data_range) + \\\n .5*KL_divergence(None,\n None,\n pdf_x=pdf_y,\n pdf_y=M,\n data_range=data_range)", "def kl_divergence(x,y):\n\tassert (isinstance(x, BayesNet) and isinstance(y, BayesNet)), 'Must pass in BayesNet objects.'\n\tassert (x==y), 'Passed-in BayesNet objects are not structurally equal.'\n\n\tdistance = np.sum( x.flat_cpt() * np.log( x.flat_cpt() / y.flat_cpt() ) )\n\treturn distance", "def _hess_j(C_j, I_j, b_j, b_j_norm, a_1_j, a_2_j):\n D_j = torch.ger(b_j, b_j)\n return C_j + (a_1_j / b_j_norm) * (I_j - D_j / (b_j_norm ** 2)) + a_2_j * I_j", "def js_divergence(x, y, normalize=True):\n assert (x.dtype == np.float64 and y.dtype == np.float64) or (\n x.dtype == np.float32 and y.dtype == np.float32)\n assert (np.all(x.sum(1) != 0.) 
and np.all(y.sum(1) != 0.))\n\n if normalize:\n x /= x.sum(1).reshape(x.shape[0], 1)\n y /= y.sum(1).reshape(y.shape[0], 1)\n xx = np.tile(x, (y.shape[0], 1, 1)).transpose((1, 0, 2))\n yy = np.tile(y, (x.shape[0], 1, 1))\n m = (xx + yy) / 2\n x_m = np.sum(xx * np.log(m), axis=2)\n y_m = np.sum(yy * np.log(m), axis=2)\n x_x = np.tile(\n np.sum(x * np.log(x), axis=1).reshape(x.shape[0], 1), (1, y.shape[0]))\n y_y = np.tile(np.sum(\n y * np.log(y), axis=1).reshape(y.shape[0], 1),\n (1, x.shape[0])).transpose()\n res = 0.5 * (x_x - x_m) + 0.5 * (y_y - y_m)\n return np.float64(res)\n # division by zero", "def JSDivergence(p_output, q_output, get_softmax=True, dim=1):\n if get_softmax:\n p_output = F.softmax(p_output, dim=dim)\n # q_output = F.softmax(q_output, dim=dim)\n log_mean_output = ((p_output + q_output )/2).log()\n return (\n F.kl_div(log_mean_output, p_output, reduction='batchmean') + \n F.kl_div(log_mean_output, q_output, reduction='batchmean')\n ) / 2", "def plot_pairwise_jsd(img_dir, mask_dir, outfn='pairwisejsd.png', nbins=200, fit_chi2=True):\n pairwise_jsd = quality.pairwise_jsd(img_dir, mask_dir, nbins=nbins)\n _, ax = plt.subplots(1, 1)\n ax.hist(pairwise_jsd, label='Hist.', density=True)\n if fit_chi2:\n from scipy.stats import chi2\n df, _, scale = chi2.fit(pairwise_jsd, floc=0)\n logger.info(f'df = {df:0.3e}, scale = {scale:0.3e}')\n x = np.linspace(0, np.max(pairwise_jsd), 200)\n ax.plot(x, chi2.pdf(x, df, scale=scale), lw=3, label=r'$\\chi^2$ Fit')\n ax.legend()\n textstr = r'$df = $' + f'{df:0.2f}'\n props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)\n ax.text(0.72, 0.80, textstr, transform=ax.transAxes,\n verticalalignment='top', bbox=props)\n ax.set_xlabel(r'Jensen-Shannon Divergence')\n ax.set_ylabel('Density')\n ax.set_title(\n r'Density of Pairwise JSD — $\\mu$ = ' + f'{np.mean(pairwise_jsd):.2e}' + r' $\\sigma$ = ' + f'{np.std(pairwise_jsd):.2e}',\n pad=20)\n ax.ticklabel_format(style='sci', axis='both', scilimits=(0, 0))\n if outfn is not None:\n plt.savefig(outfn, transparent=True, dpi=200)\n return ax", "def SSD(x,y):\n return np.sum((x-y)**2)", "def _jsd(prob_dist_p, prob_dist_q):\n prob_dist_p = check_numpy_param('prob_dist_p', prob_dist_p)\n prob_dist_q = check_numpy_param('prob_dist_q', prob_dist_q)\n norm_dist_p = prob_dist_p / (np.linalg.norm(prob_dist_p, ord=1) + 1e-12)\n norm_dist_q = prob_dist_q / (np.linalg.norm(prob_dist_q, ord=1) + 1e-12)\n norm_mean = 0.5*(norm_dist_p + norm_dist_q)\n return 0.5*(stats.entropy(norm_dist_p, norm_mean)\n + stats.entropy(norm_dist_q, norm_mean))", "def dJ(theta, x_b, y):\n return x_b.T.dot(self._sigmoid(x_b.dot(theta)) - y) / len(x_b)", "def KolmogorovSmirnoff_statistics(dd1, dd2):\n cum1 = dd1.cumulative_distribution()\n cum2 = dd2.cumulative_distribution()\n minimum = max(cum1[0][0], cum2[0][0])\n maximum = max(cum1[-1][0], cum2[-1][0])\n index1 = len(cum1) - 1\n index2 = len(cum2) - 1\n summa1 = summa2 = 0\n\n difference = 0\n for i in reversed(range(minimum, maximum+1)):\n if cum1[index1][0] == i:\n summa1 = cum1[index1][1]\n index1 -= 1\n if cum2[index2][0] == i:\n summa2 = cum2[index2][1]\n index2 -= 1\n if abs(summa1 - summa2) > difference:\n difference = abs(summa1 - summa2)\n return difference", "def js_div(d1, d2):\n part1 = - soft_relu(-d1).mean()\n part2 = - soft_relu(d2).mean() \n return part1 + part2 + np.log(4.0)", "def jssim_dist(G1, G2, nodes=None):\n if nodes is None:\n nodes = G1.nodes() | G2.nodes()\n\n sims = []\n for n in nodes:\n set1, set2 = set(G1.neighbors(n)), set(G2.neighbors(n))\n 
neighbors = list(set1 | set2)\n p1 = np.array([1./len(set1) if _n in set1 else 0 for _n in neighbors])\n p2 = np.array([1./len(set2) if _n in set2 else 0 for _n in neighbors])\n sims.append(1 - jsdiv(p1, p2))\n \n return sims", "def js_metric(df_1, df_2, numerical_columns, categorical_columns):\n\n res = {}\n STEPS = 100\n\n for col in categorical_columns:\n # to ensure similar order, concat before computing probability\n col_baseline = df_1[col].to_frame()\n col_sample = df_2[col].to_frame()\n col_baseline[\"source\"] = \"baseline\"\n col_sample[\"source\"] = \"sample\"\n\n col_ = pd.concat([col_baseline, col_sample], ignore_index=True)\n\n # aggregate and convert to probability array\n arr = (\n col_.groupby([col, \"source\"])\n .size()\n .to_frame()\n .reset_index()\n .pivot(index=col, columns=\"source\")\n .droplevel(0, axis=1)\n )\n arr_ = arr.div(arr.sum(axis=0), axis=1)\n arr_.fillna(0, inplace=True)\n\n # calculate js distance\n js_distance = jensenshannon(\n arr_[\"baseline\"].to_numpy(), arr_[\"sample\"].to_numpy()\n )\n\n res.update({col: js_distance})\n\n for col in numerical_columns:\n # fit gaussian_kde\n col_baseline = df_1[col]\n col_sample = df_2[col]\n kde_baseline = gaussian_kde(col_baseline)\n kde_sample = gaussian_kde(col_sample)\n\n # get range of values\n min_ = min(col_baseline.min(), col_sample.min())\n max_ = max(col_baseline.max(), col_sample.max())\n range_ = np.linspace(start=min_, stop=max_, num=STEPS)\n\n # sample range from KDE\n arr_baseline_ = kde_baseline(range_)\n arr_sample_ = kde_sample(range_)\n\n arr_baseline = arr_baseline_ / np.sum(arr_baseline_)\n arr_sample = arr_sample_ / np.sum(arr_sample_)\n\n # calculate js distance\n js_distance = jensenshannon(arr_baseline, arr_sample)\n\n res.update({col: js_distance})\n\n list_output = sorted(res.items(), key=lambda x: x[1], reverse=True)\n dict_output = dict(list_output)\n\n return dict_output", "def kl_divergence(dist1, dist2, symmetrized=True):\n if symmetrized == True:\n kl = (\n scipy.stats.entropy(dist1, dist2) + scipy.stats.entropy(dist2, dist1)\n ) / 2.0\n return kl\n else:\n kl = scipy.stats.entropy(dist1, dist2)\n return kl", "def JS_divergence(p1, p2, weights, mode='normalized'):\n p1 = np.maximum(p1, 10**-10)\n p2 = np.maximum(p2, 10**-10)\n M = 0.5 * (p1 + p2)\n if mode == 'normalized':\n return 0.5 * (np.sum(weights * p1 * np.log(p1 / M)) + np.sum(weights * p2 * np.log(p2 / M)))\n else:\n I1 = 1 - np.sum(weights * p1)\n I2 = 1 - np.sum(weights * p2)\n return 0.5 * (np.sum(weights * p1 * np.log(p1 / M)) + np.sum(weights * p2 * np.log(p2 / M)) + I1 * np.log(2 * I1 / (I1 + I2)) + I2 * np.log(2 * I2 / (I1 + I2)))", "def compute_dists(x, y):\r\n \r\n return (x - y.permute(0, 2, 1)) ** 2", "def kl_dist_smoothing(distribution1: np.array, distribution2: np.array, epsilon: float) -> float:\n # Performs smoothing\n distributions = [distribution1, distribution2]\n smoothed_distributions = []\n for distribution in distributions:\n nonzeros = np.count_nonzero(distribution)\n zeros = len(distribution) - nonzeros\n smoothed_distributions.append([epsilon if prob == 0 else prob - zeros * epsilon / nonzeros\n for prob in distribution])\n\n return sum(kl_div(smoothed_distributions[0], smoothed_distributions[1]))", "def KL_divergence(xs,ys,pdf_x=None,pdf_y=None,data_range=None):\n if data_range is None:\n data_range = list(set(xs)) + list(set(ys))\n if pdf_x is None:\n pdf_x = prob_density_func(xs,norm=True,data_range=data_range)\n if pdf_y is None:\n pdf_y = 
prob_density_func(ys,norm=True,data_range=data_range)\n keys = set(pdf_x.keys()+pdf_y.keys())\n PQ = []\n for k in keys:\n if k in pdf_x and k in pdf_y:\n PQ.append((pdf_x[k],pdf_y[k]))\n return np.sum([p*np.log(float(p)/float(q)) for (p,q) in PQ if q>0 and p>0])", "def compute_grassman_distance(Y1, Y2):\n Q1, _ = jnp.linalg.qr(Y1)\n Q2, _ = jnp.linalg.qr(Y2)\n\n _, sigma, _ = jnp.linalg.svd(Q1.T @ Q2)\n sigma = jnp.round(sigma, decimals=6)\n return jnp.linalg.norm(jnp.arccos(sigma))", "def zkl_divergence(x, y, gamma):\n return np.sum([p_i*np.log(p_i/q_i) if q_i > 0 and p_i > 0 else p_i*gamma for (p_i, q_i) in zip(x, y)])", "def jeffreys(self, sigma):\n return 1. / sigma", "def J_pointlike(self, dist):\n jfact = self.int_over_rho_sqr(self.max_radius) / dist**2.\n return np.log10(jfact)", "def distributions_EMD(d1, d2):\n return ss.wasserstein_distance(d1.get_probs(), d2.get_probs()) / len(d1.get_probs())", "def jaccard_dist(X, Y):\n return 1 - jaccard_sim(X, Y)", "def dynamax_jaccard(x, y):\n # feature generation\n u = np.vstack((x, y))\n m_x = fuzzify(x, u)\n m_y = fuzzify(y, u)\n # fuzzy jaccard\n m_inter = np.sum(np.minimum(m_x, m_y))\n m_union = np.sum(np.maximum(m_x, m_y))\n return m_inter / m_union", "def compute_jensen_shannon_divergence(\n input_file: str, output_file: str, config: Configuration\n) -> str:\n sanitized_input_file = input_file + \".sanitized\"\n _sanitize_jensen_shannon_divergence_input(input_file, sanitized_input_file)\n cmd = \"cd {} && python2 score_conservation.py {} > {}\".format(\n JENSE_SHANNON_DIVERGANCE_DIR,\n os.path.abspath(sanitized_input_file),\n os.path.abspath(output_file),\n )\n logging.info(\"Executing Jense Shannon Divergence script ...\")\n logging.debug(\"Executing command:\\n%s\", cmd)\n config.execute_command(cmd)\n return output_file", "def jaccard_distance_loss(y_true, y_pred, smooth=1e-5):\n intersection = K.sum(K.abs(y_true * y_pred), axis=-1)\n sum_ = K.sum(K.abs(y_true) + K.abs(y_pred), axis=-1)\n jac = (intersection + smooth) / (sum_ - intersection + smooth)\n return (1 - jac) * smooth", "def Jdiff(self, x1, x2, firstsecond='both'):\n assert (firstsecond in ['first', 'second', 'both'])\n if firstsecond == 'both':\n return [self.Jdiff(x1, x2, 'first'), self.Jdiff(x1, x2, 'second')]\n dx = np.zeros(self.ndx)\n h = self.disturbance\n J = np.zeros([self.ndx, self.ndx])\n d0 = self.diff(x1, x2)\n if firstsecond == 'first':\n for k in range(self.ndx):\n dx[k] = h\n J[:, k] = self.diff(self.integrate(x1, dx), x2) - d0\n dx[k] = 0\n elif firstsecond == 'second':\n for k in range(self.ndx):\n dx[k] = h\n J[:, k] = self.diff(x1, self.integrate(x2, dx)) - d0\n dx[k] = 0\n J /= h\n return J", "def kl_divergence(x, y, thresholded=True, symmetrized=True, normalize=True):\n assert (x.dtype == np.float64 and y.dtype == np.float64) or (\n x.dtype == np.float32 and y.dtype == np.float32)\n # assert (np.all(x.sum(1) != 0.) 
and np.all(y.sum(1) != 0.))\n if thresholded:\n normalize = True\n if normalize:\n x /= x.sum(1).reshape(x.shape[0], 1)\n y /= y.sum(1).reshape(y.shape[0], 1)\n if thresholded:\n eps = np.finfo(x.dtype).eps\n x = x + eps\n y = y + eps\n x /= x.sum(1).reshape(x.shape[0], 1)\n y /= y.sum(1).reshape(y.shape[0], 1)\n res = __kl_divergence(x, y)\n\n if symmetrized:\n res = 0.5 * res + 0.5 * __kl_divergence(y, x).transpose()\n\n return np.float64(res).reshape(res.shape)", "def jaccard_distance_loss(y_true, y_pred, smooth=100):\n\n epsilon = tf.convert_to_tensor(1e-7, dtype='float32')\n intersection = K.sum(K.abs(y_true * y_pred), axis=-1)\n sum_ = K.sum(K.abs(y_true) + K.abs(y_pred), axis=-1)\n jac = (intersection + smooth) / (sum_ - intersection + smooth)\n return (1 - jac)", "def jaccard_distance_loss(y_true, y_pred, smooth=10):\n intersection = K.sum(K.abs(y_true * y_pred), axis=-1)\n sum_ = K.sum(K.abs(y_true) + K.abs(y_pred), axis=-1)\n jac = (intersection + smooth) / (sum_ - intersection + smooth)\n return (1 - jac) * smooth", "def js_beta(d1, d2, beta):\n part1 = - soft_relu(-d1).mean()\n part2 = (beta + 2.0 - (- soft_relu(-d2)).exp()).log().mean()\n return part1 + part2 - np.log(1.0 + beta)", "def kl_divergence(y_true, y_pred):\n y_pred = tensor_conversion.convert_to_tensor_v2_with_dispatch(y_pred)\n y_true = math_ops.cast(y_true, y_pred.dtype)\n y_true = backend.clip(y_true, backend.epsilon(), 1)\n y_pred = backend.clip(y_pred, backend.epsilon(), 1)\n return math_ops.reduce_sum(y_true * math_ops.log(y_true / y_pred), axis=-1)", "def func_ludwigson(eps,k1,n1,k2,n2,):\n return k1*eps**n1+np.exp(k2+n2*eps)", "def J(W1, b1, W2, b2, x, y):\n yhat = forwardPropagate(W1, b1, W2, b2, x) # OLD: yhat = softmax(x.dot(w))\n return crossEntropy(y, yhat)", "def compute_similarity(x, y, metric='kl_divergence'):\n from scipy.stats import entropy, pearsonr\n # remove zeros slightly increase divergence\n x = x[x != 0]\n y = y[y != 0]\n # outer join two distributions\n eps = min(x.min(), y.min()) / 10\n xy = pd.concat([x, y], axis=1).add(eps, fill_value=0)\n x = xy.iloc[:, 0]\n y = xy.iloc[:, 1]\n if metric == 'pearson':\n score, _ = pearsonr(x, y)\n else:\n score = entropy(x, y)\n return score", "def d_mse(x, y):\n\n return 2 * (x - y) / x.size(0) / x.size(1)", "def wasserstein_distance_1d(pers_diag_1, pers_diag_2) -> float:\n wasserstein_distance = d.wasserstein_distance(pers_diag_1[1], pers_diag_2[1],\n q=1, delta=0.2)\n return wasserstein_distance", "def dist(x, y):\n dx = x[0] - y[0]\n dy = x[1] - y[1]\n ans = dx**2 + dy**2\n ans = ans**(0.5)\n return ans", "def dist(x, y):\n dx = x[0] - y[0]\n dy = x[1] - y[1]\n ans = dx**2 + dy**2\n ans = ans**(0.5)\n return ans", "def cohen_d(x_arr, y_arr):\n delta = np.mean(x_arr) - np.mean(y_arr)\n pooled_std = np.sqrt(\n (\n (len(x_arr) - 1) * np.std(x_arr, ddof=1) ** 2 +\n (len(y_arr) - 1) * np.std(y_arr, ddof=1) ** 2\n ) / (len(x_arr) + len(y_arr))\n )\n return delta / pooled_std", "def kolmogorov_smirnov_distance(self, other, show=False):\n from scipy.interpolate import interp1d\n cumdist1 = self.get_cumulative_distribution()\n cumdist2 = other.get_cumulative_distribution()\n \n # Normalize the x-values\n # range1 = np.max(cumdist1[\"x\"]) - np.min(cumdist1[\"x\"])\n # range2 = np.max(cumdist2[\"x\"]) - np.min(cumdist2[\"x\"])\n # cumdist1[\"x\"] -= np.min(cumdist1[\"x\"])\n # cumdist1[\"x\"] *= (range2/range1) \n # cumdist1[\"x\"] += np.min(cumdist2[\"x\"])\n\n interp_cumdist1 = interp1d(cumdist1[\"x\"], cumdist1[\"P\"], kind=\"linear\",\n fill_value=(0.0, 
1.0), bounds_error=False)\n\n diff = cumdist2[\"P\"] - interp_cumdist1(cumdist2[\"x\"])\n\n ks_distance = np.max(np.abs(diff))\n\n if show:\n from matplotlib import pyplot as plt\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n ax.plot(cumdist1[\"x\"], cumdist1[\"P\"], drawstyle=\"steps\", label=\"First\")\n ax.plot(cumdist2[\"x\"], cumdist2[\"P\"], drawstyle=\"steps\", label=\"Second\")\n ax.set_xlabel(\"Normalized distances\")\n ax.set_ylabel(\"Cummulative Distribution\")\n ax.legend(loc=\"best\")\n plt.show()\n return ks_distance", "def k(xs, ys, sigma=1, l=1):\n\n # Pairwise difference matrix.\n dx = np.expand_dims(xs, 1) - np.expand_dims(ys, 0)\n return (sigma ** 2) * np.exp(-((dx / l) ** 2) / 2)", "def HJ_path(s1, s2):\n from sage.all import xgcd, ceil, floor\n assert s1 > s2, \"s1 must be larger than s2\"\n m1 = s1.numerator()\n d1 = s1.denominator()\n m2 = s2.numerator()\n d2 = s2.denominator()\n path = [(m1, d1)]\n while m1 / d1 > s2:\n _, x, y = xgcd(m1, d1)\n if d1 * m2 - m1 * d2 > 0:\n k = ceil((x * m2 + y * d2) / (d1 * m2 - m1 * d2))\n else:\n k = floor((x * m2 + y * d2) / (d1 * m2 - m1 * d2))\n m = - y - k * m1\n d = x - k * d1\n path.append((m, d))\n m1, d1 = m, d\n\n # these test are for debugging only, and should be unnecessary by now\n assert path[0][0] / path[0][1] == s1, \"first entry not correct: {}\".format(path)\n assert path[-1][0] / path[-1][1] == s2, \"last entry not correct: {}\".format(path)\n assert all([path[i][0] * path[i + 1][1] - path[i + 1][0] * path[i][1] == 1\n for i in range(len(path) - 1)]), \"determinant condition wrong: {}\".format(path)\n\n return path", "def jeffreys(self, x):\n return np.sqrt(1. / x)", "def J(theta, x, y):\n m = len(y)\n z = theta.dot(x.T) #argument for hypothesis function\n return 1. / m * np.sum(-y * np.log(g(z)) - (1. 
- y) * np.log(1 - g(z)))", "def jeffreys(self, x):\n return 1./np.sqrt(x*(1.-x))", "def lks_2samp (data1,data2):\r\n j1 = 0\r\n j2 = 0\r\n fn1 = 0.0\r\n fn2 = 0.0\r\n n1 = len(data1)\r\n n2 = len(data2)\r\n en1 = n1\r\n en2 = n2\r\n d = 0.0\r\n data1.sort()\r\n data2.sort()\r\n while j1 < n1 and j2 < n2:\r\n d1=data1[j1]\r\n d2=data2[j2]\r\n if d1 <= d2:\r\n fn1 = (j1)/float(en1)\r\n j1 = j1 + 1\r\n if d2 <= d1:\r\n fn2 = (j2)/float(en2)\r\n j2 = j2 + 1\r\n dt = (fn2-fn1)\r\n if math.fabs(dt) > math.fabs(d):\r\n d = dt\r\n try:\r\n en = math.sqrt(en1*en2/float(en1+en2))\r\n prob = ksprob((en+0.12+0.11/en)*abs(d))\r\n except:\r\n prob = 1.0\r\n return d, prob", "def LJ(epsilon,sigma,r):\n P1=(sigma/r)**12\n P2=(sigma/r)**6\n return 4*epsilon*(P1-P2)", "def kl_divergence(a, b, normalize=True):\n a, b = np.array(a), np.array(b)\n\n x = np.linspace(\n min(a.min(), b.min()) - 1,\n max(a.max(), b.max()) + 1,\n 100\n )\n\n p = gaussian_kde(a)(x)\n q = gaussian_kde(b)(x)\n\n if normalize:\n p = p/np.sum(p)\n q = q/np.sum(q)\n\n return np.sum(np.where(p != 0, (p) * np.log(p / q), 0))", "def JointDistn(self):\r\n if len(self.factors)==1:\r\n return self.factors[0]\r\n F = self.factors[0]\r\n for i in range(1,len(self.factors)):\r\n F = F * self.factors[i]\r\n self.JDist = F\r\n return F", "def cohens_d(x, y):\n nx, ny = len(x), len(y)\n pooled_variance = ((nx - 1) * np.std(x, ddof=1) ** 2 +\n (ny - 1) * np.std(y, ddof=1) ** 2) / \\\n ((nx - 1) + (ny - 1))\n return (np.mean(x) - np.mean(y)) / np.sqrt(pooled_variance)", "def MyKLD(X,Y): \n mu1,mu2 = tuple(np.mean(X,axis=0))\n sigma1,sigma2 = tuple(np.std(X,axis=0))\n m1,m2 = tuple(np.mean(X,axis=0))\n s1,s2 = tuple(np.std(X,axis=0))\n rho = np.corrcoef(X,rowvar=False)[0,1]\n r = np.corrcoef(Y,rowvar=False)[0,1]\n \n return (\n ((mu1-m1)**2/s1**2 - 2*r*(mu1-m1)*(mu2-m2)/(s1*s2) + (mu2-m2)**2/s2**2) /\n (2 * (1 - r**2)) +\n ((sigma1**2-s1**2)/s1**2 - 2*r*(rho*sigma1*sigma2-r*s1*s2)/(s1*s2) + \n (sigma2**2-s2**2)/s2**2) /\n (2 * (1 - r**2)) +\n np.log((s1**2 * s2**2 * (1-r**2)) / (sigma1**2 * sigma2**2 * (1-rho**2))) / 2\n )", "def compute_distance(self, xi: NumpyFloatArray, xj: NumpyFloatArray) -> float:\n\n return pdist([xi, xj], \"braycurtis\")[0]", "def rmsd(x, y):\n\n # get the length of the dataset\n n, = x.shape\n\n return np.sqrt(np.sum((x-y)**2)/n)", "def compute_jerk(joint_trajectory):\r\n joint_vel = np.gradient(joint_trajectory, axis=1)\r\n joint_acc = np.gradient(joint_vel, axis=1)\r\n joint_jerk = np.gradient(joint_acc, axis=1)\r\n jerk = np.linalg.norm(joint_jerk)\r\n return jerk", "def jackknifed_sdf_variance(yk, eigvals, sides='onesided', adaptive=True):\r\n K = yk.shape[0]\r\n\r\n from nitime.algorithms import mtm_cross_spectrum\r\n\r\n # the samples {S_k} are defined, with or without weights, as\r\n # S_k = | x_k |**2\r\n # | x_k |**2 = | y_k * d_k |**2 (with adaptive weights)\r\n # | x_k |**2 = | y_k * sqrt(eig_k) |**2 (without adaptive weights)\r\n\r\n all_orders = set(range(K))\r\n jk_sdf = []\r\n # get the leave-one-out estimates -- ideally, weights are recomputed\r\n # for each leave-one-out. 
This is now the case.\r\n for i in range(K):\r\n items = list(all_orders.difference([i]))\r\n spectra_i = np.take(yk, items, axis=0)\r\n eigs_i = np.take(eigvals, items)\r\n if adaptive:\r\n # compute the weights\r\n weights, _ = adaptive_weights(spectra_i, eigs_i, sides=sides)\r\n else:\r\n weights = eigs_i[:, None]\r\n # this is the leave-one-out estimate of the sdf\r\n jk_sdf.append(\r\n mtm_cross_spectrum(\r\n spectra_i, spectra_i, weights, sides=sides\r\n )\r\n )\r\n # log-transform the leave-one-out estimates and the mean of estimates\r\n jk_sdf = np.log(jk_sdf)\r\n # jk_avg should be the mean of the log(jk_sdf(i))\r\n jk_avg = jk_sdf.mean(axis=0)\r\n\r\n K = float(K)\r\n\r\n jk_var = (jk_sdf - jk_avg)\r\n np.power(jk_var, 2, jk_var)\r\n jk_var = jk_var.sum(axis=0)\r\n\r\n # Thompson's recommended factor, eq 18\r\n # Jackknifing Multitaper Spectrum Estimates\r\n # IEEE SIGNAL PROCESSING MAGAZINE [20] JULY 2007\r\n f = (K - 1) ** 2 / K / (K - 0.5)\r\n jk_var *= f\r\n return jk_var", "def _sbd(x, y):\r\n ncc = _ncc_c(x, y)\r\n idx = ncc.argmax()\r\n dist = 1 - ncc[idx]\r\n yshift = roll_zeropad(y, (idx + 1) - max(len(x), len(y)))\r\n\r\n return dist, yshift", "def jeffreys(self, mu, sigma):\n return 1./sigma**3.", "def get_johansen(self, y, p, r=0):\n\n N, l = y.shape\n jres = coint_johansen(y, 0, p)\n trstat = jres.lr1 # trace statistic\n tsignf = jres.cvt # critical values\n\n if not r:\n for i in range(l):\n if trstat[i] > tsignf[i, 1]: # 0: 90% 1:95% 2: 99%\n r = i + 1\n if np.sign(jres.evec[0, i]) == -1: # sign of first elem\n jres.evec[:, i] = -jres.evec[:, i]\n\n jres.r = r\n jres.evecr = jres.evec[:, :r]\n\n return jres", "def _build_dist(self):\n lamb = self.params['lamb']\n p = self.params['p']\n\n jac = self.jacobian\n # build D on grids\n xg, yg, mask = self._mask_grid()\n r_max = self._r_max(xg, yg, mask)\n d_mat = self._psf_grid(xg, yg, r_max=r_max)\n # E[yy^T]\n j_j_w = np.dot(jac, jac.transpose())\n r_mat = np.diag(np.diag(j_j_w) ** p)\n jac_inv = la.inv(j_j_w + lamb*r_mat)\n # RM = E[xx^T] / E[yy^T]\n h_mat = np.dot(np.dot(d_mat, jac.transpose()), jac_inv)\n return h_mat", "def smoothnessChi2(self, other, params, useSelf = True, useOther = True):\n\t\tif not self.sectors == other.sectors:\n\t\t\traise ValueError(\"Sectors do not match\")\n\t\tDself = np.zeros((2*self.totalBins))\n\t\tDother = np.zeros((2*other.totalBins))\n\t\tparamsSelf = params[:2*self.nZero]\n\t\tparamsOther = params[2*self.nZero:]\n#\t\tprint self.nZero, other.nZero, params,\"LLLLLLLLLOOPPdssa\"\n\n\t\tampsSelf = self.getCorrectedAmplitudes(paramsSelf)\n\t\tampsOther = other.getCorrectedAmplitudes(paramsOther)\n\t\tfor s in range(self.nSect):\n\t\t\tstartSelf = self.borders[s]\n\t\t\tstartOther = other.borders[s]\n\t\t\tnBins = min(self.borders[s+1] - self.borders[s], other.borders[s+1] - other.borders[s])\n\t\t\tfor i in range(nBins):\n\t\t\t\tDself[2*(startSelf + i) ] = ampsSelf[2*(i+startSelf) ] - ampsOther[2*(i+startOther) ]\n\t\t\t\tDself[2*(startSelf + i)+1] = ampsSelf[2*(i+startSelf)+1] - ampsOther[2*(i+startOther)+1]\n\n\t\t\t\tDother[2*(startOther + i) ] = ampsSelf[2*(i+startSelf) ] - ampsOther[2*(i+startOther) ]\n\t\t\t\tDother[2*(startOther + i)+1] = ampsSelf[2*(i+startSelf)+1] - ampsOther[2*(i+startOther)+1]\n\t\tCself = np.dot(Dself, np.dot(self.comaInv, Dself))\n\t\tCother = np.dot(Dother, np.dot(other.comaInv, Dother))\n\t\tretVal = 0.\n\t\tif useSelf:\n\t\t\tretVal += Cself\n\t\tif useOther:\n\t\t\tretVal += Cother\n\t\treturn retVal", "def wealth_cons_ratio(ssyd, \n tol=1e-7, \n 
init_val=1, \n max_iter=1_000_000,\n verbose=False):\n\n # Unpack and set up parameters EpsteinZin parameters\n ψ, γ, β = ssyd.ssy.ψ, ssyd.ssy.γ, ssyd.ssy.β\n θ = (1 - γ) / (1 - 1/ψ)\n ζ = 1 - β\n\n K_matrix = compute_K(ssyd)\n M = ssyd.K * ssyd.I * ssyd.J\n w = np.ones(M) * init_val\n iter = 0\n error = tol + 1\n\n r = compute_spec_rad(K_matrix)\n if verbose:\n print(f\"Test value = {r**(1/θ)} and θ = {θ}\")\n print(\"Beginning iteration\\n\\n\")\n\n\n while error > tol and iter < max_iter:\n Tw = ζ + β * (K_matrix @ (w**θ))**(1/θ)\n error = np.max(np.abs(w - Tw))\n w = Tw\n iter += 1\n\n if verbose:\n print(f\"Iteration converged after {iter} iterations\") \n\n return w / ζ", "def kde_agent(beta, bid, i, m, bw = None, *args):\n if bw is None: bw = 'scott'\n\n i0 = D.index[0]\n if only_cond == {'d'}:\n s1 = D.ix[i0:i, 's1_rp']\n if len(s1) == 1:\n s1[i0+1] = sp.mean([s1, 10])\n else:\n s1est = args[0]\n ar = D.ix[i, 'out_bool']\n b1 = D.ix[i, 'bid']\n b1i = round2(b1)\n if len(s1est) == 1:\n s1est.append(sp.mean([s1est[0], 10])) \n else:\n gks = stats.gaussian_kde(s1est, bw_method = bw)\n if ar:\n aestint = gks.pdf(sp.arange(0, b1i, 0.1))\n aest = sp.dot(aestint, sp.arange(0, b1i, 0.1)) / b1i \n else:\n aestint = gks.pdf(sp.arange(b1i, 10, 0.1))\n aest = sp.dot(aestint, sp.arange(b1i, 10, 0.1)) / (10-b1i) \n s1est.append(aest) \n s1 = s1est\n gks = stats.gaussian_kde(s1, bw_method = bw)\n Ks1 = gks.pdf(B) \n Ks1 /= sum(Ks1)\n\n if m == 'SC':\n if only_cond == {'d'}:\n s2 = D.ix[i0:i, 's2_rp'].dropna()\n if len(s2) == 1:\n s2[i+1] = sp.mean([s2[i], 10]) \n else:\n s2est = args[1]\n if len(s2est) == 1:\n s2est.append(sp.mean([s2est[0], 10])) \n else:\n gks = stats.gaussian_kde(s2est, bw_method = bw)\n if ar:\n aestint = gks.pdf(sp.arange(0, b1i, 0.1))\n aest = sp.dot(aestint, sp.arange(0, b1i, 0.1)) / b1i \n else:\n aestint = gks.pdf(sp.arange(b1i, 10, 0.1))\n aest = sp.dot(aestint, sp.arange(b1i, 10, 0.1)) / (10-b1i) \n s2est.append(aest) \n s2 = s2est\n gks = stats.gaussian_kde(s2, bw_method = bw)\n Ks2 = gks.pdf(B) \n Ks2 /= sum(Ks2)\n U = kdeag_uf(Ks1, Ks2, None)\n s2orb2 = s2\n elif m == 'NC':\n U = kdeag_uf(Ks1, None, None)\n s2orb2 = None\n elif m == 'BC':\n if only_cond == {'d'}:\n b2 = D.ix[i0:i, 'b2_bid'].dropna()\n if len(b2) == 1:\n b2[i+1] = sp.mean([b2[i], 10]) \n else:\n b2est = args[1]\n if len(b2est) == 1:\n b2est.append(sp.mean([b2est[0], 10])) \n else:\n gks = stats.gaussian_kde(b2est, bw_method = bw)\n if ar:\n aestint = gks.pdf(sp.arange(0, b1i, 0.1))\n aest = sp.dot(aestint, sp.arange(0, b1i, 0.1)) / b1i \n else:\n aestint = gks.pdf(sp.arange(b1i, 10, 0.1))\n aest = sp.dot(aestint, sp.arange(b1i, 10, 0.1)) / (10-b1i) \n b2est.append(aest) \n b2 = b2est\n gks = stats.gaussian_kde(b2, bw_method = bw)\n Kb2 = gks.pdf(B) \n Kb2 /= sum(Kb2)\n U = kdeag_uf(Ks1, None, Kb2)\n s2orb2 = b2\n \n opb = B[sp.argmax(U)]\n p = boltzmann_dist(beta, U, int(bid*((Bbins-1)/10)))#p = P[int(bid*((Bbins-1)/10))]\n return p, opb, s1, s2orb2, U", "def emd(pdd, pdd_, metric='chebyshev', **kwargs):\n \n from .core.Wasserstein import wasserstein\n \n dm = cdist(pdd[:, 1:], pdd_[:, 1:], metric=metric, **kwargs)\n \n return wasserstein(pdd[:, 0], pdd_[:, 0], dm)", "def ddf(self, y1: torch.Tensor, y2: torch.Tensor) -> torch.Tensor:\n out = torch.pow(y1, 2)\n mul = y1 * y2\n mul.exp_()\n\n div = mul.reciprocal()\n div.add_(mul).add_(2)\n out.div_(div)\n return out", "def hellinger(x,y):\n\tassert (isinstance(x, BayesNet) and isinstance(y, BayesNet)), 'Must pass in BayesNet objects.'\n\tassert 
(x==y), 'Passed-in BayesNet objects are not structurally equal.'\n\n\tdistance = ( 1 / np.sqrt( 2 ) ) * np.sqrt( np.sum( ( np.sqrt( x.flat_cpt() ) - np.sqrt( y.flat_cpt() ) )**2) )\n\treturn distance", "def Kendalls_Tau2(xlist, ylist):\n\tif len(xlist) != len(ylist):\n\t\traise StatsError(\"Data sets have different lengths.\")\n\txdata = xlist\n\tydata = ylist\n\t#for i in range(len(xlist)):\n\t#\tif xlist[i] != None and ylist[i] != None:\n\t#\t\txdata.append(xlist[i])\n\t#\t\tydata.append(ylist[i])\n\tassert len(xdata) == len(ydata)\n\t#assert len(xdata) <= len(xlist) - xlist.count(None)\n\t#assert len(ydata) <= len(ylist) - ylist.count(None)\n\t#assert len(ydata) >= len(ylist) - xlist.count(None) - ylist.count(None)\n\tif len(xdata) == 0:\n\t\traise StatsError(\"No valid data entries.\")\n\tn = len(xdata)\n\t# compute the number of concordant and discordant pairs\n\tconc = disc = 0.0 # concordant and discordant pairs\n\tnx = ny = 0.0\n\tupdown = 0\n\tfor i in range(n): # loop over all pairs\n\t\txi = xdata[i]\n\t\tyi = ydata[i]\n\t\tif xi and yi:\n\t\t\tfor j in range(i + 1, n):\n\t\t\t\tif xdata[j] and ydata[j]:\n\t\t\t\t\txd = xi - xdata[j]\n\t\t\t\t\tyd = yi - ydata[j]\n\t\t\t\t\tprod = xd * yd\n\t\t\t\t\tif prod != 0:\n\t\t\t\t\t\tnx += 1\n\t\t\t\t\t\tny += 1\n\t\t\t\t\t\tif prod > 0:\n\t\t\t\t\t\t\tupdown += 1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tupdown -= 1\n\t\t\t\t\telse:\n\t\t\t\t\t\tif xd != 0:\n\t\t\t\t\t\t\tnx += 1\n\t\t\t\t\t\tif yd != 0:\n\t\t\t\t\t\t\tny += 1\n\t# Compute tau\n\tn = float(n)\n\tdenom = math.sqrt(nx*ny)\n\ttry:\n\t\ttau = float(updown) / denom\n\texcept ZeroDivisionError:\n\t\traise StatsError(\"Too few entries: {:d}\".format(n))\n\t# Compute P-value\n\tz = 3.0 * tau * math.sqrt(n * (n - 1.0)) / math.sqrt(2.0 * (2.0 * n + 5.0))\n\tprob = Prob_Z(z)\n\treturn (tau, prob, int(n))", "def _dK_computations(self, dL_dK):\r\n \r\n self._dL_dl = (dL_dK*self.variance*self._K_dvar*(self.input_dim/2.*(self._lengthscales_two.T**4 - self._lengthscales**4) + 2*self._lengthscales2*self._K_dist2)/(self._w2*self._w2*self._lengthscales)).sum(1)\r\n if self._lengthscales_two is self._lengthscales:\r\n self._dL_dl_two = None\r\n else:\r\n self._dL_dl_two = (dL_dK*self.variance*self._K_dvar*(self.input_dim/2.*(self._lengthscales**4 - self._lengthscales_two.T**4 ) + 2*self._lengthscales_two2.T*self._K_dist2)/(self._w2*self._w2*self._lengthscales_two.T)).sum(0)", "def compute_sgd_gradient(self, x_j, t_j):\n a = np.dot(x_j.T, self.w)\n return -1 * t_j * (1 / (1 + np.exp(a * t_j))) * x_j", "def alt_cohen_d(x_arr, y_arr):\n delta = np.mean(x_arr) - np.mean(y_arr)\n pooled_std = np.sqrt((np.std(x_arr, ddof=1) ** 2 +\n np.std(y_arr, ddof=1) ** 2) / 2.0)\n return delta / pooled_std", "def hellinger_distance(x, y):\n assert (x.dtype == np.float64 and y.dtype == np.float64) or (\n x.dtype == np.float32 and y.dtype == np.float32)\n assert (np.all(x.sum(1) != 0.) and np.all(y.sum(1) != 0.))\n x /= x.sum(1).reshape(x.shape[0], 1)\n y /= y.sum(1).reshape(y.shape[0], 1)\n x = np.sqrt(x)\n y = np.sqrt(y)\n # x (120, 40), y (100, 40), H(x,y) (120, 100)\n xx = np.tile(x, (y.shape[0], 1, 1)).transpose((1, 0, 2))\n yy = np.tile(y, (x.shape[0], 1, 1))\n xx_yy = xx - yy\n res = np.sqrt(np.sum(xx_yy ** 2, axis=-1))\n return np.float64((1. 
/ np.sqrt(2)) * res)", "def metric(x, y):\n d = 2\n summ = []\n i = 0\n while i < len(x):\n # in this case use euclidean distance\n summ.append((x[i] - y[i])**d)\n i = i + 1\n return sum(summ) ** (1 / float(d))", "def test_jn():\n import time\n t1 = time.time()\n\n n_list = [ 3, 4, 1, 0, 9, 7 ]\n x_list = [ 0, 1.01, 0.2, 3.3, 5.9, 77. ]\n vals1 = [ galsim.bessel.jn(n,x) for n,x in zip(n_list,x_list) ]\n print 'x = ',x_list\n print 'vals1 = ',vals1\n\n try:\n import scipy.special\n vals2 = [ scipy.special.jn(n,x) for n,x in zip(n_list,x_list) ]\n print 'vals2 = ',vals2\n np.testing.assert_almost_equal(\n vals1, vals2, 8, \"bessel.jn disagrees with scipy.special.jn\")\n except ImportError:\n print 'Unable to import scipy. Skipping scipy tests of jn.'\n\n # These values are what scipy returns. Check against these, so not require scipy.\n vals2 = [ 0.0,\n 0.0025745895535573995,\n 0.099500832639236036,\n -0.34429626039888467,\n 0.018796532416195257,\n -0.082526868218916541\n ]\n np.testing.assert_almost_equal(\n vals1, vals2, 8, \"bessel.jn disagrees with reference values\")\n\n t2 = time.time()\n print 'time for %s = %.2f'%(funcname(),t2-t1)", "def jacaard(clusters_a, clusters_b):\n return jaccard_similarity_score(clusters_a, clusters_b)", "def sdep(y, yHat):\n n = y.shape[0]\n\n numer = ((y - yHat) ** 2).sum()\n\n sdep = (numer / n) ** 0.5\n\n return sdep", "def _graph_fn_kl_divergence(distribution_a, distribution_b):\n if get_backend() == \"tf\":\n return tf.no_op()\n # TODO: never tested. tf throws error: NotImplementedError: No KL(distribution_a || distribution_b) registered for distribution_a type Bernoulli and distribution_b type ndarray\n #return tf.distributions.kl_divergence(\n # distribution_a=distribution_a,\n # distribution_b=distribution_b,\n # allow_nan_stats=True,\n # name=None\n #)", "def JS_divergence_tdp(peq1, D1, p01, peq2, D2, p02, weights, lQ1, Qx1, lQ2, Qx2, terminal_time=0.5, number_of_samples=5):\n time = np.linspace(0, terminal_time, number_of_samples)\n out = JS_divergence(p01, p02, weights) / 2\n rho0d1 = Qx1.T.dot(np.diag(weights)).dot(p01 / np.sqrt(peq1))\n rho0d2 = Qx2.T.dot(np.diag(weights)).dot(p02 / np.sqrt(peq2))\n\n for i in range(1, len(time)):\n p1 = (Qx1.dot(rho0d1.dot(\n np.diag(np.exp(-lQ1 * (time[i] - time[0])))))) * np.sqrt(peq1)\n p2 = (Qx2.dot(rho0d2.dot(\n np.diag(np.exp(-lQ2 * (time[i] - time[0])))))) * np.sqrt(peq2)\n out += JS_divergence(p1, p2, weights, 'unnormalized')\n # print(out)\n return out * (time[1] - time[0])", "def kl_bern(x, y):\n x = min(max(x, eps), 1-eps)\n y = min(max(y, eps), 1-eps)\n return x*log(x/y) + (1-x)*log((1-x)/(1-y))", "def vf_wasserstein_distance(x, y, critic):\n return torch.mean(critic(x)) - torch.mean(critic(y))", "def test_renyi_values():\n d1 = Distribution(['0', '1'], [0, 1])\n d2 = Distribution(['0', '1'], [1 / 2, 1 / 2])\n d3 = Distribution(['0', '1'], [1, 0])\n\n assert renyi_divergence(d1, d2, 1 / 2) == pytest.approx(np.log2(2))\n assert renyi_divergence(d2, d3, 1 / 2) == pytest.approx(np.log2(2))\n assert renyi_divergence(d1, d3, 1 / 2) == pytest.approx(np.inf)", "def jaccard_distance(y_true, y_pred, smooth=100):\n intersection = K.sum(K.abs(y_true * y_pred), axis=-1)\n sum_ = K.sum(K.abs(y_true) + K.abs(y_pred), axis=-1)\n jac = (intersection + smooth) / (sum_ - intersection + smooth)\n return K.abs(1 - jac) * smooth", "def calc_linear_dispersion(self):\n self._check_k_columns([\"K0L\", \"K0SL\", \"K1SL\"])\n tw = self.twiss_df\n phs_adv = self.get_phase_adv()\n res = self._results_df\n coeff_fun = 
self._linear_dispersion_coeff\n sum_fun = self._linear_dispersion_sum\n\n # Calculate\n LOG.debug(\"Calculate Linear Dispersion\")\n with timeit(lambda t: LOG.debug(\" Time needed: {:f}\".format(t))):\n # sources\n k0_mask = tw['K0L'] != 0\n k0s_mask = tw['K0SL'] != 0\n k1s_mask = tw['K1SL'] != 0\n\n mx_mask = k0_mask | k1s_mask # magnets contributing to Dx,j (-> Dy,m)\n my_mask = k0s_mask | k1s_mask # magnets contributing to Dy,j (-> Dx,m)\n\n if not any(mx_mask | my_mask):\n LOG.warning(\" No linear dispersion contributions found. Values will be zero.\")\n res['DX'] = 0.\n res['DY'] = 0.\n self._log_added('DX', 'DY')\n return\n\n # create temporary DataFrame for magnets with coefficients already in place\n df = tfs.TfsDataFrame(index=tw.index).join(\n coeff_fun(tw.loc[:, 'BETX'], tw.Q1)).join(\n coeff_fun(tw.loc[:, 'BETY'], tw.Q2))\n df.columns = ['COEFFX', 'COEFFY']\n\n LOG.debug(\" Calculate uncoupled linear dispersion\")\n df.loc[my_mask, 'DX'] = df.loc[my_mask, 'COEFFX'] * \\\n sum_fun(tw.loc[mx_mask, 'K0L'],\n 0,\n 0,\n tw.loc[mx_mask, 'BETX'],\n tau(phs_adv['X'].loc[mx_mask, my_mask], tw.Q1)\n ).transpose()\n df.loc[mx_mask, 'DY'] = df.loc[mx_mask, 'COEFFY'] * \\\n sum_fun(-tw.loc[my_mask, 'K0SL'], # MINUS!\n 0,\n 0,\n tw.loc[my_mask, 'BETY'],\n tau(phs_adv['Y'].loc[my_mask, mx_mask], tw.Q2)\n ).transpose()\n\n LOG.debug(\" Calculate full linear dispersion values\")\n res.loc[:, 'DX'] = df.loc[:, 'COEFFX'] * \\\n sum_fun(tw.loc[mx_mask, 'K0L'],\n tw.loc[mx_mask, 'K1SL'],\n df.loc[mx_mask, 'DY'],\n tw.loc[mx_mask, 'BETX'],\n tau(phs_adv['X'].loc[mx_mask, :], tw.Q1)\n ).transpose()\n res.loc[:, 'DY'] = df.loc[:, 'COEFFY'] * \\\n sum_fun(-tw.loc[my_mask, 'K0SL'], # MINUS!\n tw.loc[my_mask, 'K1SL'],\n df.loc[my_mask, 'DX'],\n tw.loc[my_mask, 'BETY'],\n tau(phs_adv['Y'].loc[my_mask, :], tw.Q2)\n ).transpose()\n\n LOG.debug(\" Average linear dispersion Dx: {:g}\".format(\n np.mean(res['DX'])))\n LOG.debug(\" Average linear dispersion Dy: {:g}\".format(\n np.mean(res['DY'])))\n self._log_added('DX', 'DY')", "def grad_j(self,w,j):\n g = 0\n for i in range(len(self.x)):\n # Each example contributes -sigma(-y_i * x_i.w) * y_j x_ij\n g -= sigmoid(-self.y[i] * np.dot(w, self.x[i,:])) * self.y[i] * self.x[i,j]\n #regularisation\n g += self.alpha * w[j]\n return g", "def test_divergences_to_kl2(dists, divergence):\n for dist1, dist2 in combinations(dists, 2):\n assert divergence(dist1, dist2, alpha=1) == pytest.approx(kullback_leibler_divergence(dist1, dist2))", "def js_sort(d1, d2, beta):\n n = d1.shape[0]\n n_selected = int(n // (1.0 + beta))\n d1_selected = torch.topk(d1, n_selected, largest=False, sorted=False)[0]\n return js_div(d1_selected, d2)", "def compute_jacs(x_sp,params_sens_dict,integration_params,**kwargs):\n\n # check if sensitivity to all params\n if kwargs['diffeq_params'] is None:\n diffeq_params = params_sens_dict\n params_sensitivity_sp = list(params_sens_dict.values())\n\n else:\n diffeq_params = kwargs['diffeq_params'].copy()\n params_sensitivity_sp = list(params_sens_dict.values())\n for key,value in params_sens_dict.items():\n diffeq_params[key] = value\n\n SDerivSymbolic = sp.Matrix(SDeriv(0,x_sp,integration_params,diffeq_params))\n\n # derivative of rhs wrt params\n SDerivSymbolicJacParams = SDerivSymbolic.jacobian(params_sensitivity_sp)\n SDerivSymbolicJacParamsLamb = sp.lambdify((x_sp,params_sensitivity_sp), SDerivSymbolicJacParams,'numpy')\n SDerivSymbolicJacParamsLambFun = lambda t,x,params: SDerivSymbolicJacParamsLamb(x,params)\n\n # derivative of rhs wrt Conc\n 
SDerivSymbolicJacConc = SDerivSymbolic.jacobian(x_sp)\n SDerivSymbolicJacConcLamb = sp.lambdify((x_sp,params_sensitivity_sp),SDerivSymbolicJacConc,'numpy')\n SDerivSymbolicJacConcLambFun = lambda t,x,params: SDerivSymbolicJacConcLamb(x,params)\n\n return [SDerivSymbolicJacParamsLambFun,SDerivSymbolicJacConcLambFun]", "def kde2D(x, y, bandwidth, xbins=100j, ybins=100j, **kwargs):\n\n # create grid of sample locations (default: 100x100)\n xx, yy = np.mgrid[x.min():x.max():xbins, \n y.min():y.max():ybins]\n\n xy_sample = np.vstack([yy.ravel(), xx.ravel()]).T\n xy_train = np.vstack([y, x]).T\n\n kde_skl = KernelDensity(bandwidth=bandwidth, **kwargs)\n kde_skl.fit(xy_train)\n\n # score_samples() returns the log-likelihood of the samples\n z = np.exp(kde_skl.score_samples(xy_sample))\n return xx, yy, np.reshape(z, xx.shape)", "def dK_dtheta(self,dL_dK,X,X2,target):\r\n if X2 is None: X2 = X\r\n dist = np.sqrt(np.sum(np.square((X[:,None,:]-X2[None,:,:])/self.lengthscale),-1))\r\n invdist = 1./np.where(dist!=0.,dist,np.inf)\r\n dist2M = np.square(X[:,None,:]-X2[None,:,:])/self.lengthscale**3\r\n dvar = (1+np.sqrt(5.)*dist+5./3*dist**2)*np.exp(-np.sqrt(5.)*dist)\r\n dl = (self.variance * 5./3 * dist * (1 + np.sqrt(5.)*dist ) * np.exp(-np.sqrt(5.)*dist))[:,:,np.newaxis] * dist2M*invdist[:,:,np.newaxis]\r\n target[0] += np.sum(dvar*dL_dK)\r\n if self.ARD:\r\n dl = (self.variance * 5./3 * dist * (1 + np.sqrt(5.)*dist ) * np.exp(-np.sqrt(5.)*dist))[:,:,np.newaxis] * dist2M*invdist[:,:,np.newaxis]\r\n #dl = (self.variance* 3 * dist * np.exp(-np.sqrt(3.)*dist))[:,:,np.newaxis] * dist2M*invdist[:,:,np.newaxis]\r\n target[1:] += (dl*dL_dK[:,:,None]).sum(0).sum(0)\r\n else:\r\n dl = (self.variance * 5./3 * dist * (1 + np.sqrt(5.)*dist ) * np.exp(-np.sqrt(5.)*dist)) * dist2M.sum(-1)*invdist\r\n #dl = (self.variance* 3 * dist * np.exp(-np.sqrt(3.)*dist)) * dist2M.sum(-1)*invdist\r\n target[1] += np.sum(dl*dL_dK)", "def dphidalpha_j(x, alpha_j, beta_j, gamma_j):\n def f(xp):\n delta = xp - gamma_j\n return csrbf(math.sqrt(beta_j*delta*beta_j*delta))\n vf = num.vectorize(f)\n return vf(x)", "def return_js_matrix(distributions1, distributions2, verbose=0):\n assert distributions1.shape[1] == distributions2.shape[1], \\\n \"Distributions must have matching dimensions. 
Consider using merge_energies\"\n js_matrix = np.zeros((len(distributions1), len(distributions2)))\n for i in trange(len(distributions1), desc='dist1 loop', disable=verbose<=0):\n for j in trange(len(distributions2), desc='dist2 loop', disable=verbose<=1):\n masses1 = distributions1[i]\n masses2 = distributions2[j]\n js = entropy((masses1+masses2)/2) -\\\n entropy(masses1)/2 - entropy(masses2)/2\n js_matrix[i, j] = js\n return js_matrix", "def kl_divergence(means: Tensor, logvars: Tensor) ->Tensor:\n kl_cost = -0.5 * (logvars - means ** 2 - torch.exp(logvars) + 1.0)\n kl_cost = torch.mean(kl_cost, 0)\n return torch.sum(kl_cost)", "def J(cst, x):\n [u0, v0, u1, v1, u2, v2, coeffs] = cst\n [u, v, g1, g2, g3] = x\n df1du = 2*u*g3**2 - 2*g3*u0 + 2*g3*coeffs[3]*(g1*u1-u0) + 2*g3*coeffs[4]*(g2*u2-u0)\n df1dv = -2*v*g3**2 + 2*g3*v0 - 2*g3*coeffs[3]*(g1*v1-v0) - 2*g3*coeffs[4]*(g2*v2-v0)\n df1dg1 = 2*g1*coeffs[0]*(u1**2-v1**2) + 2*(v1*v0-u1*u0)*(coeffs[0]+coeffs[1]+coeffs[3]) + 2*g2*coeffs[1]*(u1*u2-v1*v2) + 2*g3*coeffs[3]*(u1*u-v1*v)\n df1dg2 = 2*g2*coeffs[2]*(u2**2-v2**2) + 2*(v2*v0-u2*u0)*(coeffs[1]+coeffs[2]+coeffs[4]) + 2*g1*coeffs[1]*(u1*u2-v1*v2) + 2*g3*coeffs[4]*(u2*u-v2*v)\n df1dg3 = 2*g3*(u**2-v**2) + 2*(v*v0-u*u0)*(coeffs[3]+coeffs[4]+1) + 2*g1*coeffs[3]*(u1*u-v1*v) + 2*g2*coeffs[4]*(u2*u-v2*v)\n\n df2du = 0\n df2dv = 2*v*g3**2 + 2*g3*(-v0 + coeffs[3]*(g1*v1-v0) + coeffs[4]*(g2*v2-v0))\n df2dg1 = 2*g1*coeffs[0]*(v1**2-1) + 2*(1-v1*v0)*(coeffs[0]+coeffs[1]+coeffs[3]) + 2*g2*coeffs[1]*(v1*v2-1) + 2*g3*coeffs[3]*(v1*v-1)\n df2dg2 = 2*g2*coeffs[2]*(v2**2-1) + 2*(1-v2*v0)*(coeffs[1]+coeffs[2]+coeffs[4]) + 2*g1*coeffs[1]*(v1*v2-1) + 2*g3*coeffs[4]*(v2*v-1)\n df2dg3 = 2*g3*(v**2-1) + 2*(1-v*v0)*(coeffs[3]+coeffs[4]+1) + 2*g1*coeffs[3]*(v1*v-1) + 2*g2*coeffs[4]*(v2*v-1)\n\n df3du = g3*coeffs[3]*(g1*v1-v0) + g3*coeffs[4]*(g2*v2-v0) + g3*(g3*v-v0)\n df3dv = g3*coeffs[3]*(g1*u1-u0) + g3*coeffs[4]*(g2*u2-u0) + g3*(g3*u-u0)\n df3dg1 = 2*g1*coeffs[0]*u1*v1 - (v1*u0+u1*v0)*(coeffs[0]+coeffs[1]+coeffs[3]) + g2*coeffs[1]*(u1*v2+v1*u2) + g3*coeffs[3]*(v1*u+u1*v)\n df3dg2 = 2*g2*coeffs[2]*u2*v2 - (v2*u0+u2*v0)*(coeffs[1]+coeffs[2]+coeffs[4]) + g1*coeffs[1]*(u1*v2+v1*u2) + g3*coeffs[4]*(v2*u+u2*v)\n df3dg3 = 2*g3*u*v - (u*v0+v*u0)*(coeffs[3]+coeffs[4]+1) + g1*coeffs[3]*(v1*u+u1*v) + g2*coeffs[4]*(v2*u+u2*v)\n\n df4du = g3*coeffs[3]*(g1-1) + g3*coeffs[4]*(g2-1) + g3*(g3-1)\n df4dv = 0\n df4dg1 = 2*g1*coeffs[0]*u1 - (u0+u1)*(coeffs[0]+coeffs[1]+coeffs[3]) + g2*coeffs[1]*(u1+u2) + g3*coeffs[3]*(u+u1)\n df4dg2 = 2*g2*coeffs[2]*u2 - (u0+u2)*(coeffs[1]+coeffs[2]+coeffs[4]) + g1*coeffs[1]*(u1+u2) + g3*coeffs[4]*(u+u2)\n df4dg3 = 2*g3*u - (u+u0)*(coeffs[3]+coeffs[4]+1) + g1*coeffs[3]*(u+u1) + g2*coeffs[4]*(u+u2)\n\n df5du = 0\n df5dv = g3*coeffs[3]*(g1-1) + g3*coeffs[4]*(g2-1) + g3*(g3-1)\n df5dg1 = 2*g1*coeffs[0]*v1 - (v1+v0)*(coeffs[0]+coeffs[1]+coeffs[3]) + g2*coeffs[1]*(v2+v1) + g3*coeffs[3]*(v1+v)\n df5dg2 = 2*g2*coeffs[2]*v2 - (v2+v0)*(coeffs[1]+coeffs[2]+coeffs[4]) + g1*coeffs[1]*(v2+v1) + g3*coeffs[4]*(v2+v)\n df5dg3 = 2*g3*v - (v0+v)*(coeffs[3]+coeffs[4]+1) + g1*coeffs[3]*(v1+v) + g2*coeffs[4]*(v2+v)\n\n return np.array([\n [df1du, df1dv, df1dg1, df1dg2, df1dg3],\n [df2du, df2dv, df2dg1, df2dg2, df2dg3],\n [df3du, df3dv, df3dg1, df3dg2, df3dg3],\n [df4du, df4dv, df4dg1, df4dg2, df4dg3],\n [df5du, df5dv, df5dg1, df5dg2, df5dg3],\n ])", "def LJ(r, epsilon, sigma, x, y):\n A=((x/y)**(x/(x-y))/((x/y)-1))\n\n\n V=A*epsilon*((sigma/r)**x-(sigma/r)**y) #-4*Epsilon*((Sigma/Rc)**12-(Sigma/Rc)**6)\n\n return V", 
"def needleman_wunsch(s1, s2):\n\n if s1 is None or s2 is None:\n return np.NaN\n if pd.isnull(s1) or pd.isnull(s2):\n return np.NaN\n\n # Create the similarity measure object\n measure = sm.NeedlemanWunsch()\n\n s1 = gh.convert_to_str_unicode(s1)\n s2 = gh.convert_to_str_unicode(s2)\n\n # Call the function to compute the similarity measure\n return measure.get_raw_score(s1, s2)", "def find_knee(x,y):\n\n # find ranges\n if len(x) != len(y):\n raise Exception(\"bad data\")\n tot_len = len(x)\n \n \n \n # fit strait lines to both\n\n # find intercept\n knee_r = (f_top.beta[1] - f_bottom.beta[1])/(-f_top.beta[0] + f_bottom.beta[0])", "def KSStat(xs,ys,reweight=False,cdf_x=None,cdf_y=None,data_range=None):\n if cdf_x is None and cdf_y is None and data_range is None:\n data_range = list(set(xs)) + list(set(ys))\n if cdf_x is None:\n cdf_x = cum_density_func(xs,norm=True,rank=False,data_range=data_range)\n if cdf_y is None:\n cdf_y = cum_density_func(ys,norm=True,rank=False,data_range=data_range)\n keys = set(cdf_x.keys()+cdf_y.keys())\n SP = []\n for k in keys:\n if k in cdf_x and k in cdf_y:\n SP.append((cdf_x[k],cdf_y[k]))\n if reweight:\n return np.max([np.abs(s-p)/np.sqrt(p*(1.0-p)) for (s,p) in SP])\n else:\n return np.max([np.abs(s-p) for (s,p) in SP])" ]
[ "0.7116401", "0.6758938", "0.6689075", "0.654391", "0.6027384", "0.60244334", "0.6005269", "0.59004337", "0.58797586", "0.58616793", "0.58290076", "0.58030003", "0.57559144", "0.57503366", "0.569734", "0.5691851", "0.5684341", "0.5684016", "0.5652677", "0.5649112", "0.5614351", "0.56037545", "0.55800897", "0.55391544", "0.5523207", "0.5495863", "0.5484064", "0.54637396", "0.5449351", "0.5437303", "0.5434162", "0.5433914", "0.5402381", "0.53985476", "0.53901345", "0.5387793", "0.53687453", "0.53558046", "0.53152585", "0.53126055", "0.5297076", "0.52926975", "0.52903086", "0.5278519", "0.526033", "0.5255974", "0.5255236", "0.52487403", "0.52478635", "0.5245473", "0.52281076", "0.5206418", "0.518174", "0.51780736", "0.51732624", "0.5172519", "0.51678056", "0.5157482", "0.5141861", "0.514042", "0.5140277", "0.51364803", "0.513556", "0.5126144", "0.5119942", "0.5118737", "0.5110861", "0.5105823", "0.5104839", "0.510277", "0.50997126", "0.5089965", "0.5087287", "0.5077184", "0.50745445", "0.5068356", "0.5057886", "0.5057605", "0.505078", "0.50496805", "0.5045632", "0.5040716", "0.50399345", "0.50391704", "0.5038244", "0.5035356", "0.50346226", "0.5030205", "0.50217545", "0.502063", "0.50194097", "0.50165117", "0.50153285", "0.5005708", "0.5002518", "0.50012136", "0.50011337", "0.49997187", "0.49924824", "0.4989253" ]
0.6525315
4
Method for parsing CLI arguments using argparse.
def parse_parameters():
    parser = argparse.ArgumentParser(
        description="Get all dependent review IDs")
    parser.add_argument("-r", "--review-id", type=str, required=True,
                        help="Review ID")
    parser.add_argument("-o", "--out-file", type=str, required=False,
                        help="The out file with the reviews IDs")
    return parser.parse_args()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_cli_arguments():\n parser = argparse.ArgumentParser('Generates a MANIFEST file used by the '\n 'HMP2 AnADAMA2 workflows.')\n parser.add_argument('-b', '--broad-data-sheet', required=True,\n help='Broad data product status spreadsheet. '\n 'Contains entries indicating new files to be '\n 'processed.')\n parser.add_argument('-o', '--output-manifest', required=True,\n help='Path to desired output manifest file.')\n parser.add_argument('-oi', '--origin-institute', required=True,\n help='Name of institute submitting new files '\n 'to be processed.')\n parser.add_argument('-oc', '--origin-contact', required=True,\n help='Contact person for corresponding origin '\n 'institute.')\n parser.add_argument('-oe', '--origin-contact-email', required=True,\n help='Contact email for contact person.')\n parser.add_argument('-p', '--project-name', dest='project', \n required=True,\n help='Project that sequence files belong too.')\n\n return parser.parse_args()", "def parse_cli():\n parser = OptionParser()\n return parser.parse_args()", "def parse_command_line(self, argv):\n from optparse import OptionParser\n usage = \"usage: %prog [options]\"\n parser = OptionParser(usage)\n\n (options, args) = parser.parse_args(argv)", "def parse_cli_args() -> ArgumentParser:\n parser = ArgumentParser()\n\n group = parser.add_argument_group(\"Run parameters\")\n group.add_argument(\"--url\", type=str, default=\"DEFAULT\", help=\"URL to run the workflow on.\")\n group.add_argument(\n \"--output\",\n type=Path,\n help=\"Where to save the result locally. If save, remember to also add save flag for config.\",\n default=None,\n )\n group.add_argument(\n \"--windows\",\n type=str,\n nargs=\"*\",\n default=[wtl.Workflow.SINGLE_TAB],\n help=\"Tab names (comma-separated). Use space separation for multiple windows.\",\n )\n group.add_argument(\n \"--config\",\n type=str,\n nargs=\"*\",\n default=[],\n required=False,\n help=\"Names of config files in config/, such as \" '\"iphone_x_mobile\", or key=value pairs.',\n )\n\n cli_args = parser.parse_args()\n cli_args.config.insert(0, \"default\")\n\n if cli_args.url == \"DEFAULT\":\n cli_args.url = start_server()\n\n return cli_args", "def parse_cli():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"ENV\", help=\"Enviorment SCANNER, PC, REMOMTE\")\n args = parser.parse_args()\n\n return args", "def parse_cli_args() -> argparse.Namespace:\n parser = argparse.ArgumentParser(description='Virtual analog synthesizer')\n parser.add_argument(\n '-i', '--input_path', type=str, required=True,\n help='path to input TSV file with definition of a track to be played'\n )\n parser.add_argument(\n '-p', '--presets_path', type=str, required=True,\n help='path to YAML file with definitions of timbres to be used'\n )\n parser.add_argument(\n '-o', '--output_path', type=str, required=True,\n help='path to output file where result is going to be saved as WAV'\n )\n parser.add_argument(\n '-c', '--config_path', type=str, default=None,\n help='path to configuration file'\n )\n parser.add_argument(\n '-s', '--safe_mode', dest='safe', action='store_true',\n help='validate parsed timbres before core tasks'\n )\n parser.set_defaults(safe=False)\n\n cli_args = parser.parse_args()\n return cli_args", "def parse_cli_args():\r\n parser = argparse.ArgumentParser(\r\n description=\"list all installed packages\")\r\n\r\n parser.add_argument(\"-v\", \"--verbose\",\r\n help=\"increase output verbosity\",\r\n action=\"store_true\")\r\n\r\n parser.add_argument(\"-d\", \"--debug\",\r\n help=\"enable 
debug output\",\r\n action=\"store_true\")\r\n\r\n parser.add_argument(\"-N\", \"--dry-run\",\r\n help=\"Do not perform any actions, only simulate them.\",\r\n action=\"store_true\")\r\n\r\n args = parser.parse_args()\r\n\r\n # set debug log state\r\n DebugLog.enabled = args.debug\r\n\r\n with DebugLogScopedPush(\"cli arguments:\"):\r\n DebugLog.print(str(args))\r\n\r\n return args", "def parse_command_line():\n parser = argparse.ArgumentParser()\n\n help_str = \\\n 'The collection folder to sort files into. ' \\\n 'If the folder does not exist, it will be created along with the ' \\\n 'necessary contents.'\n parser.add_argument('-c', '--collection', help=help_str)\n\n help_str = \\\n 'The source folder to import files from. Has to exist and ' \\\n 'has to be a folder.'\n parser.add_argument('-s', '--source', help=help_str, required=False)\n\n help_str = \\\n 'View the gallery in random order auto skpping after the' \\\n 'given amount of seconds'\n parser.add_argument('-v', '--view', help=help_str, required=False)\n\n return parser.parse_args()", "def parse_command_line() -> argparse.Namespace:\n parser = argparse.ArgumentParser()\n parser.add_argument(\n 'pet_database',\n type=str,\n help='path to pet database'\n )\n parser.add_argument(\n '--image_dir',\n default='data/images'\n )\n parser.add_argument(\n '--log',\n default=None,\n help='log file path'\n )\n\n args = parser.parse_args()\n args.pet_database = os.path.abspath(os.path.expanduser(args.pet_database))\n args.image_dir = os.path.abspath(os.path.expanduser(args.image_dir))\n args.log = os.path.abspath(os.path.expanduser(args.log)) if args.log else None\n return args", "def __parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('-f', '--force', action=\"store_true\", default=False,\n help='overwrite existing database files during import')\n parser.add_argument('-e', '--extension', action=\"store\", default='txt',\n help='specify file extension. default is \"txt\"')\n parser.add_argument('-d', '--delimiter', action=\"store\", default='\\t',\n help='specify column delimiter. default is tab (\\\\t)')\n parser.add_argument('-m', '--mark', action=\"store\", default='.',\n help='specify decimal mark for numeric data. default is'\n ' dot (.)')\n parser.add_argument('-o', '--outformat', action=\"store\", default='npz',\n help='specify output database format. default is \"npz\"'\n ' for numpy database. 
use \"mat\" for matlab '\n ' database format.')\n parser.add_argument('-r', '--recursive', action=\"store_true\", default=False,\n help='recursively walk through all sub-directories of'\n ' current working directory')\n parser.add_argument('-p', '--pcs', action=\"store_true\", default=True,\n help='indicate if files are pcs files.')\n parser.add_argument('-c', '--colheadlines', action=\"store\", default='1',\n help='number of lines spanned by the column headers')\n args = parser.parse_args()\n return args", "def parse_command_line():\r\n\r\n parser = argparse.ArgumentParser(description='User args')\r\n parser.add_argument(\"--action\", choices=['train', 'predict', 'demo', 'test'], required=True, help=\"Choose action.\")\r\n parser.add_argument(\"--model\", choices=['vgg', 'unet', 'fpn'], required=True, help=\"Choose model.\")\r\n parser.add_argument(\"--dataset\", choices=['full', 'small'], required=True, help=\"Choose dataset.\")\r\n\r\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser(description='Google reminders cli',\n epilog=usage,\n formatter_class=argparse.RawTextHelpFormatter)\n return parser.parse_args()", "def parse_args():\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-i\", \"--input_path\", required=True)\n parser.add_argument(\"-c\", \"--config\", required=True)\n return parser.parse_args()", "def parse_command_line():\n parser = argparse.ArgumentParser()\n\n # Optional Argument\n parser.add_argument('-l', '--length', metavar='length', type=float, default=2, help='length (meter)')\n parser.add_argument('-k', '--conductivity', metavar='conductivity', type=float, default=0.5, help='constant thermal conductivity (W/m.K)')\n parser.add_argument('-q', '--heatgeneration', metavar='heatgeneration', type=float, default=1000, help='uniform heat generation (kW/m^3)')\n parser.add_argument('-TA', '--tempA', metavar='tempA', type=int, default=100, help='temperature at A (Celcius)')\n parser.add_argument('-TB', '--tempB', metavar='tempB', type=int, default=200, help='temperature at A (Celcius)')\n parser.add_argument('-n', '--nodes', metavar='nodes', type=int, default=5, help='nodes (positive integer)')\n parser.add_argument('-A', '--area', metavar='area', type=float, default=1, help='area (m^2)')\n parser.add_argument('-nf', '--nofigure', action='store_true', help='disable figure')\n parser.add_argument('-nd', '--nodetail', action='store_true', help='disable detail')\n return parser.parse_args()", "def _parse_cmd_args():\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"--root\", required=True,\n help=\"Root directory of data and logs.\")\n parser.add_argument(\"--log_dir\", default=\"logs\",\n help=\"Sub-directory of tensorboard logs.\")\n parser.add_argument(\"--gpu\", default=\"0\", help=\"GPU device ID.\")\n\n args = parser.parse_args()\n\n return args", "def parse_cli_args(self):\n parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Linode')\n parser.add_argument('--list', action='store_true', default=True,\n help='List nodes (default: True)')\n parser.add_argument('--host', action='store',\n help='Get all the variables about a specific node')\n parser.add_argument('--refresh-cache', action='store_true', default=False,\n help='Force refresh of cache by making API requests to Linode (default: False - use cache files)')\n self.args = parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"-d\",\n \"--debug\",\n help=\"Print lots of 
debugging statements\",\n action=\"store_const\",\n dest=\"loglevel\",\n const=logging.DEBUG,\n default=logging.ERROR,\n )\n parser.add_argument(\n \"-v\",\n \"--verbose\",\n help=\"Be verbose\",\n action=\"store_const\",\n dest=\"loglevel\",\n const=logging.INFO,\n )\n parser.add_argument(\"runscript\", default=None)\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser(description=__doc__)\n # If user doesn't specify an input file, read from standard input. Since\n # encodings are the worst thing, we're explicitly expecting std\n parser.add_argument('-i', '--infile',\n type=lambda x: open(x, encoding=ENCODE_IN),\n default=io.TextIOWrapper(\n sys.stdin.buffer, encoding=ENCODE_IN)\n )\n # Same thing goes with the output file.\n parser.add_argument('-o', '--outfile',\n type=lambda x: open(x, 'w', encoding=ENCODE_OUT),\n default=io.TextIOWrapper(\n sys.stdout.buffer, encoding=ENCODE_OUT)\n )\n # Set the verbosity level for the logger. The `-v` option will set it to\n # the debug level, while the `-q` will set it to the warning level.\n # Otherwise use the info level.\n verbosity = parser.add_mutually_exclusive_group()\n verbosity.add_argument('-v', '--verbose', action='store_const',\n const=logging.DEBUG, default=logging.INFO)\n verbosity.add_argument('-q', '--quiet', dest='verbose',\n action='store_const', const=logging.WARNING)\n return parser.parse_args()", "def parse_args():\n\n parser = argparse.ArgumentParser(description='CLI to store Actisense-NGT Gateway values to InfluxDB and publish via MQTT')\n parser.add_argument('--config', '-c', type=str, required=True, help='JSON configuraton file with path')\n return parser.parse_args()", "def _ParseCommandArguments():\n arg_parser = argparse.ArgumentParser()\n arg_parser.usage = __doc__\n\n arg_parser.add_argument('--download-dir',\n type=str,\n required=True,\n help='Directory into which corpora are downloaded.')\n arg_parser.add_argument('--build-dir',\n required=True,\n type=str,\n help='Directory where fuzzers were built.')\n args = arg_parser.parse_args()\n return args", "def parse_args():\n parser = argparse.ArgumentParser(description=_program_description)\n parser.add_argument('input_file', help=_input_file_description)\n #parser.add_argument('-v', '--verbose', action='store_true', \n # default=False, help='show progress')\n args = parser.parse_args()\n return args", "def parse_cmd_arguments():\n parser = argparse.ArgumentParser(description='Process some integers.')\n parser.add_argument('-i', '--input', required=True, help='input JSON file')\n parser.add_argument('-o', '--output', required=True,\n help='ouput JSON file')\n parser.add_argument('-d', '--debug', required=False,\n help='log level. Can be 0-3. Defaults to 0')\n\n return parser.parse_args()", "def arg_parse():\n p = ap.ArgumentParser()\n p.add_argument()\n return p.parse_args()", "def _parse_args():\n parser = argparse.ArgumentParser(description='Run DAFI.')\n parser.add_argument('input_file', help='Name (path) of input file')\n return parser.parse_args()", "def parse_args(self, argv=None):\n self.opts, self.args = self.cli_parser.parse_args(argv)\n self.check_arguments()\n self._post_process_opts_and_args()\n return self.opts, self.args", "def _cli_parser():\n parser = argparse.ArgumentParser()\n # Original flags\n parser.add_argument('-s', type=str, metavar='input_vol',\n help=\"Absolute path to input volume. 
Input should be \"\n \"in nifti format\")\n parser.add_argument('-o', type=str, metavar='output_dir',\n help=\"Absolute path to output directory\")\n parser.add_argument('-p', type=str, metavar='template_type', default='MNI152_orig',\n help=\"Type of volumetric template used in index files. \"\n \"Use MNI152_orig or Colin27_orig when -r is \"\n \"RF_ANTs. Use MNI152_norm or Colin27_norm when \"\n \"-r is RF_M3Z. Otherwise, an exception is raised. \"\n \"Ensure that the template matches the standard \"\n \"space of -i (i.e., use MNI152_* if -i is \"\n \"in MNI152-space). Default: MNI152_orig\")\n parser.add_argument('-r', type=str, metavar='RF_type', default='RF_ANTs',\n help=\"Type of Registration Fusion approaches used to \"\n \"generate the mappings (RF_M3Z or RF_ANTs). \" \n \"RF_M3Z is recommended if data was registered \" \n \"from subject's space to the volumetric atlas \" \n \"space using FreeSurfer. RF_ANTs is recommended \" \n \"if such registrations were carried out using \" \n \"other tools, especially ANTs. Default: RF_ANTs\")\n parser.add_argument('-i', type=str, metavar='interp', default='linear',\n help=\"Interpolation (linear or nearest). If \"\n \"-g is label.gii, then interpolation is always set \"\n \"to nearest and a warning is raised. Default: \"\n \"linear\")\n # New flags\n parser.add_argument('-t', type=str, metavar='out_type', default='nii.gz',\n help=\"File type of surface files. nii.gz is true to \"\n \"the original Wu et al (2018) implementation. \"\n \"Note that gifti formats, either \"\n \"func.gii or label.gii, are often preferred. \"\n \"Default: nii.gz\")\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser(\n fromfile_prefix_chars='@',\n description='Convert CVAT XML annotations to masks'\n )\n\n parser.add_argument(\n '--cvat-xml', metavar='FILE', required=True,\n help='input file with CVAT annotation in xml format'\n )\n\n parser.add_argument(\n '--background-color', metavar='COLOR_BGR', default=\"0,0,0\",\n help='specify background color (by default: 0,0,0)'\n )\n\n parser.add_argument(\n '--label-color', metavar='LABEL:COLOR_BGR', action='append',\n default=[],\n help=\"specify a label's color (e.g. 255 or 255,0,0). 
The color will \" +\n \"be interpreted in accordance with the mask format.\"\n )\n\n parser.add_argument(\n '--mask-bitness', type=int, choices=[8, 24], default=8,\n help='choose bitness for masks'\n )\n\n parser.add_argument(\n '--output-dir', metavar='DIRECTORY', required=True,\n help='directory for output masks'\n )\n\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser(description=__doc__)\n return parser.parse_args()", "def parse_cli_args():\n\n parser = argparse.ArgumentParser(\n description='Create AMICI model and data for steadystate example.')\n\n parser.add_argument('-s', '--sbml', dest='sbml_file_name',\n required=True,\n help='SBML model filename (PEtab format)')\n\n parser.add_argument('-d', '--model-dir', dest='model_dir',\n help='Model directory containing the python module')\n\n parser.add_argument('-n', '--model-name', dest='model_name',\n required=True,\n help='Name of the AMICI model module')\n\n parser.add_argument('-m', '--measurements', dest='measurement_file_name',\n required=True,\n help='Name of measurement table (PEtab format)')\n\n parser.add_argument('-c', '--conditions', dest='condition_file_name',\n required=True,\n help='Condition table (PEtab format)')\n\n parser.add_argument('-p', '--parameters', dest='parameter_file_name',\n required=True,\n help='Condition table (PEtab format)')\n\n parser.add_argument('-o', dest='hdf5_file_name', default='data.h5',\n help='Name of HDF5 file to generate')\n\n args = parser.parse_args()\n\n return args", "def _parse_args():\n parser = argparse.ArgumentParser(description=\"\")\n #parser.add_argument(\"args\", metavar=\"N\", type=str, nargs=\"*\", help=\"Positional arguments.\")\n #parser.add_argument(\"\", dest=\"\", type=\"\", default=, help=)\n #parser.add_argument(\"--version\", action=\"version\", version=\"<the version>\")\n\n return parser.parse_args()", "def parse_cmd_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('-m', '--path_model', type=str, help='Path to trained model.')\n parser.add_argument('-t', '--type', type=str, help='Either \"torch\" or \"sklearn\".')\n # parser.add_argument('-l', '--location', type=str, help='Either \"local\", \"midgard\" or \"rattle\"')\n parser.add_argument('-i', '--path_in', type=str, help='Path to input file.')\n parser.add_argument('-o', '--path_out', type=str, help='Path to output file.')\n parser.add_argument('-c', '--path_config', type=str, help='Path to config file.')\n parser.add_argument('-g', '--gpu', type=int, default=0, help='Number of the gpu to be used.')\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"in_fq\", help=\"The fastq file containing Hi-C reads.\")\n parser.add_argument(\n \"-r\",\n \"--reference\",\n required=True,\n help=\"Path to the reference genome, in FASTA format.\",\n )\n parser.add_argument(\n \"-p\",\n \"--nb_processors\",\n default=1,\n type=int,\n help=\"number of CPUs used for alignment.\",\n )\n parser.add_argument(\n \"-o\",\n \"--out_sam\",\n help=\"Path to the output SAM file for the alignment of in_fq.\",\n )\n parser.add_argument(\n \"-T\",\n \"--tempdir\",\n default=\".\",\n help=\"Directory to write temporary files. 
Defaults to current directory.\",\n )\n parser.add_argument(\n \"-m\",\n \"--minimap2\",\n default=False,\n action=\"store_true\",\n help=\"Use minimap2 instead of bowtie for the alignment.\",\n )\n parser.add_argument(\n \"-l\",\n \"--min_len\",\n type=int,\n default=20,\n help=\"Minimum length to which reads should be truncated.\",\n )\n return parser.parse_args()", "def parseargs() -> argparse.ArgumentParser:\n\n parser = worker.parseargs(\"ACT hybrid-analysis.com Client\")\n\n parser.add_argument(\n \"--feed\", action=\"store_true\", help=\"Download the public feed only, no lookup\"\n )\n\n parser.add_argument(\n \"--apikey\", default=\"\", help=\"community apikey for hybrid-analysis.com\"\n )\n\n parser.add_argument(\n \"--user-agent\", default=\"Falcon Sandbox\", help=\"User agent while talking to API\"\n )\n\n parser.add_argument(\n \"--no-check-certificate\",\n action=\"store_true\",\n help=\"Do not check SSL certificate\",\n )\n\n return parser", "def parse_args():\n parser = argparse.ArgumentParser(\n description='Convert CVAT XML annotations to PASCAL VOC format'\n )\n\n parser.add_argument(\n '--cvat-xml', metavar='FILE', required=True,\n help='input file with CVAT annotation in xml format'\n )\n\n parser.add_argument(\n '--image-dir', metavar='DIRECTORY', required=True,\n help='directory which contains original images'\n )\n\n parser.add_argument(\n '--output-dir', metavar='DIRECTORY', required=True,\n help='directory for output annotations in PASCAL VOC format'\n )\n\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser(\n formatter_class=RawDescriptionHelpFormatter,\n description=\"This tool compares Master lab data with new data CSV files\",\n epilog=\"E.g.: ./cdc.py Master/Master.csv TestCaptures/data1.csv \"\n \"TestCaptures/data2.csv TestCaptures/data3.csv TestCaptures/data4.csv\",\n )\n parser.add_argument(\n \"master\", help=\"A Master CLIA CDC CSV file to process\",\n )\n parser.add_argument(\n \"new_files\",\n type=argparse.FileType(\"r\"),\n nargs=\"+\",\n help=\"A number of new CLIA CSV files to compare with Master\",\n )\n parser.add_argument(\n \"-e\", \"--extra\", action=\"store_true\", help=\"Display some extra data\",\n )\n parser.add_argument(\n \"-f\",\n \"--force\",\n action=\"store_true\",\n help=\"Bypass safety rails - very dangerous\",\n )\n parser.add_argument(\n \"-v\",\n \"--verbose\",\n action=\"store_true\",\n help=\"turn on verbose messages, commands and outputs\",\n )\n\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"auth\",\n help=\"authentication string for Infermedica API: \"\n \"APP_ID:APP_KEY or path to file containing it.\")\n parser.add_argument(\"--model\",\n help=\"use non-standard Infermedica model/language, \"\n \"e.g. infermedica-es\")\n args = parser.parse_args()\n return args", "def parse_args():\n parser = argparse.ArgumentParser(\n description=\"CUDAPOA Python API sample program.\")\n parser.add_argument('-m',\n help=\"Run MSA generation. 
By default consensusis generated.\",\n action='store_true')\n parser.add_argument('-p',\n help=\"Print output MSA or consensus for each POA group.\",\n action='store_true')\n parser.add_argument('-l',\n help=\"Use long or short read sample data.\",\n action='store_true')\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser(\n description='Convert CVAT XML annotations to YOLO format'\n )\n\n parser.add_argument(\n '--cvat-xml', metavar='FILE', required=True,\n help='input file with CVAT annotation in xml format'\n )\n\n parser.add_argument(\n '--image-dir', metavar='DIRECTORY', required=False,\n help='directory which contains original images'\n )\n\n parser.add_argument(\n '--output-dir', metavar='DIRECTORY', required=True,\n help='directory for output annotations in YOLO format'\n )\n\n parser.add_argument(\n '--username', metavar='USERNAME', required=False,\n help='Username from CVAT Login page, required to download images'\n )\n\n parser.add_argument(\n '--password', metavar='PASSWORD', required=False,\n help='Password from CVAT Login page, required to download images'\n )\n\n parser.add_argument(\n '--labels', metavar='ILABELS', required=False,\n help='Labels (separated by comma) to extract. Example: car,truck,motorcycle'\n )\n\n return parser.parse_args()", "def _parse_args():\n args = sys.argv[1:]\n cmd_parser = argparse.ArgumentParser()\n cmd_parser.add_argument(\n '--produce-sub',\n dest='produce_sub',\n help='Produce submision file',\n default=False,\n action='store_true',\n )\n cmd_parser.add_argument(\n '--search-cv',\n dest='search_cv',\n help='Perform Search of parameters',\n default=False,\n action='store_true',\n )\n cmd_opts = cmd_parser.parse_args(args=args)\n return cmd_opts", "def parse_command_args():\n parser = argparse.ArgumentParser(prog='iota.intercept')\n parser.add_argument('path', type=str, nargs = '?', default = None,\n help = 'Path to data file')\n parser.add_argument('--backend', type=str, default='dials',\n help='Backend for processing')\n parser.add_argument('--paramfile', type=str, default=None,\n help='Parameter file for processing')\n parser.add_argument('--output', type=str, default=None,\n help='Output filename')\n parser.add_argument('--termfile', type=str, default='.stop',\n help='Termination signal filename')\n parser.add_argument('--index', type=int, default=1,\n help='Numerical index of the image')\n parser.add_argument('--nproc', type=int, default=None,\n help='Number of processors')\n parser.add_argument('--action', type=str, default='spotfind',\n help='Code for how far to go; available codes: '\n 'spotfind, index, integrate')\n parser.add_argument('--verbose', action = 'store_true',\n help='Print information to stdout')\n\n return parser", "def parse_cli():\n parser = argparse.ArgumentParser(\n description=\"\"\"\n Get information about Red Hat OpenStack overcloud servers and roles.\n All output is JSON for further processing.\n \"\"\")\n\n parser.add_argument('-d', '--debug', action='store_true',\n default=False,\n help=\"Print additional information for status and diagnosis\")\n selector_group = parser.add_mutually_exclusive_group()\n selector_group.add_argument('-s', '--server',\n help=\"Find the role of a specified server\")\n selector_group.add_argument('-r', '--role',\n help=\"Find the servers under a specified role\")\n selector_group.add_argument('-R', '--list-roles', dest=\"list_roles\",\n action='store_true', default=False,\n help=\"list the roles defined for this cluster\")\n 
selector_group.add_argument('-o', '--orphan-nodes', action=\"store_true\",\n default=False,\n help='list any undeployed or untagged nodes')\n\n env_group = parser.add_mutually_exclusive_group()\n env_group.add_argument('-V', '--require-env', dest=\"require_env\", action='store_true', default=True)\n env_group.add_argument('--no-require-env', dest=\"require_env\", action='store_false')\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--length', required=True, type=int, help='Test length in seconds')\n parser.add_argument('--txs-per-ledger', required=True, type=int, help='Transaction rate to submit (spam) in parallel for every ledger round')\n parser.add_argument('--prioritizer-seeds-file', required=True, type=str, help='File path to prioritizer seeds file')\n parser.add_argument('--spammer-seeds-file', required=True, type=str, help='File path to spammer seeds file')\n parser.add_argument('--out', default='spam-results-{}.json'.format(str(int(time.time()))), type=str, help='Spam results JSON output')\n parser.add_argument('--avg-block-time', type=int, default=5, help='Average block time. Controls the time delay between every spam round and the one just after that')\n\n parser.add_argument('--passphrase', type=str, help='Network passphrase')\n parser.add_argument('--horizon', action='append',\n help='Horizon endpoint URL (use multiple --horizon flags for multiple addresses)')\n\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('n_iter',\n help='number of iteration',\n type=int)\n parser.add_argument('n_processes',\n help='number of processes',\n type=int)\n parser.add_argument('method',\n help='mutual exclusion method')\n parser.add_argument('duration',\n help='Duration of each process',\n type=float)\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser(description=\"\")\n parser.add_argument(\n \"config_path\",\n type=str,\n help=\"Path to the JSON configuration file containing the image transformation settings.\",\n )\n parser.add_argument(\n \"img_path\",\n type=str,\n help=\"Path to the input image file to apply transformations.\",\n )\n return parser.parse_args()", "def parse_cli_args():\n\n parser = argparse.ArgumentParser(\n description='Create AMICI model PEtab files for steadystate example.')\n\n parser.add_argument('-o', '--model-output-dir', dest='model_output_dir',\n default='model_steadystate_scaled',\n help='Name of the AMICI model directory to be created')\n\n parser.add_argument('-f', dest='hdf5_file_name',\n default='example_data.h5',\n help='Name of HDF5 file to generate')\n\n parser.add_argument('-p', dest='petab_dir',\n default='steadystate_petab',\n help='Directory to write PEtab files to')\n\n args = parser.parse_args()\n\n return args", "def parse_command_line():\n parser = argparse.ArgumentParser(description='Parses ID\\'s from the DDI compendium search results, and then downloads the html and puts them into a sqlite database.')\n parser.add_argument('-f', '--file', dest='file',\n action='store',\n help='Filenname to be read')\n arg_manager = parser.parse_args()\n return arg_manager", "def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--input-model\", help=\"Path to read input model from\")\n options = parser.parse_args()\n return options", "def parse_command_line():\n parser = argparse.ArgumentParser()\n\n # All reference encoders\n parser.add_argument(\"--step\", dest=\"step\", 
default=\"10\", type=int, help=\"step size\")\n parser.add_argument(\"--repeats\", dest=\"repeats\", type=int, default=1, help=\"repeats\")\n\n parser.add_argument(dest=\"image\", default=None,\n help=\"select the test image to run\")\n\n args = parser.parse_args()\n return args", "def parse_args():\n\n parser = argparse.ArgumentParser(description='Disk metric sender')\n parser.add_argument('-v', '--verbose', action='store_true', default=None, help='Verbose?')\n parser.add_argument('--debug', action='store_true', default=None, help='Debug?')\n\n return parser.parse_args()", "def _parse_args(argv):\n parser = make_parser()\n args = parser.parse_args(argv)\n LOGGER.setLevel(to_log_level(args.loglevel))\n\n if not args.inputs:\n if args.list:\n tlist = \", \".join(API.list_types())\n _exit_with_output(\"Supported config types: \" + tlist)\n elif args.env:\n cnf = os.environ.copy()\n _output_result(cnf, args.output, args.otype or \"json\", None, None)\n sys.exit(0)\n else:\n parser.print_usage()\n sys.exit(1)\n\n if args.validate and args.schema is None:\n _exit_with_output(\"--validate option requires --scheme option\", 1)\n\n return args", "def arg_parse():\n parser = argparse.ArgumentParser()\n # defines command line arguments\n parser.add_argument('-i', '--input_file', help=\"The input .txt file \" +\n \"within the current directory (the file you \" +\n \"downloaded from VEP\")\n parser.add_argument('-o', '--output_file', help=\"Name for the output \" +\n \".txt file\")\n\n return parser.parse_args()", "def _parse_cli_opts(self, args):\n self._args = args\n for opt, group in self._all_cli_opts():\n opt._add_to_cli(self._oparser, group)\n\n return self._parse_config_files()", "def parse_command_line():\n parser = argparse.ArgumentParser(prog='scoring')\n parser.add_argument(\"pdb_list\", help=\"list of PDB structures\")\n script_args = parser.parse_args()\n return script_args", "def parse_command_line():\n parser = ArgumentParser(description=__doc__,\n formatter_class=RawTextHelpFormatter,\n epilog=\"For a list of table formats check this page: \"\n \"https://github.com/astanin/python-tabulate#table-format\"\n )\n requir = parser.add_argument_group(\"required arguments\")\n requir.add_argument(\"-f\", \"--find\",\n required=True,\n help=\"Search string to identify\"\n )\n requir.add_argument(\"-k\", \"--client_id\",\n required=True,\n help=\"CrowdStrike API client ID\"\n )\n requir.add_argument(\"-s\", \"--client_secret\",\n required=True,\n help=\"CrowdStrike API client secret\"\n )\n parser.add_argument(\"-r\", \"--reverse\",\n help=\"Reverse the sort.\",\n default=False,\n action=\"store_true\"\n )\n parser.add_argument(\"-t\", \"--types\",\n help=\"Types to search (indicator, report or actor). 
Comma delimited.\"\n )\n parser.add_argument(\"-tf\", \"--table_format\",\n help=\"Set the table format.\",\n default=\"fancy_grid\"\n )\n parser.add_argument(\"-o\", \"--output_prefix\",\n help=\"Output filename prefix for storing results (CSV format).\",\n default=None\n )\n\n parsed = parser.parse_args()\n allow = [\"indicator\", \"report\", \"actor\"]\n parsed.types = [t for t in parsed.types.split(\",\") if t in allow] if parsed.types else allow\n\n return parsed", "def process_command_line_arguments() -> Namespace:\n\n parser = build_parser()\n arguments = parser.parse_args()\n\n return arguments", "def parse_args():\n parser = ArgumentParser(\n description=\"This is a script for auto apply ipex optimization.\"\n \"\\n################################# Basic usage ############################# \\n\"\n \"\\n 1. Apply ipex optimization with fp32 data type\\n\"\n \"\\n >>> python -m intel_extension_for_pytorch.cpu.auto_ipex python_script args \\n\"\n \"\\n 2. Apply ipex optimization with bf16 data type\\n\"\n \"\\n >>> python -m intel_extension_for_pytorch.cpu.auto_ipex --dtype bfloat16 python_script args \\n\",\n formatter_class=RawTextHelpFormatter,\n )\n\n add_auto_ipex_params(parser, auto_ipex_default_enabled=True)\n\n # positional\n parser.add_argument(\n \"program\",\n type=str,\n help=\"The full path to the proram/script to be launched. \"\n \"followed by all the arguments for the script\",\n )\n # rest from the training program\n parser.add_argument(\"program_args\", nargs=REMAINDER)\n return parser.parse_args()", "def parse_args() -> argparse.ArgumentParser:\n parser = argparse.ArgumentParser(description=__doc__)\n\n # Enable command line arguments\n parser.add_argument('--process',\n action='store_true',\n help='Run data processing')\n parser.add_argument('--classify',\n action='store_true',\n help='Run classification')\n parser.add_argument('--lcs_file',\n type=str,\n help='Path to lcs file')\n parser.add_argument('--datasets_file',\n type=str,\n help='Path to datasets file')\n parser.add_argument('--mode',\n type=str,\n help=('Type of data to classify. 
r=realtime, f=full, r'\n 'fp=realtime+force_photo, ffp=full+force_photo'))\n parser.add_argument('--results_outfile',\n type=str,\n help='Filename to store results',\n default='KNC-Live_Results.csv')\n parser.add_argument('--results_dir',\n type=str,\n help='Directory to save results',\n default='knc_results/')\n parser.add_argument('--rfc_dir',\n type=str,\n help='Path to directory containing classifiers',\n default='classifiers/')\n parser.add_argument('--id_map_file',\n type=str,\n help='Name of ID map file in classifier directory',\n default='id_map.npy')\n parser.add_argument('--verbose',\n action='store_true',\n help='Print status updates')\n parser.add_argument('--skip_cv',\n action='store_true',\n help='Skip hyperparam optimization')\n parser.add_argument('--distribute',\n action='store_true',\n help='Use multiprocessing')\n\n return parser", "def parse_args():\n parser = argparse.ArgumentParser(description=\"Hyper parameter\")\n parser.add_argument(\n \"--model\", help=\"Model to use\", default=\"DenseNet169\", type=str)\n parser.add_argument(\n \"--optimizer\", help=\"which optimizer to use\", default=\"Adam\", type=str)\n parser.add_argument(\n \"--lr\", help=\"learning rate\", default=2e-5, type=float)\n\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser()\n \n parser.add_argument('--p', dest='path_in',\n action='store', type=str, required=True, default='',\n help=\"Path relative to the data/ directory, to the input ATL01, ANC13, and ANC27 files.\")\n parser.add_argument('--atl01', dest='atl01_file',\n action='store', type=str, required=False, default=None,\n help=\"Path + filename to directory of the ATL01.\")\n parser.add_argument('--anc13', dest='anc13_path',\n action='store', type=str, required=False, default=None,\n help=\"Path to outputs directory of the ANC13.\") \n parser.add_argument('--anc27', dest='anc27_path',\n action='store', type=str, required=False, default=None,\n help=\"Path to directory of the ANC27.\")\n\n args = parser.parse_args()\n \n return args", "def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--start\",\n type=str,\n default=\"\",\n required=False,\n help=\"The start square of the agent, in the form row,col. 
If not specified, this is randomized\"\n )\n parser.add_argument(\n \"--actions\",\n type=str,\n required=True,\n nargs = '+',\n help=\"The actions the agent takes, comma delimited\"\n )\n parser.add_argument(\n \"--observations\",\n type=str,\n required=True,\n nargs = '+',\n help=\"The observations the agent makes, comma delimited\"\n )\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--version',\n metavar=\"<str>\",\n help=\"Input data version number\",\n type=str,\n required=True\n )\n args = parser.parse_args()\n return args", "def parse_args(*opts_args):\n parser = argparse.ArgumentParser()\n cmd = Command()\n parser = cmd.create_parser(\"import_measures\", \"\")\n options = parser.parse_args(opts_args)\n return cmd.parse_options(options.__dict__)", "def cmdline_parser():\n parser = argparse.ArgumentParser(description=\"\"\" \"\"\")\n parser.add_argument(\"-i1\",\n help=\"\"\"viral alignments\"\"\",\n dest=\"viral\",\n required=True)\n parser.add_argument(\"-i2\",\n help=\"\"\"GTA alignments\"\"\",\n dest=\"gta\",\n required=True)\n parser.add_argument(\"-o\",\n dest=\"output\",\n help=\"output image file\")\n return parser", "def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('-e', '--env', default='production',\n help='Environment to check: integration, staging, production.')\n parser.add_argument('-l', '--log_type', default='govuk_assets',\n help='Which logs to check: govuk_assets, govuk_www.')\n parser.add_argument('-c', '--critical_age_minutes', type=int, default=60,\n help='If the newest logs are older than this many minutes, '\n 'return CRITICAL status.')\n parser.add_argument('-F', '--fake_time', type=fromisoformat,\n help='For testing purposes, use the given time as if it\\'s the current '\n 'time. Requires the format YYYY-MM-DDTHH:MM. Assumes UTC.')\n parser.add_argument('-v', '--verbose', action='count',\n help='Show DEBUG log messages.')\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--zarr_dir',\n type=str,\n help='path to directory of zarr files',\n )\n parser.add_argument(\n '--tiff_dir',\n type=str,\n help='path to directory of tiff files',\n )\n parser.add_argument(\n '--output_dir',\n type=str,\n help='path to directory for writing',\n )\n parser.add_argument(\n '--config_path',\n type=str,\n default=None,\n help='path to yaml preprocess config file',\n )\n \n args = parser.parse_args()\n return args", "def cmdline_parser():\n\n # http://docs.python.org/dev/howto/argparse.html\n parser = argparse.ArgumentParser(description=__doc__)\n \n parser.add_argument(\"--verbose\",\n action=\"store_true\",\n help=\"Be verbose\")\n parser.add_argument(\"--debug\",\n action=\"store_true\",\n help=\"Enable debugging\")\n parser.add_argument(\"-b\", \"--bam\",\n required=True,\n help=\"Input BAM file matching vcf\")\n parser.add_argument(\"-i\", \"--vcf\",\n help=\"Input VCF file containing variants to analyze\"\n \" (clashes with --var)\")\n parser.add_argument(\"-v\", \"--var\",\n help=\"Report reads for this variant only. 
Format: chr:pos:ref-alt\"\n \" (clashes with --vcf)\")\n default = 0\n parser.add_argument(\"--mq-filter\",\n dest=\"min_mq\",\n type=int,\n default=default,\n help=\"Ignore reads with mapping quality below this value (default=%d)\" % default)\n default = 5\n parser.add_argument(\"--bq-filter\",\n dest=\"min_bq\",\n type=int,\n default=default,\n help=\"Ignore reads with bases below this value (default=%d)\" % default)\n parser.add_argument(\"-a\", \"--use-orphan\",\n action=\"store_true\",\n help=\"Don't ignore orphan-reads / anomalous read-pairs\")\n\n return parser", "def parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument('-u', '--urls_dirpath', type=unicode)\n parser.add_argument('-r', '--resources_dir', type=unicode)\n parser.add_argument('-t', '--total_docs', type=int)\n parser.add_argument('-m', '--mapping', type=unicode,\n help='File with the yago to lkif mapping')\n\n return parser.parse_args()", "def parse_args():\n global Args\n parser = argparse.ArgumentParser()\n subparsers = parser.add_subparsers()\n pars_simulation(subparsers)\n pars_analyze(subparsers)\n Args = parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--source-account', required=True, type=str, help='Source account to pay transaction fees')\n parser.add_argument('--channel-seeds-file', required=True, type=str, help='File path to channel seeds file')\n parser.add_argument('--accounts', required=True, type=int, help='Amount of accounts to create')\n parser.add_argument('--passphrase', required=True, type=str, help='Network passphrase')\n parser.add_argument('--horizon', action='append', help='Horizon endpoint URL (use multiple --horizon flags for multiple addresses)')\n parser.add_argument('--json-output', required=False, type=bool, help='Export output to json format')\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser(description=\"Re-ID feature extractor\")\n parser.add_argument(\n \"--model\",\n default=\"resources/networks/mars-small128.ckpt-68577\",\n help=\"Path to checkpoint file\")\n parser.add_argument(\n \"--loss_mode\", default=\"cosine\", help=\"Network loss training mode\")\n parser.add_argument(\n \"--mot_dir\", help=\"Path to MOTChallenge directory (train or test)\",\n required=True)\n parser.add_argument(\n \"--detection_dir\", help=\"Path to custom detections. Defaults to \"\n \"standard MOT detections Directory structure should be the default \"\n \"MOTChallenge structure: [sequence]/det/det.txt\", default=None)\n parser.add_argument(\n \"--output_dir\", help=\"Output directory. 
Will be created if it does not\"\n \" exist.\", default=\"detections\")\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser(\"BaselineMembershipInferenceAttack\")\n parser.add_argument(\"--batch_size\",\n type=int, default=128,\n help=\"The batch size of normal training.\")\n parser.add_argument(\"--train_epoch\",\n type=int, default=10,\n help=\"The epoch of training.\")\n parser.add_argument(\"--train_lr\",\n type=float, default=0.0002,\n help=\"The learning rate of training.\")\n args = parser.parse_args()\n return args", "def parse_args():\n parser = argparse.ArgumentParser(description='baseline Mask R-CNN')\n parser.add_argument('--dataset', required=True,\n metavar=\"/path/to/dataset/\",\n help='Directory of the dataset')\n parser.add_argument('--continue_train', type=str, required=False, default='None',\n metavar=\"/path/to/latest/weights.h5\", help=\"Path to lastest training weights .h5 file\")\n parser.add_argument('--weight', required=False,\n metavar='/path/to/pretrained/weight.h5', help=\"Path to trained weight\")\n parser.add_argument('--image', required=False,\n metavar='/path/to/testing/image/directory', help=\"Path to testing image directory\")\n parser.add_argument('--video', required=False,\n metavar='/path/to/testing/image/directory', help=\"Path to testing image directory\")\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser(description=\"SLOWFAST for AVA Dataset\")\n parser.add_argument(\"--pipeline\", type=str,\n default=\"../data/config/slowfast.pipeline\", help=\"SDK infer pipeline\")\n parser.add_argument(\"--data_dir\", type=str, default=\"../data/input\",\n help=\"Dataset contain frames and ava_annotations\")\n args_opt = parser.parse_args()\n return args_opt", "def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"auth\",\n help=\"authentication string for Infermedica API: \"\n \"APP_ID:APP_KEY or path to file containing it.\")\n parser.add_argument(\"--model\",\n help=\"use non-standard Infermedica model/language, \"\n \"e.g. infermedica-es\")\n # TODO: Check if `verbose` actually does anything.\n parser.add_argument(\"-v\", \"--verbose\",\n dest=\"verbose\", action=\"store_true\", default=False,\n help=\"dump internal state\")\n args = parser.parse_args()\n return args", "def parse_args():\n parser = argparse.ArgumentParser(description=app_description,\n formatter_class=argparse.RawTextHelpFormatter)\n parser.add_argument(\"--input-json\", dest=\"input_json_path\", required=True,\n help=input_json_help)\n\n args = parser.parse_args()\n return args", "def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"-d\",\n \"--duration\",\n type=mesos_maintenance.parse_timedelta,\n default=\"1h\",\n help=\"Duration of the maintenance window. Any pytimeparse unit is supported.\",\n )\n parser.add_argument(\n \"-s\",\n \"--start\",\n type=mesos_maintenance.parse_datetime,\n default=str(mesos_maintenance.now()),\n help=\"Time to start the maintenance window. 
Defaults to now.\",\n )\n parser.add_argument(\n \"action\",\n choices=[\n \"cluster_status\",\n \"down\",\n \"drain\",\n \"is_host_down\",\n \"is_host_drained\",\n \"is_host_draining\",\n \"is_hosts_past_maintenance_end\",\n \"is_hosts_past_maintenance_start\",\n \"is_safe_to_drain\",\n \"is_safe_to_kill\",\n \"schedule\",\n \"status\",\n \"undrain\",\n \"up\",\n ],\n help=\"Action to perform on the specified hosts\",\n )\n parser.add_argument(\n \"hostname\",\n nargs=\"*\",\n default=[getfqdn()],\n help=\"Hostname(s) of machine(s) to start draining. \"\n \"You can specify <hostname>|<ip> to avoid querying DNS to determine the corresponding IP.\",\n )\n parser.add_argument(\n \"-v\",\n \"--verbose\",\n action=\"count\",\n dest=\"verbose\",\n default=0,\n help=\"Print out more output.\",\n )\n return parser.parse_args()", "def __parse_cmd_args():\n parser = argparse.ArgumentParser(description='Python Image Downloader.')\n parser.add_argument(\"-f\", \"--file\",\n help=\"Where the URL file is located.\")\n parser.add_argument(\"-d\", \"--dir\",\n help=\"Where the downloaded files are to be stored.\")\n args = parser.parse_args()\n return args", "def parse_args():\n parser = argparse.ArgumentParser(description='Crawl an Android app store for apk files.')\n parser.add_argument('--store', dest='api', choices=['GooglePlay', 'F-Droid'], required=True,\n help='Specifies the store to crawl. At the moment only Google Play is supported.')\n parser.add_argument('--meta', dest='meta', required=False, action='store_const', default=False, const=True,\n help='If set, no apps will be downloaded, but the meta_data will be saved.')\n parser.add_argument('--basedir', dest='base_dir', type=str, default=os.getenv('HOME'),\n required=False, help='Specifies the base path for both logs and apk_downloads.')\n parser.add_argument('--credentials', dest='credentials', type=str, required=False, default=None,\n help='Specifies the path to a credential file in .toml format.')\n parser.add_argument('--limit', dest='limit', type=int, required=False, default=None,\n help='Specifies the maximum number of apks per category to download.')\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser(description=\"Reads in iClicker data from the submission \"\n \"directory, and then writes it to the given \"\n \"remote file.\")\n parser.add_argument(\"submission_directory\", type=str, help=\"Directory of submissions that \"\n \"contain a 'textbox_0.txt' file that references iClicker ID\")\n parser.add_argument(\"remote_id_file\", type=str)\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser(\n description=\"Subscription Watch CSV file packaging script\", prog=sys.argv[0])\n\n # required args\n parser.add_argument(\"-f\", \"--filepath\", required=True,\n help=\"path to files to package\")\n parser.add_argument(\n \"-s\",\n \"--max-size\",\n type=int,\n default=DEFAULT_MAX_SIZE,\n help=f\"Maximum size of packages in MiB. 
(Default: {DEFAULT_MAX_SIZE} MiB)\",\n )\n parser.add_argument(\n \"-o\", \"--overwrite\", action=\"store_true\", default=False, help=\"whether to overwrite existing files.\"\n )\n parser.add_argument(\"--ocp-cluster-id\", required=True,\n help=\"OCP Cluster ID\")\n parser.add_argument(\"-v\", \"--verbosity\", action=\"count\",\n default=0, help=\"increase verbosity (up to -vvv)\")\n return parser.parse_args()", "def parse_command_line(argv):\n parser = argparse.ArgumentParser(description=__doc__)\n parser.add_argument(\n \"--logfile\",\n default=None,\n type=str,\n help=\"If specified, write logs to this file.\",\n )\n parser.add_argument(\n \"--loglevel\",\n help=\"Set logging level (DEBUG, INFO, WARNING, ERROR, or CRITICAL).\",\n default=\"INFO\",\n )\n return parser.parse_args(argv[1:])", "def parse_args():\n parser = argparse.ArgumentParser(\n description='Run the destination IoT program (CTRL-C to exit)')\n\n parser.add_argument('-v',\n '--verbose',\n default=False,\n action='store_true',\n help='Print all debug logs')\n\n parser.add_argument('-p',\n '--port',\n metavar='<port number>',\n default=7777,\n type=int,\n help='Default: 7777')\n\n parser.add_argument('-a',\n '--address',\n metavar='<email_address>',\n nargs='*',\n help='Email address(es) to receive notifications')\n\n args = parser.parse_args()\n return args", "def parse_args(args):\n parser = argparse.ArgumentParser(\n description=\"Lookup table generator for Image Comparison\")\n parser.add_argument(\n \"--version\",\n action=\"version\",\n version=\"lookuptable {ver}\".format(ver=__version__))\n parser.add_argument(\n \"-f\",\n \"--folder\",\n dest=\"imagefolder\",\n help=\"path to image folder\",\n type=str,\n metavar=\"STRING\")\n parser.add_argument(\n \"-v\",\n \"--verbose\",\n dest=\"loglevel\",\n help=\"set loglevel to INFO\",\n action=\"store_const\",\n const=logging.INFO)\n parser.add_argument(\n \"-vv\",\n \"--very-verbose\",\n dest=\"loglevel\",\n help=\"set loglevel to DEBUG\",\n action=\"store_const\",\n const=logging.DEBUG)\n return parser.parse_args(args)", "def parse_args():\n parser = argparse.ArgumentParser(description=\"Deep SORT\")\n parser.add_argument(\n \"--input\", help=\"Path to MOTChallenge sequence directory\",\n default=None, required=True)\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser(\n description=\"Check the ERC 20 conformance\", usage=\"fortress-check-erc project contractName\",\n )\n\n parser.add_argument(\"project\", help=\"The codebase to be tested.\")\n\n parser.add_argument(\n \"contract_name\",\n help=\"The name of the contract. Specify the first case contract that follow the standard. 
Derived contracts will be checked.\",\n )\n\n parser.add_argument(\n \"--erc\",\n help=f\"ERC to be tested, available {','.join(ERCS.keys())} (default ERC20)\",\n action=\"store\",\n default=\"erc20\",\n )\n\n parser.add_argument(\n \"--json\",\n help='Export the results as a JSON file (\"--json -\" to export to stdout)',\n action=\"store\",\n default=False,\n )\n\n # Add default arguments from crytic-compile\n cryticparser.init(parser)\n\n return parser.parse_args()", "def parse_command_line_arguments(argv):\n print(\"reading command line arguments in...\")\n\n parser = argparse.ArgumentParser(description='Description of your program')\n parser.add_argument('-i', '--input', help='Location of input csv file', required=True)\n parser.add_argument('-p', '--predicting', help='The column name containing the category to predict', required=True)\n parser.add_argument('-s', '--scoring', help='The scoring type to be used with model evaluation', required=False)\n parser.add_argument('-c', '--scale', help='List of column names to scale values for', nargs='+', required=False)\n args = parser.parse_args()\n\n return args.input, args.predicting, args.scoring, args.scale", "def parse_args():\n p = argparse.ArgumentParser(\n description='Parse system logs, for fun or something')\n p.add_argument('-l', '--log', dest='log_file', help='The log file')\n p.add_argument('-f', '--filter', dest='filter', help='filter by daemon')\n return p.parse_args()", "def parse_args(self, argv=None):\n self.opts, self.args = self.cli_parser.parse_args(argv)\n self._begin_logging()\n if argv is None:\n argv = sys.argv\n logger.info(' '.join(argv))\n self._process_input_files()\n self._construct_links_of_interest()\n self._open_output_files()\n data = self._construct_data_struct()\n return data", "def parse_arguments(args=sys.argv[1:]):\n \n parser = argparse.ArgumentParser()\n \n parser.add_argument('-i', '--input',\n help=\"Path of input file to read. 
Default: {d}\".format(d=INPUT_FILE),\n default=INPUT_FILE)\n \n return parser.parse_args(args)", "def parse_arguments():\n parser = argparse.ArgumentParser(prog='AdapterRunner', description='Adapter Runner Application')\n parser.add_argument('-a', '--application', action='store', dest='app_name', help='Application Name',\n metavar='<application_name>')\n parser.add_argument('-fi', '--fetch_interval', action='store', dest='fetch_stats_interval', help='Fetch Stats Interval',\n metavar='<fetch_interval in seconds>')\n return parser.parse_args()", "def parse_cmdline_args():\n parser = argparse.ArgumentParser(description=\"Guesses the functional element for host.\")\n ##\n ## Internal options\n ##\n parser.add_argument(\"--json\", dest=\"json\", action='store_true', help=\"output in JSON\")\n\n ##\n ## PuppetDB options\n ##\n pdbconf = PdbConfig()\n pdbconf.add_standard_args(parser)\n\n parser.add_argument(\"host\", metavar=\"HOST\",\n help=\"hostnames to query for FE\")\n\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser(description=\"Bandits algorithms on a click-through \"\n \"rate dataset.\")\n parser.add_argument('--plot', action='store_true')\n return parser.parse_args()", "def parse_cli():\n args = {}\n arg_name = None\n arg_values = None\n parameters = {}\n\n cli_args = sys.argv\n found_params = False\n skip = True\n iterator = enumerate(cli_args)\n\n for idx, arg in iterator:\n if skip:\n skip = False\n continue\n else:\n skip = True\n\n if arg == \"--params\":\n if arg_name:\n args[arg_name] = \" \".join(arg_values)\n found_params = True\n skip = False\n\n elif arg[0:2] == \"--\" and not found_params:\n if arg_name:\n args[arg_name] = \" \".join(arg_values)\n arg_name = arg[2:]\n arg_values = []\n skip = False\n\n elif arg[0:2] == \"--\" and found_params:\n raise ValueError(\"You are trying to specify an argument after the \"\n \"--params argument. Please change the order.\")\n\n elif arg[0] == \"-\" and arg[0:2] != \"--\" and found_params:\n parameters[cli_args[idx][1:]] = cli_args[idx+1]\n\n elif arg[0] == \"-\" and arg[0:2] != \"--\" and not found_params:\n raise ValueError(\"You either try to use arguments with only one lea\"\n \"ding minus or try to specify a hyperparameter bef\"\n \"ore the --params argument. 
%s\" %\n \" \".join(cli_args))\n elif arg[0:2] != \"--\" and not found_params:\n arg_values.append(arg)\n skip = False\n\n elif not found_params:\n raise ValueError(\"Illegal command line string, expected an argument\"\n \" starting with -- but found %s\" % (arg,))\n\n else:\n raise ValueError(\"Illegal command line string, expected a hyperpara\"\n \"meter starting with - but found %s\" % (arg,))\n\n return args, parameters", "def parse_command_line():\n\n desc = \"Perform fluid dynamics simulations.\"\n parser = argparse.ArgumentParser(description=desc)\n\n # Parameter file\n help_txt = \"name of the configuration file (default is 'config.ini.')\"\n parser.add_argument(\"-f\", \"--file\", metavar=\"FILE\", default=\"config.ini\",\n required=False, dest=\"config_file\", help=help_txt)\n\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser(\"Run arguments for system submitted tasks\")\n\n parser.add_argument(\"-f\", \"--funcs\", type=str, nargs=\"?\", required=True,\n help=\"path to pickle file containing a list of \"\n \"functions/methods that should be run by the \"\n \"submitted process\"\n )\n parser.add_argument(\"-k\", \"--kwargs\", type=str, nargs=\"?\", required=False,\n default=None,\n help=\"path to pickle file containing a dictionary of \"\n \"keyword argumnets that should be passed to the \"\n \"functions\")\n parser.add_argument(\"-e\", \"--environment\", type=str, nargs=\"?\",\n required=False,\n help=\"Optional comma-separated environment variables, \"\n \"which should be given as \"\n \"VARNAME1=value1,VARNAME2=value2 and so on. These \"\n \"will be separated and instantiated into Python's \"\n \"os.environ\")\n\n return parser.parse_args()", "def parse_args():\n\tparser = argparse.ArgumentParser(description='Show video statistics.')\n\tparser.add_argument('--sort', metavar='FIELD', choices=['views', 'likes', 'dislikes'],\n\t default='views',\n\t help='sort by the specified field. 
Options are views, likes and dislikes.')\n\tparser.add_argument('--max', metavar='MAX', type=int, help='show the top MAX entries only.')\n\tparser.add_argument('--csv', action='store_true', default=False,\n\t help='output the data in CSV format.')\n\tparser.add_argument('--table', action='store_true', default=False,\n\t help='output the data in an ascii table.')\n\tparser.add_argument('--workers', type=int, default=8,\n\t help='number of workers to use, 8 by default.')\n\treturn parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser(description=\"Sequence Cleaner: Remove Duplicate Sequences, etc\",\n epilog=\"example > sequence_cleaner -q INPUT -o OUTPUT\")\n parser.add_argument('-v', '--version', action='version', version='sequence_cleaner {}'.format(version))\n parser.add_argument(\"-q\", \"--query\", help=\"Path to directory with FAST(A/Q) files\", required=True)\n parser.add_argument(\"-o\", \"--output_directory\", help=\"Path to output files\", required=True)\n parser.add_argument(\"-ml\", \"--minimum_length\", help=\"Minimum length allowed (default=0 - allows all the lengths)\",\n default=\"0\")\n parser.add_argument(\"-mn\", \"--percentage_n\", help=\"Percentage of N is allowed (default=100)\", default=\"100\")\n parser.add_argument('--keep_all_duplicates', help='Keep All Duplicate Sequences', action='store_false', required=False)\n parser.add_argument('--remove_ambiguous', help='Remove any sequence with ambiguous bases', action='store_true', required=False)\n\n parser.add_argument('-l', '--log', help='Path to log file (Default: STDOUT).', required=False)\n\n return parser.parse_args()", "def parse_args():\n \n parser = argparse.ArgumentParser()\n parser.add_argument(\n 'config',\n help='Config file')\n parser.add_argument(\n '--quiet',\n '-q',\n action='store_true',\n help='do not print to console'\n )\n parser.add_argument(\n '--password',\n '-p',\n action='store_true',\n help='Set password in keyring.'\n )\n parser.add_argument(\n '--update',\n '-u',\n action='store_true',\n help='Only add transactions after last date in database.'\n )\n parser.add_argument(\n '--mark_seen',\n '-m',\n action='store_true',\n help='Mark fetched emails as seen.'\n )\n\n return parser.parse_args()", "def command_line_parse(iargs=None):\n\n parser = create_parser()\n inps = parser.parse_args(args=iargs)\n\n return inps", "def _parse_args(self):\n parser = argparse.ArgumentParser()\n _, args = parser.parse_known_args()\n self.args = [a for a in args if a != '']", "def parse_args():\n parser = argparse.ArgumentParser()\n argparse.ArgumentParser(argument_default=False)\n parser.add_argument('-r', type=str, required=True, metavar='[reference.fa]', help=\"Reference file in fasta format\")\n parser.add_argument('-s', type=int, metavar='number of SNPs', help=\"Number of SNPs to introduce\")\n parser.add_argument('-i', type=int, metavar='number of indels', help=\"Number of small indels to introduce. Indel sizes randomly sampled between 1-100bp with a ratio of 9:1 for size being < 11bp\")\n parser.add_argument('-d', type=int, metavar='number of large deletions', help=\"Number of large deletions to introduce. 
Deletion size randomly sampled between 100-5000bp\")\n parser.add_argument('-t', type=int, metavar='number of translocations', help=\"number of translocations, regions will be randomly selected ranging between 500-5000bp in size\")\n parser.add_argument('-nc', type=bool, default=False, metavar='add at least one non-conserved translocation', help=\"True/False add one non-conserved translocation. Deletion occurs from end of the translocation. Default=False\")\n parser.add_argument('-f', type=str, required=True, metavar='output fasta filename', help=\"Output fasta filename and file path\")\n parser.add_argument('-v', type=str, required=True, metavar='output vcf file of mutations', help=\"Output vcf filename and file path\")\n\n return parser.parse_args()" ]
[ "0.80792505", "0.8012675", "0.78432906", "0.78108615", "0.7765357", "0.7751876", "0.77110696", "0.76705295", "0.7647746", "0.7644992", "0.7644233", "0.76380914", "0.75653404", "0.7562569", "0.75440955", "0.7535946", "0.75321084", "0.75190026", "0.75150883", "0.7505613", "0.7491292", "0.7490559", "0.748476", "0.74844605", "0.745384", "0.7451498", "0.74460983", "0.74348456", "0.74266624", "0.7423198", "0.7418547", "0.74038947", "0.7401476", "0.73931134", "0.73880523", "0.7384215", "0.73823494", "0.7372509", "0.73584163", "0.73545295", "0.73405594", "0.73344547", "0.7329731", "0.73226017", "0.73218924", "0.7320626", "0.7316476", "0.73070085", "0.73057014", "0.7296637", "0.7292377", "0.7287461", "0.7287459", "0.7287178", "0.7284422", "0.7273681", "0.7267626", "0.7262759", "0.7262249", "0.7261698", "0.7256247", "0.7254192", "0.72524196", "0.7244782", "0.72432053", "0.72330594", "0.72301143", "0.722756", "0.7222257", "0.7215478", "0.721014", "0.7210101", "0.7209053", "0.7208629", "0.7205561", "0.7199625", "0.71985763", "0.71982235", "0.71981925", "0.7196416", "0.7194187", "0.71856034", "0.71819746", "0.71795917", "0.7174014", "0.7172088", "0.7171325", "0.7170273", "0.716829", "0.716717", "0.71669114", "0.71668017", "0.71638316", "0.71593165", "0.7158985", "0.7157887", "0.7154573", "0.7144226", "0.7138401", "0.7137068", "0.7130928" ]
0.0
-1
Main method to get dependent review IDs of a specific review request on the ReviewBoard.
def main(): parameters = parse_parameters() review_request_url = "%s/api/review-requests/%s/" % (REVIEWBOARD_URL, parameters.review_id) handler = ReviewBoardHandler() review_request = handler.api(review_request_url)["review_request"] review_ids = handler.get_dependent_review_ids(review_request) if parameters.out_file: with open(parameters.out_file, 'w') as f: for r_id in review_ids: f.write("%s\n" % (str(r_id))) else: for r_id in review_ids: print("%s\n" % (str(r_id)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_review_request(self, rid):\r\n rsp = self.api_call('api/review-requests/%s/' % rid)\r\n return rsp['review_request']", "def get_review_request(self, request_id, api_root):\n try:\n request = api_root.get_review_request(review_request_id=request_id)\n except APIError, e:\n raise CommandError(\"Error getting review request: %s\" % e)\n\n return request", "def get_reviews(business_id):\n\n reviews_path = BUSINESS_PATH + business_id + '/reviews'\n\n return request(reviews_path)", "def fetch_reviews(self, rb_id, start=0, max_results=25):\r\n return self.api_call('/api/review-requests/%s/reviews/?start=%s&max-results=%s'\r\n % (rb_id, start, max_results))['reviews']", "def get_reviews(bearer_token, business_id):\n reviews_path = BUSINESS_PATH + business_id + '/reviews'\n\n return request(API_HOST, reviews_path, bearer_token)", "def get_reviews(review_url):\n print review_url\n html = urllib.urlopen(review_url).read()\n soup = bs4.BeautifulSoup(html, 'html.parser')\n\n rating_scores = soup.findAll(\"span\", \"ratingScore\")\n num_ratings = len(rating_scores) - 1\n\n current_reviews = soup.findAll(\"div\", \"currentVintageProfessinalReviews\")\n num_cur_reviews = str(current_reviews).count('ratingProvider')\n past_reviews = soup.findAll(\"ul\", \"pastVintagesProfessionalReviews\")\n num_past_reviews = str(past_reviews).count('ratingProvider')\n\n print 'There are {0} reviews for prior vintages of this wine.'.format(num_past_reviews)\n print 'There are {0} current reviews for this vintage.\\n'.format(num_cur_reviews)\n\n rating_provider = soup.findAll(\"span\", \"ratingProvider\")\n rating_score = soup.findAll(\"span\", \"ratingScore\")\n reviewers = re.findall('(?<![A-Z])[>]([A-Z]+(?![A-Z]))', str(rating_provider))\n ratings = re.findall('(?<![A-Z])[0-9]{2}(?![A-Z])', str(rating_score))\n\n print \"Ratings List:\", ratings\n print \"Current Reviews: \", num_cur_reviews\n\n currentreviews = []\n for j in range(num_cur_reviews):\n print \"Current Review #\"+str(j+1)+\":\", reviewers[j], ratings[j]\n currentreviews.append((reviewers[j], ratings[j]))\n print currentreviews\n\n print \"\\nPast Reviews: \", num_past_reviews\n past_review_ratings = []\n for k in range(num_cur_reviews, num_past_reviews+num_cur_reviews):\n #print \"Past Review #\"+str(k-num_cur_reviews+1)+\":\", reviewers[k], int(ratings[k])\n past_review_ratings.append(float(ratings[k]))\n if k > 30:\n break\n if num_past_reviews != 0:\n avg_past_reviews = sum(past_review_ratings)/len(past_review_ratings)\n round(avg_past_reviews, 2)\n else:\n avg_past_reviews = 0\n\n print \"Average of Past Reviews: \", avg_past_reviews\n\n return currentreviews, avg_past_reviews", "def test_get_url_on_review_request(self) -> None:\n review_request = self.create_review_request()\n\n self.assertEqual(\n self.action.get_url(context=self._create_request_context(\n review_request=review_request,\n url_name='review-request-detail')),\n '/r/%s/diff/raw/' % review_request.display_id)", "def get_completed_chart_reviews(self, request):\n reqParams = request.GET\n project = reqParams.get('project', None)\n cohort = reqParams.get('cohort', None)\n patient_id = reqParams.get('patient_id', None)\n queryset = self.search_chart_review_data(project, cohort, patient_id)\n page = self.paginate_queryset(queryset)\n data = []\n\n if page is not None:\n serializer = self.get_serializer(page, many=True)\n data = serializer.data\n\n return self.get_paginated_response(data)", "def get_review(self, id):\n endpoint = '/v3/educator/reviews/%s' % id\n result = 
self.request(endpoint)", "def build_indices(review_ids):\n\n review_indices = {}\n\n # Load qrel_abs_train txt file\n clef_data = pd.read_csv(config.TRAIN_QREL_LOCATION, sep=\"\\s+\", names=['review_id', 'q0', 'pmid', 'included'])\n\n # Get index of documents for each review\n for review_id in review_ids:\n index = clef_data.index[clef_data['review_id'] == review_id].tolist()\n\n # Get the range of index for all documents within each review\n review_indices[review_id] = (min(index), max(index) + 1)\n\n return review_indices", "def get_review_status(pr_id):\n reviews = get_status_json(pr_id, 'reviews')\n requests = get_status_json(pr_id, 'reviewRequests')\n\n requested_authors = [r[\"login\"] for r in requests]\n\n review_status = {}\n for r in reviews:\n author = r['author']['login']\n date = datetime.fromisoformat(r['submittedAt'].strip('Z'))\n state = r['state']\n if author not in review_status:\n review_status[author] = ReviewComment(state, date, author)\n elif state != 'COMMENTED' and review_status[author].date < date:\n review_status[author] = ReviewComment(state, date, author)\n for a in review_status:\n if a in requested_authors:\n review_status[a] = ReviewComment('REVIEW_REQUESTED', review_status[a].date, a)\n for a in requested_authors:\n if a not in review_status:\n review_status[a] = ReviewComment('UNRESPONSIVE', None, a)\n return review_status, requested_authors", "def get_review(review_id):\n return get(cls, review_id)", "def _request_reviews(self, token, owner, repo, number, reviewers):\n post_data = {'reviewers': reviewers.split(',')}\n headers = {'Authorization': 'Basic ' + token}\n response = requests.post(\n flask.current_app.config['GITHUB_API_CREATE_REVIEW_REQUEST'].format(owner=owner, repo=repo, number=number),\n data=json.dumps(post_data), headers=headers)\n\n return response", "def get_parent_rr(review_request_details, commit_data=None):\n commit_data = fetch_commit_data(review_request_details, commit_data)\n\n if not is_pushed(review_request_details, commit_data):\n return None\n\n if is_parent(review_request_details, commit_data):\n return review_request_details\n\n identifier = commit_data.get_for(review_request_details, IDENTIFIER_KEY)\n\n return ReviewRequest.objects.get(\n commit_id=identifier,\n repository=review_request_details.repository)", "def parse_parameters():\n parser = argparse.ArgumentParser(\n description=\"Get all dependent review IDs\")\n parser.add_argument(\"-r\", \"--review-id\", type=str, required=True,\n help=\"Review ID\")\n parser.add_argument(\"-o\", \"--out-file\", type=str, required=False,\n help=\"The out file with the reviews IDs\")\n return parser.parse_args()", "def find_reviews():\n print(\"***** Find Reviews of a Business *****\")\n while (True):\n print()\n business_object = query_business_name()\n if business_object == \"back\":\n return\n elif business_object is None:\n continue\n id = business_object['business_id']\n review_object = review_col.find({\"business_id\": id})\n print(f'{business_object[\"name\"]} has'\n f' {business_object[\"review_count\"]} '\n f'reviews:')\n for review in review_object:\n userid = review['user_id']\n print(f'- ({review[\"stars\"]}):'\n f' {review[\"text\"]}.'\n f' {review[\"date\"]}')", "def get_request_ids(request_id=None, workload_id=None, session=None):\n if request_id:\n return [request_id]\n return get_request_ids_by_workload_id(workload_id)", "def get_reviews(recipe_id=None):\n\n recipe = storage.get(Recipe, recipe_id)\n print(recipe)\n if not recipe:\n abort(404)\n reviews = []\n for review 
in recipe.reviews:\n reviews.append(review.to_dict())\n return jsonify(reviews)", "def one_review(review_id=None):\n if review_id:\n for item in storage.all(Review).values():\n if review_id == item.id:\n return (jsonify(item.to_dict()))\n abort(404)", "def getTaskIds(self, director):\n # the computation record\n computation = self._getComputationRecord(director)\n \n # search for tasks\n iworker = self.inventory.iworker\n tasks = computation.findTasks(director.clerk.db, iworker)\n\n ids = [t.id for t in tasks]\n return ','.join(ids)", "def get_reviews(review_id):\n if review_id:\n review = storage.get(Review, review_id) # retrieves obj\n if review is None:\n return jsonify({'error': 'Not found'}), 404\n if request.method == 'DELETE':\n storage.delete(review) # deletes\n storage.save()\n return jsonify({}), 200\n elif request.method == 'PUT':\n js = request.get_json()\n if js is None:\n return jsonify({'error': 'Not a JSON'}), 400\n js.pop('id', None)\n js.pop('user_id', None)\n js.pop('place_id', None)\n js.pop('created_at', None)\n js.pop('updated_at', None)\n for key, value in js.items():\n setattr(review, key, value) # updates\n review.save()\n return jsonify(review.to_dict()), 200\n else:\n return jsonify(review.to_dict()), 200\n\n if request.method == 'POST':\n js = request.get_json()\n if js is None:\n return jsonify({'error': 'Not a JSON'}), 400\n if js.get('user_id', None) is None:\n return jsonify({'error': 'Missing user_id'}), 400\n if js.get('text', None) is None:\n return jsonify({'error': 'Missing text'}), 400\n obj = Review(**js) # creates\n obj.save()\n return jsonify(obj.to_dict()), 201\n\n reviews = []\n reviews_obj = storage.all('Review') # retrieves list obj\n for obj in reviews_obj:\n reviews.append(reviews_obj[obj].to_dict())\n return jsonify(reviews)", "def reviews(self):\n reviewList = []\n for review in storage.all(Review).values():\n if review.getattr('place_id') == self.id:\n reviewList.append(review)\n return(reviewList)", "def dependent_prs(self):\n comments = self.data['body'].replace('\\r\\n', ' ')\n for comment in self.comments():\n comments += comment['body'].replace('\\r\\n', ' ')\n\n dependent_prs = []\n dependent_keywords = ['depends on']\n for keyword in dependent_keywords:\n pattern = r'%s %s/(\\S+)/(\\S+)/pull/(\\d+)' % (keyword, GITHUB)\n LOGGER.info(\"Finding dependent PRs by '%s' in the comments\")\n dependent_prs += re.findall(pattern, comments)\n return set(dependent_prs)", "def task_reviews_collection(request, task_id):\n try:\n task = Task.objects.get(id=task_id)\n except Task.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n if request.method == \"GET\":\n reviews = Review.objects.filter(task=task).all()\n serializer = ReviewSerializer(reviews, many=True)\n return Response(serializer.data)", "def get_context_data(\n self,\n **kwargs,\n ) -> Dict[str, Any]:\n review_request = self.review_request\n draft = review_request.get_draft(self.request.user)\n\n # We only want to show one label. If there's a draft, then that's\n # the most important information, so we'll only show that. 
Otherwise,\n # we'll show the submitted/discarded state.\n label = None\n\n if draft:\n label = ('review-request-infobox-label-draft', _('Draft'))\n elif review_request.status == ReviewRequest.SUBMITTED:\n label = ('review-request-infobox-label-submitted', _('Submitted'))\n elif review_request.status == ReviewRequest.DISCARDED:\n label = ('review-request-infobox-label-discarded', _('Discarded'))\n\n if label:\n label = format_html('<label class=\"{0}\">{1}</label>', *label)\n\n # Fetch information on the reviews for this review request.\n review_count = (\n review_request.reviews\n .filter(public=True, base_reply_to__isnull=True)\n .count()\n )\n\n # Fetch information on the draft for this review request.\n diffset = None\n\n if draft and draft.diffset_id:\n diffset = draft.diffset\n\n if not diffset and review_request.diffset_history_id:\n try:\n diffset = (\n DiffSet.objects\n .filter(history__pk=review_request.diffset_history_id)\n .latest()\n )\n except DiffSet.DoesNotExist:\n pass\n\n if diffset:\n diff_url = '%s#index_header' % local_site_reverse(\n 'view-diff-revision',\n args=[review_request.display_id, diffset.revision],\n local_site=review_request.local_site)\n else:\n diff_url = None\n\n return {\n 'review_request': review_request,\n 'review_request_label': label or '',\n 'review_request_details': draft or review_request,\n 'issue_total_count': (review_request.issue_open_count +\n review_request.issue_resolved_count +\n review_request.issue_dropped_count +\n review_request.issue_verifying_count),\n 'review_count': review_count,\n 'diffset': diffset,\n 'diff_url': diff_url,\n }", "def test_get_dealer_reviews(self):\n pass", "def getReviewNumbers(singleStoryFooter):\n\twords = singleStoryFooter.get_text()\n\treview = re.compile(r\"Reviews: \\d+\").search(words)\n\tif review:\n\t\treviewNum = review.group()[9:]\n\t\treturn int(reviewNum)\n\telse: return 0", "def _dependency_id(self):\n if self._dependency_ids:\n return self._dependency_ids[0]", "def save_draft(self, review_request):\r\n self.api_call('api/review-requests/%s/draft/save/' %\r\n review_request['id'])\r\n self.debug('Review request draft saved')", "def get_open_reviews(args):\n args['status'] = 'pending'\n if 'max_results' not in args:\n args['max_results'] = 100\n\n client = RBClient(REVIEWBOARD_URL)\n\n # If we have a username and password, login\n if REVIEWBOARD_USERNAME and REVIEWBOARD_PASSWORD:\n client.login(REVIEWBOARD_USERNAME, REVIEWBOARD_PASSWORD)\n\n root = client.get_root()\n\n if not root:\n logger.error(u'Could not get RBClient root')\n return None\n\n try:\n req = root.get_review_requests(**args)\n except APIError:\n logger.exception(u'Error querying API')\n return None\n\n ret = {'total': req.total_results, 'reviews': []}\n review_fmt = u\"[{user}] {summary} ({url}/r/{id})\"\n\n for review in req:\n ret['reviews'].append(review_fmt.format(user=review.get_submitter().username,\n summary=review.summary,\n url=REVIEWBOARD_URL,\n id=review.id))\n\n return ret", "def get_review(self, id_):\n cursor = self._connection.cursor()\n select_command = make_select_command(\"reviews\")\n select_command += \" WHERE id_ = ?\"\n cursor.execute(select_command, (id_,))\n for row in cursor:\n return expandable_from_tuple(row, FIELD_DESCRIPTIONS) \n return None", "def dependent_ids(self) -> List[str]:\n return list(map(as_text, self.connection.smembers(self.dependents_key)))", "def load_reviews(id_reviews=(), load_polarities=False, load_sentences=False, load_words=False, load_deptrees=False):\n from loacore.conf import 
DB_TIMEOUT\n reviews = []\n conn = sql.connect(DB_PATH, timeout=DB_TIMEOUT)\n c = conn.cursor()\n if len(id_reviews) > 0:\n for id_review in id_reviews:\n c.execute(\"SELECT ID_Review, Review.ID_File, File_Index, Review \"\n \"FROM Review WHERE ID_Review = \" + str(id_review) + \" ORDER BY File_Index\")\n result = c.fetchone()\n if result is not None:\n reviews.append(Review(result[0], result[1], result[2], result[3]))\n else:\n c.execute(\"SELECT ID_Review, Review.ID_File, File_Index, Review FROM Review\")\n results = c.fetchall()\n for result in results:\n reviews.append(Review(result[0], result[1], result[2], result[3]))\n\n conn.close()\n\n if load_polarities:\n # Load Polarities\n import loacore.load.polarity_load as polarity_load\n polarity_load.load_polarities_in_reviews(reviews)\n\n if load_sentences:\n # Load Sentences\n import loacore.load.sentence_load as sentence_load\n sentence_load.load_sentences_in_reviews(reviews, load_words=load_words, load_deptrees=load_deptrees)\n\n return reviews", "def get_reviews(place_id):\n return get_all_cls(parent_cls, place_id, \"reviews\")", "def review_by_id(review_id):\n obj = storage.get(\"Review\", review_id)\n if obj is None:\n abort(404)\n return jsonify(obj.to_dict())", "def get_reviews(item_id, shop_id, review_num=10) -> list:\n get_url = f\"{_shopee_base_url}/api/v2/item/get_ratings?filter=0&flag=1&itemid={item_id}&limit={review_num}&offset=0&shopid={shop_id}\"\n r = requests.get(get_url, headers=_user_agent_header, proxies=proxy_dict)\n ratings = r.json()['data']['ratings']\n reviews = []\n for rating in ratings:\n reviews.append({\n 'origin': 'Shopee',\n 'author': rating['author_username'],\n 'rating': rating['rating_star'],\n 'review': rating['comment'], \n 'review_likes': rating['like_count'],\n 'summary': 'Summary is very nice. 
Amazing!'\n })\n return reviews", "def multiple(critic, parameters):\n\n review = jsonapi.v1.reviews.Reviews.deduce(critic, parameters)\n return api.log.rebase.fetchAll(critic, review=review)", "def get_req_ids(actual_pose, target, req_ids, person_ids):\n train_x = []\n train_y = []\n\n for i in req_ids:\n id_mask = (person_ids == i)\n train_x.append(actual_pose[id_mask])\n train_y.append(target[id_mask, 0])\n\n train_x = np.concatenate(train_x)\n train_y = np.concatenate(train_y)\n \n return train_x, train_y", "def get_reviewers(ctx, tickets):\n if ctx.options['--no-reviewer']:\n return []\n reviewers = ctx.options['--reviewer']\n if ctx.pagure and not reviewers:\n reviewers = set()\n for ticket in tickets:\n if ticket.reviewer:\n reviewers.add(ticket.reviewer)\n if len(reviewers) > 1:\n print('Reviewers found: %s' % ', '.join(reviewers))\n ctx.die('Too many reviewers found in ticket(s), '\n 'specify --reviewer explicitly')\n if not reviewers:\n ctx.die('No reviewer found in ticket(s), '\n 'specify --reviewer explicitly')\n username_map = ctx.config.get('trac-username-map', {})\n reviewers = [username_map.get(r, r) for r in reviewers]\n if not reviewers:\n ctx.die('No reviewer found, please specify --reviewer')\n return [normalize_reviewer(ctx, r) for r in reviewers]", "def check_review_files(input, data_review):\n if \"versions\" not in input:\n return None\n\n input_version_ids = []\n for v in input[\"versions\"]:\n _, version_id = from_global_id(v)\n input_version_ids.append(version_id)\n\n # Check all versions exist\n versions = (\n Version.objects.filter(pk__in=input_version_ids)\n .select_related(\"root_file__study\")\n .all()\n )\n if len(versions) != len(input[\"versions\"]):\n raise GraphQLError(\n \"Error in modifying data_review. All file versions in data \"\n \"review must exist.\"\n )\n\n # Check all versions come from same study\n studies = set(\n [v.root_file.study.pk for v in versions] + [data_review.study.pk]\n )\n if len(studies) > 1:\n raise GraphQLError(\n \"Error in modifying data_review. All file versions in data \"\n \"review must have files that belong to the same study.\"\n )\n\n # Check if data review file versions changed\n if data_review:\n if set(data_review.versions.values_list(\"pk\", flat=True)) == set(\n input_version_ids\n ):\n input_version_ids = None\n\n return input_version_ids", "def reviews(self, **kwargs):\n\n path = self._get_movie_id_path('reviews')\n resp = self._get_method(path, kwargs)\n return resp", "def test_get_specific_review_sucess(self):\n # Get the User's Auth Token.\n url = '/api-token-auth/'\n data = {'username': 'adam', 'password': '123'}\n response = Client().post(url, data)\n content = json.loads(response.content)\n user_token = content['token']\n\n # Prepare the header with the client's token.\n http_authorization = 'Token %s' % user_token\n client = Client(HTTP_AUTHORIZATION=http_authorization)\n\n # GET the Reviews.\n response = client.get('/reviews/1/')\n self.assertEqual(response.status_code, 200)\n\n # Check if only reviews related to the user were retrieved.\n content = json.loads(response.content)\n expected = {\n 'id': 1,\n 'rating': 5,\n 'title': 'Loved it!',\n 'summary': 'I loved it! 
Pretty good!',\n 'submission_date': '2020-10-12',\n 'ip_address': '127.0.0.1',\n 'reviewer': 1,\n 'company': 1\n }\n self.assertDictEqual(content, expected)", "def process_reviews(reviews: list):\n\n def process_review(review, i, n):\n print(f'\\rProcessing {i + 1} of {n} reviews', end='')\n return nltk.pos_tag(nltk.word_tokenize(review[0].strip())), review[1]\n\n n = len(reviews)\n processed = [process_review(review, i, n)\n for i, review in enumerate(reviews)]\n return processed", "def get_restaurant_reviews( self, restaurant_id, set_of_users ):\n\n\t\tcondition = ( ( self.df['business_id'] == restaurant_id ) & \n\t\t\t\t\t ( self.df['user_id'].isin(set_of_users) ) )\n\t\treviews = self.df[condition]\n\n\t\t# remove for duplicated user id \n\t\treviews[ reviews['user_id'].duplicated() == False ] \n\t\treturn reviews", "def get_diffstats(review_request, user, rev=None):\n # Ensure we're working with the base review request, not a draft.\n review_request = review_request.get_review_request()\n\n if rev is None:\n # If the user is the submitter we might want to use the draft diffset.\n draft = review_request.get_draft(user=user)\n diffset = ((draft and draft.diffset) or\n review_request.get_latest_diffset())\n else:\n diffset = (\n DiffSet.objects\n .filter(history__pk=review_request.diffset_history_id)\n .filter(revision=rev)).latest()\n\n return diffstats(diffset)", "def delete_review(review_id=None):\n if review_id:\n for item in storage.all(Review).values():\n if review_id == item.id:\n item.delete()\n storage.save()\n return (jsonify({}))\n abort(404)", "def _get_current_reviewers_and_counts(project_name):\n reviewer_change_count_per_project = current_load_fetcher.\\\n get_open_change_reviewers_per_project()\n\n if project_name not in reviewer_change_count_per_project and \\\n project_name != PROJECT_ALL:\n logging.warning(\"Project %s does not have any current reviewers\",\n project_name)\n return []\n\n if project_name == PROJECT_ALL:\n # go through all projects and combine open change counts for each\n # reviewer\n reviewers_changes_counts = \\\n _get_current_change_counts_across_projects(\n reviewer_change_count_per_project\n )\n else:\n reviewers_changes_counts = \\\n reviewer_change_count_per_project[project_name]\n\n return _create_reviewer_current_change_count_info(reviewers_changes_counts)", "def find_n_reviews(x, n, review_books_df):\n asin_1 = x['asin_1']\n asin_2 = x['asin_2']\n\n overall_reviews_1 = review_books_df.query('asin == @asin_1').sort_values(\n 'unixReviewTime').iloc[0:(n+1)].overall.tolist()\n overall_reviews_2 = review_books_df.query('asin == @asin_2').sort_values(\n 'unixReviewTime').iloc[0:(n+1)].overall.tolist()\n\n dic_1 = {'asin': asin_1}\n for i, val in enumerate(overall_reviews_1):\n dic_1[str(i)+\"-th-review\"] = val\n\n dic_2 = {'asin': asin_2}\n for i, val in enumerate(overall_reviews_2):\n dic_2[str(i)+\"-th-review\"] = val\n \n return [dic_1, dic_2]", "def _get_approved_reviewers(self, pull_request_number: int) -> List[str]:\n reviews = get_pull_request_reviews(\n self._repo_name, pull_request_number, self._auth)\n if not reviews:\n return []\n approved_reviewers = set()\n for review in reviews:\n if review['state'] == 'APPROVED':\n if review['user']:\n approved_reviewers.add(review['user']['login'])\n else:\n approved_reviewers.add(\"\")\n return list(approved_reviewers)", "def GetId(self, request=[], default=None):\n idd = False\n \n partName, partRev, _ = request\n# partName, partRev, updateDate = request\n# if updateDate:\n# if partRev:\n# 
criteria=[('engineering_code', '=', partName), ('engineering_revision', '=', partRev),\n# ('write_date', '>', updateDate)]\n# else:\n# criteria=[('engineering_code', '=', partName), ('write_date', '>', updateDate)]\n# else:\n# if partRev:\n# criteria=[('engineering_code', '=', partName), ('engineering_revision', '=', partRev)]\n# else:\n# criteria=[('engineering_code', '=', partName)]\n if isinstance(partRev, int):\n criteria=[('engineering_code', '=', partName), ('engineering_revision', '=', partRev)]\n else:\n criteria=[('engineering_code', '=', partName)]\n\n partIds = self.search(criteria, order='engineering_revision')\n if len(partIds) > 0:\n idd=partIds[len(partIds) - 1].id\n return idd", "def reviews(self):\n cts = storage.all(Review)\n ltcts = []\n for objects in cts.values():\n if self.id == objects.state_id:\n ltcts.append(objects)\n return ltcts", "def get_review(review_id):\n obj = storage.get(Review, review_id)\n if obj is None:\n abort(404)\n return jsonify(obj.to_dict())", "def dependents_key(self):\n return self.dependents_key_for(self.id)", "def run(self, __eatery_id):\n self.start = time.time()\n\t \n print __eatery_id\n instance = ClassifyReviews([eatery_id])\n instance.run()\n #return group(callback.clone([arg, __eatery_id]) for arg in __review_list)()", "def department_reviews(request, department_code):\n department = get_object_or_404(Department, code=department_code)\n\n topic_id_to_course = dict()\n recent_courses = list(\n Course.objects.filter(\n course_filters_pcr_allow_xlist,\n department=department,\n )\n .distinct()\n .values(\"semester\", \"topic_id\", course_title=F(\"title\"), course_code=F(\"full_code\"))\n )\n for c in recent_courses:\n c[\"exclude_from_recent\"] = True\n topic_id = c[\"topic_id\"]\n if (\n topic_id not in topic_id_to_course\n or topic_id_to_course[topic_id][\"semester\"] < c[\"semester\"]\n ):\n topic_id_to_course[topic_id] = c\n\n reviews = list(\n review_averages(\n Review.objects.filter(section__course__department=department),\n reviewbit_subfilters=Q(review_id=OuterRef(\"id\")),\n section_subfilters=Q(id=OuterRef(\"section_id\")),\n fields=ALL_FIELD_SLUGS,\n prefix=\"bit_\",\n extra_metrics=True,\n )\n .annotate(\n topic_id=F(\"section__course__topic_id\"),\n semester=F(\"section__course__semester\"),\n )\n .values()\n )\n for review in reviews:\n course = topic_id_to_course[review[\"topic_id\"]]\n review[\"course_code\"] = course[\"course_code\"]\n review[\"course_title\"] = course[\"course_title\"]\n\n all_courses = reviews + list(topic_id_to_course.values())\n courses = aggregate_reviews(all_courses, \"course_code\", code=\"course_code\", name=\"course_title\")\n\n return Response({\"code\": department.code, \"name\": department.name, \"courses\": courses})", "def review_ask(request):\n\n result = {}\n u = request.user\n p = Product.objects.get_by_sku(request.POST['sku'])\n\n if p is None:\n result[\"result\"] = '0'\n\n # change wish review request pending status\n for w in Wishlist.objects.filter(product=p, party=u):\n w.review = Wishlist.REVIEW_REQUESTED \n w.save()\n\n req, created = ReviewRequest.objects.get_or_create(requester=u, product=p)\n\n # could replace above line with these lines if it causes trouble\n #if not ReviewRequest.objects.filter(requester=u.party, product=p).exists():\n # r = ReviewRequest(requester=u.party, product=p)\n # r.save()\n\n # any previous reviews related to this request is linked\n for rev in Review.objects.filter(product=p):\n rev.reply_to.add(req)\n\n result['result'] = str(req.id)\n\n # add a 
feed\n f = Feed(actor=u, action=Feed.REQUESTED, product=p) \n f.save()\n\n # TODO: notify others that review requested\n\n return JSONHttpResponse(result)", "def find_data_breach_record_reviews(self, breach_id=None):\n return self._request('/api/data-breach-record/'+str(breach_id)+'/reviews')", "def process_gen_reviews(self, gen_reviews):\n clusters = []\n reviews = []\n for rev in gen_reviews:\n label_delimiter = rev.find('>')\n clusters.append(int(rev[5:label_delimiter]))\n if rev[-1]=='>':\n reviews.append(rev[label_delimiter+2:-6])\n else:\n reviews.append(rev[label_delimiter+2:])\n\n return clusters, reviews", "def test_create_with_new_draft(self):\n user1 = User.objects.create(username='reviewer1')\n user2 = User.objects.create(username='reviewer2')\n\n group1 = self.create_review_group(name='group1')\n group2 = self.create_review_group(name='group2')\n\n dep_review_request_1 = self.create_review_request(publish=True)\n dep_review_request_2 = self.create_review_request(publish=True)\n\n review_request = self.create_review_request(\n publish=True,\n bugs_closed='1,20,300',\n commit_id='abc123',\n description_rich_text=True,\n depends_on=[dep_review_request_1, dep_review_request_2],\n rich_text=True,\n target_groups=[group1, group2],\n target_people=[user1, user2],\n testing_done_rich_text=True,\n extra_data={\n 'key': {\n 'values': [1, 2, 3],\n },\n 'mybool': True,\n })\n\n active_file_attachment_1 = self.create_file_attachment(review_request)\n active_file_attachment_2 = self.create_file_attachment(review_request)\n inactive_file_attachment = self.create_file_attachment(review_request,\n active=False)\n\n active_screenshot_1 = self.create_screenshot(review_request)\n active_screenshot_2 = self.create_screenshot(review_request)\n inactive_screenshot = self.create_screenshot(review_request,\n active=False)\n\n # Create the draft.\n draft = ReviewRequestDraft.create(review_request)\n\n # Make sure all the fields are the same.\n self.assertEqual(draft.branch, review_request.branch)\n self.assertEqual(draft.bugs_closed, review_request.bugs_closed)\n self.assertEqual(draft.commit_id, review_request.commit_id)\n self.assertEqual(draft.description, review_request.description)\n self.assertEqual(draft.description_rich_text,\n review_request.description_rich_text)\n self.assertEqual(draft.extra_data, review_request.extra_data)\n self.assertEqual(draft.rich_text, review_request.rich_text)\n self.assertEqual(draft.summary, review_request.summary)\n self.assertEqual(draft.testing_done, review_request.testing_done)\n self.assertEqual(draft.testing_done_rich_text,\n review_request.testing_done_rich_text)\n\n self.assertEqual(list(draft.depends_on.order_by('pk')),\n [dep_review_request_1, dep_review_request_2])\n self.assertEqual(list(draft.target_groups.all()),\n [group1, group2])\n self.assertEqual(list(draft.target_people.all()),\n [user1, user2])\n self.assertEqual(list(draft.file_attachments.all()),\n [active_file_attachment_1, active_file_attachment_2])\n self.assertEqual(list(draft.inactive_file_attachments.all()),\n [inactive_file_attachment])\n self.assertEqual(list(draft.screenshots.all()),\n [active_screenshot_1, active_screenshot_2])\n self.assertEqual(list(draft.inactive_screenshots.all()),\n [inactive_screenshot])\n\n self.assertIsNotNone(draft.changedesc)", "def _get_reviewers(project_name, from_datetime):\n logging.debug(\n \"Getting reviewers for project: %s from datetime: %r\",\n project_name, from_datetime)\n if project_name == PROJECT_ALL:\n # reviewers with changes across all 
projects after from_datetime\n reviewers = Reviewer.objects.filter(\n changes__timestamp__gte=from_datetime).distinct()\n else:\n # reviewers with changes in given project after from_datetime\n reviewers = Reviewer.objects.filter(\n changes__project_name=project_name,\n changes__timestamp__gte=from_datetime).distinct()\n\n logging.debug(\"Found reviewers: %r\", reviewers)\n return reviewers", "def ids(self):\n\n if not hasattr(self, \"_ids\"):\n query = db.Query(\"pub_proc_cg c\", \"c.id\").unique().order(\"c.id\")\n query.join(\"query_term t\", \"t.doc_id = c.id\")\n query.join(\"query_term s\", \"s.doc_id = t.int_val\")\n query.where(\"t.path = '/Term/SemanticType/@cdr:ref'\")\n query.where(\"s.path = '/Term/PreferredName'\")\n query.where(\"s.value = 'Drug/agent'\")\n rows = query.execute(self.cdr_cursor).fetchall()\n self._ids = [row.id for row in rows]\n self.logger.info(\"found %d drug terms\", len(self._ids))\n return self._ids", "def get_recommendar(\n\treviews,\n\tuser_city,\n\trest_city,\n\trest_id_to_int,\n\tmodel=\"baseline\",\n\tk=TOP_K,\n\tremoveSeen=True,\n\tinfer_loc_by_latest_rating_only=True,\n\tlatest_rating_limiter=3\n\t):\n\treturn LocalRecommendar(\n\t\treviews=reviews,\n\t\tuser_city=user_city,\n\t\trest_city=rest_city,\n\t\trest_id_to_int=rest_id_to_int,\n\t\tmodel=model,\n\t\tk=k,\n\t\tremoveSeen=removeSeen,\n\t\tinfer_loc_by_latest_rating_only=infer_loc_by_latest_rating_only,\n\t\tlatest_rating_limiter=latest_rating_limiter\n\t\t)", "def _is_pull_request_reviewer_approved(querier, org, repo, pr_id):\n query = \"\"\"\n query($org:String!, $repo:String!, $pullRequestID:Int!){\n repository(owner: $org, name: $repo){\n protectedBranches(first: 100){\n nodes{\n name\n requiredApprovingReviewCount\n }\n }\n pullRequest(number: $pullRequestID){\n baseRefName\n reviews(last: 100){\n nodes{\n state\n author{\n login\n }\n }\n }\n }\n }\n }\n \"\"\"\n variables = {\n \"pullRequestID\": pr_id,\n \"org\": org,\n \"repo\": repo,\n }\n\n data = querier(query, variables)\n\n base_ref_name = jmespath.search(\n \"data.repository.pullRequest.baseRefName\",\n data\n )\n\n required_approval_count = jmespath.search(\n \"\"\"\n data\n .repository\n .protectedBranches\n .nodes[?name == '{branch}']\n |[0]\n .requiredApprovingReviewCount\n \"\"\".format(branch=base_ref_name),\n data\n )\n\n review_history = jmespath.search(\n \"\"\"\n data\n .repository\n .pullRequest\n .reviews\n .nodes[*]\n .{approver: author.login, state: state}\n \"\"\",\n data\n )\n reviews = {r[\"approver\"]: r[\"state\"] for r in review_history}\n approval_count = reviews.values().count(\"APPROVED\")\n\n if required_approval_count and approval_count < required_approval_count:\n is_approved = False\n else:\n is_approved = True\n\n return is_approved", "def get_project_ids(self, *criterion):\n from wkcdd.models.helpers import get_project_ids\n return get_project_ids([self.id], *criterion)", "def review_by_id(review_id):\n review = storage.get(\"Review\", review_id)\n if review is None:\n abort(404)\n return jsonify(review.to_json())", "def get_data_requests(self, request):\n\n not_reviewed_yet = Task.objects.filter(\n Q(approver_email=request.user.email),\n Q(state=Task.SUCCESS, review_output=True)\n | Q(state=Task.ERROR, review_output=True),\n ).order_by(\"-registered_on\")\n\n reviewed = Task.objects.filter(\n Q(author_email=request.user.email),\n Q(state=Task.RUNNING)\n | Q(state=Task.OUTPUT_RELEASED)\n | Q(state=Task.RELEASE_REJECTED)\n | Q(state=Task.SUCCESS, review_output=False)\n | Q(state=Task.ERROR, 
review_output=False),\n ).order_by(\"-registered_on\")\n\n return Response(\n {\n \"not_reviewed_yet\": TaskSerializer(not_reviewed_yet, many=True).data,\n \"reviewed\": TaskSerializer(reviewed, many=True).data,\n }\n )", "def get_playbook_run_ids(request1_body=None, **kwargs):\n ############################ Custom Code Goes Below This Line #################################\n import json\n import phantom.rules as phantom\n \n outputs = {}\n \n for element in request1_body:\n phantom.debug(element)\n \n # Write your custom code here...\n \n # Return a JSON-serializable object\n assert json.dumps(outputs) # Will raise an exception if the :outputs: object is not JSON-serializable\n return outputs", "def GetReviewers(host, change):\n path = '%s/reviewers' % _GetChangePath(change)\n return FetchUrlJson(host, path)", "def getDependenceIdAt(self, pos):\n return self.sentence[pos].getDependenceId()", "def scraper(storyid, reviews_num, rate_limit=3):\n\n # There may be up to 15 reviews on a single page, therefore the number of\n # pages the reviews are stored on is equal to the following:\n number_of_pages = (reviews_num // 15) + 1\n\n # Returns a list of tuples (based on the contents of _reviews_in_table)\n list_of_review_tuples = []\n\n for p in range(number_of_pages):\n\n soup = soupify('https://www.fanfiction.net/r/' + storyid +\n '/0/' + str(p+1) + '/',\n rate_limit=rate_limit)\n\n for review in _reviews_in_table(soup):\n list_of_review_tuples.append(review)\n\n return list_of_review_tuples", "def get_review_comments_for_pr(owner, repo, pr_number, session=None):\n url = f'{GITHUB_API_URL}/repos/{owner}/{repo}/pulls/{pr_number}/comments'\n return get_one_item_at_a_time(url, session=session)", "def ballot_get_decisions_ids_by_contest(contest_id):\r\n return make_request({\"method\": \"ballot_get_decisions_by_contest\",\r\n \"params\": [contest_id],\r\n \"jsonrpc\": \"2.0\",\r\n \"id\": 0, })", "def review_list_handler():\n def fetch_params():\n release_group = Parser.uuid('uri', 'release_group', optional=True)\n user_id = Parser.uuid('uri', 'user_id', optional=True)\n sort = Parser.string('uri', 'sort', valid_values=['rating', 'created'], optional=True) or 'rating'\n limit = Parser.int('uri', 'limit', min=1, max=50, optional=True) or 50\n offset = Parser.int('uri', 'offset', optional=True) or 0\n include = Parser.list('uri', 'inc', Review.allowed_includes, optional=True) or []\n language = Parser.string('uri', 'language', min=2, max=3, optional=True)\n if language and language not in supported_languages:\n raise InvalidRequest(desc='Unsupported language')\n return release_group, user_id, sort, limit, offset, include, language\n release_group, user_id, sort, limit, offset, include, language = fetch_params()\n reviews, count = Review.list(release_group, user_id, sort, limit, offset, language)\n return jsonify(limit=limit, offset=offset, count=count,\n reviews=[p.to_dict(include) for p in reviews])", "def reviews(self):\n list_reviews = []\n all_reviews = models.storage.all(Review)\n for review_item in all_reviews.items():\n if review_item.place_id == self.id:\n list_review.append(review_item)\n\n return list_review", "def get_reviewers(mentions, pull_request_body, issue_comments, review_comments):\n reviewers_mapping = {}\n\n # Reviewers are supposed to be those mentions who have given thumbs up.\n for comment in issue_comments:\n if comment.user.login in mentions and bool(thumbs_up.search(comment.body)):\n reviewers_mapping[comment.user.login] = dict(\n tagged_at=None,\n responded_at=None,\n 
gave_thumbs_up_at=comment.created_at\n )\n\n # We have reviewers, now, look for the time for their first response in issue comments.\n for comment in issue_comments:\n if comment.user.login in reviewers_mapping.keys() \\\n and reviewers_mapping[comment.user.login]['responded_at'] is None:\n reviewers_mapping[comment.user.login]['responded_at'] = comment.created_at\n\n # TODO: Scrap the time when the PR's body was updated.\n # Story behind: Mostly, people just create the PR and update their body with description and reviewers\n # once the PR is ready for the review. Unfortunately, github v3 api dont have end point for knowing the\n # time when the PR's body was updated. So, most likely, we will not get precise 'tagged_at' attribute\n # for the reviewers that are in PR's body.\n\n # Look for time when each of reviewer was tagged, this should be\n # searched in pull request body and issue comments.\n for login in reviewers_mapping.keys():\n if login in comment.body and reviewers_mapping[login]['tagged_at'] is None:\n if reviewers_mapping[login]['responded_at'] is None \\\n or reviewers_mapping[login]['responded_at'] > comment.created_at:\n reviewers_mapping[login]['tagged_at'] = comment.created_at\n\n # Check for reviewers' first response in review comments too, and\n # update the reviewers_mapping if necessary.\n for comment in review_comments:\n if comment.user.login in reviewers_mapping.keys():\n if reviewers_mapping[comment.user.login]['responded_at'] is None:\n reviewers_mapping[comment.user.login]['responded_at'] = comment.created_at\n elif reviewers_mapping[comment.user.login]['responded_at'] > comment.created_at:\n reviewers_mapping[comment.user.login]['responded_at'] = comment.created_at\n\n return reviewers_mapping", "def test_get_ids(civic, main_data, updated_data):\n assert len(civic._get_ids(main_data['assertions'])) == 0\n assert len(civic._get_ids(main_data['variants'])) == 1\n assert len(civic._get_ids(main_data['genes'])) == 2\n assert len(civic._get_ids(main_data['evidence'])) == 1\n\n assert len(civic._get_ids(updated_data['assertions'])) == 1\n assert len(civic._get_ids(updated_data['variants'])) == 1\n assert len(civic._get_ids(updated_data['genes'])) == 1\n assert len(civic._get_ids(updated_data['evidence'])) == 1", "def get_review(review_id=None):\n\n review = storage.get(Review, review_id)\n if not review:\n abort(404)\n return jsonify(review.to_dict())", "def get_review(review_id):\n review_obj = storage.get(Review, review_id)\n if review_obj:\n return jsonify(review_obj.to_dict())\n else:\n abort(404)", "def test_request_review(self):\n other_pk = self.nodes[1].overlay.my_peer.public_key.key_to_bin()\n self.nodes[0].overlay.request_review(1, other_pk)\n yield self.deliver_messages()\n\n pending_review_requests = self.nodes[1].overlay.trustchain.persistence.get_pending_review_requests(other_pk)\n self.assertTrue(pending_review_requests)\n\n self.nodes[1].overlay.respond_to_review_request(pending_review_requests[0].hash, True)\n yield self.deliver_messages()\n\n pending_review_requests = self.nodes[1].overlay.trustchain.persistence.get_pending_review_requests(other_pk)\n self.assertFalse(pending_review_requests)\n self.assertEqual(self.nodes[0].overlay.trustchain.persistence.get_number_of_known_blocks(), 2)", "def _get_review_comments_body(\n self, pull_request_number: int) -> List[Tuple[str, str]]:\n review_comments = get_pull_request_review_comments(\n self._repo_name, pull_request_number, self._auth)\n if not review_comments:\n return []\n review_comments_msg = []\n 
for comment in review_comments:\n review_comments_msg.append((comment['path'], comment['body']))\n return review_comments_msg", "def get_training_ids(mydb):\n\treturn list(mydb.fetch_table(table_name='training_set',field_names=['CaseId']).CaseId)", "def get_repo_review_comments(owner, repo, session=None):\n url = f'{GITHUB_API_URL}/repos/{owner}/{repo}/pulls/comments'\n return get_one_item_at_a_time(url, session=session)", "def ballot_get_decisions_ids_by_voter(voter_id):\r\n return make_request({\"method\": \"ballot_get_decisions_by_voter\",\r\n \"params\": [voter_id],\r\n \"jsonrpc\": \"2.0\",\r\n \"id\": 0, })", "def review(args):\n try:\n pr = gh.get_pr(owner, repo, args.pull_request)\n except requests.exceptions.HTTPError:\n print('Couldn\\'t find pull request #%s in %s/%s.' %\n (args.pull_request, owner, repo))\n print('Make sure the number is correct and that you have read '\n 'permissions for this GitHub repository.')\n sys.exit(1)\n\n clone_url = pr['head']['repo']['clone_url']\n fork_branch = pr['head']['ref']\n fork_owner = pr['head']['repo']['owner']['login']\n\n repo_lib.fetch_fork(clone_url, fork_branch, fork_owner)\n repo_lib.checkout(fork_branch, fork_owner)\n sys.exit(0)", "def get_pred_ids(predictions):\n le_classes = ['Emotet', 'Mirai', 'Zeus'] \n malwares_dict = {'Emotet': 1, 'Mirai': 2, 'Zeus': 3}\n predicted_ids = []\n \n for idx in predictions:\n pred_name = le_classes[idx]\n pred_id = malwares_dict[pred_name]\n predicted_ids.append(pred_id)\n \n return predicted_ids", "def getRelevantPRData():\n prInfoFromAPI = getPRsFromAPI()\n diffHeader = headers.copy()\n diffHeader['Accept'] = \"application/vnd.github.v3.diff\"\n textForReviewPRs = []\n\n for PR in prInfoFromAPI:\n labels = [label[\"name\"] for label in PR['labels']]\n if \"Text for Review\" in labels:\n diffResponse = requests.get(PR[\"url\"], headers=diffHeader)\n diff = diffResponse.text\n # Add the info the list\n textForReviewPRs.append({\n \"pull_request_link\": PR[\"html_url\"],\n \"diff\": diff\n })\n if int(diffResponse.headers[\"X-RateLimit-Remaining\"]) <= 2:\n print('GitHub api rate limit will be exceeded; the GITHUB_TOKEN env variable needs to be set.')\n break\n return textForReviewPRs", "def get_outputs(self, input_reviews):\r\n split_list = [1] * self.item_pad_num\r\n splitted_review_wordId_intputs = tf.split(input_reviews, split_list, 1)\r\n cnn_outputs = []\r\n for i in range(self.item_pad_num):\r\n input_review = tf.squeeze(splitted_review_wordId_intputs[i], [1])\r\n cnn_output = self.get_single_output(input_review=input_review, index=i)\r\n cnn_outputs.append(cnn_output)\r\n\r\n return cnn_outputs", "def get_possible_ids(self):\n ids = []\n\n dest_data = requests.get(\"https://api.wdpro.disney.go.com/facility-service/destinations/{}\".format(self.__anc_dest_id), headers=getHeaders()).json()\n data = requests.get(dest_data['links']['entertainmentVenues']['href'], headers=getHeaders()).json()\n\n for entry in data['entries']:\n try:\n ids.append(entry['links']['self']['href'].split('/')[-1].split('?')[0])\n except:\n pass\n\n return ids", "def get_dependent_objective_id_terms(self):\n return # osid.search.terms.IdTerm", "def find_issue_id(self):", "def AllocateIds(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def offers_reviews_collection(request, task_id):\n try:\n task = Task.objects.get(id=task_id)\n except Task.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n if request.method == \"GET\":\n offers = 
Offer.objects.filter(task=task).all()\n taskers = offers.values_list(\"tasker\", flat=True)\n reviews = Review.objects.filter(reviewee__id__in=taskers, task__assignee__isnull=False).all().order_by(\"-timestamp\")\n serializer = ReviewSerializer(reviews, many=True)\n return Response(serializer.data)", "def classify_review(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def _get_playground_id(self):\n\n def playground_filter(page: int = 0):\n return {\"filter\": {\"type\": [9], \"page\": page}}\n\n answer = self.client.search_investigations(filter=playground_filter())\n if answer.total == 0:\n raise RuntimeError(\"No playgrounds were detected in the environment.\")\n elif answer.total == 1:\n result = answer.data[0].id\n else:\n # if found more than one playground, try to filter to results against the current user\n user_data, response, _ = self.client.generic_request(\n path=\"/user\",\n method=\"GET\",\n content_type=\"application/json\",\n response_type=object,\n )\n if response != 200:\n raise RuntimeError(\"Cannot find username\")\n username = user_data.get(\"username\")\n\n def filter_by_creating_user_id(playground):\n return playground.creating_user_id == username\n\n playgrounds = list(filter(filter_by_creating_user_id, answer.data))\n\n for i in range(int((answer.total - 1) / len(answer.data))):\n playgrounds.extend(\n filter(\n filter_by_creating_user_id,\n self.client.search_investigations(\n filter=playground_filter(i + 1)\n ).data,\n )\n )\n\n if len(playgrounds) != 1:\n raise RuntimeError(\n f\"There is more than one playground to the user. \"\n f\"Number of playgrounds is: {len(playgrounds)}\"\n )\n result = playgrounds[0].id\n\n logger.debug(f\"Playground ID: {result}\")\n\n return result", "def cron_partner_ids(self):\n pass", "def review_entity_handler(review_id):\n review = Review.query.get_or_404(str(review_id))\n if review.is_archived is True:\n raise NotFound\n include = Parser.list('uri', 'inc', Review.allowed_includes, optional=True) or []\n return jsonify(review=review.to_dict(include))", "def getREVIssues(db):\n return map(trimmedREVDoc,\n db.reviews.find({\"done\": False, \"lgtms\": {\"$exists\": False}}))", "def test_get_hidden_on_review_request(self) -> None:\n self.assertTrue(self.action.get_visible(\n context=self._create_request_context(\n url_name='review-request-detail')))", "def open_request_review_modal(obj, selenium):\n (_get_ui_service(selenium, obj).open_info_page_of_obj(obj).\n open_submit_for_review_popup())\n modal = request_review.RequestReviewModal(selenium)\n modal.wait_until_present()\n return modal", "def _get_information(self):\n reviews = self._tab.find_all(\"div\", class_=\"review\", attrs={'itemprop': 'review'})\n return [(self._get_review(elem), self._get_published_date(elem)) for elem in reviews]" ]
[ "0.57654417", "0.5477173", "0.5378847", "0.5101541", "0.5080031", "0.4982286", "0.49632528", "0.49589026", "0.49061635", "0.48880798", "0.488128", "0.4835317", "0.48325068", "0.4829637", "0.48200688", "0.48102397", "0.47959515", "0.4773191", "0.4731234", "0.4722772", "0.4721991", "0.4713366", "0.46949184", "0.46884874", "0.46866146", "0.46725786", "0.46725452", "0.46543872", "0.46502155", "0.4646397", "0.46461037", "0.46435386", "0.46341643", "0.46335217", "0.46236423", "0.46070418", "0.46060032", "0.45929664", "0.45798194", "0.45728022", "0.45700163", "0.4569141", "0.45608562", "0.45533565", "0.45463666", "0.45317265", "0.4530752", "0.45104018", "0.45101324", "0.45082387", "0.44920763", "0.44887868", "0.4481015", "0.44685504", "0.4463508", "0.4462432", "0.44607398", "0.4441797", "0.44417593", "0.44281328", "0.4424175", "0.44237086", "0.44235528", "0.4421035", "0.4409393", "0.43953493", "0.43803963", "0.4375565", "0.43721598", "0.4366847", "0.43590173", "0.43555844", "0.43555325", "0.4352946", "0.43527824", "0.43464768", "0.4342137", "0.4321572", "0.43209848", "0.43180037", "0.4312672", "0.43121743", "0.43095982", "0.4305824", "0.43053895", "0.4302593", "0.43010953", "0.42956054", "0.4294625", "0.42868307", "0.4284321", "0.42836323", "0.42808568", "0.4280707", "0.42761636", "0.4276151", "0.42601857", "0.42575967", "0.42550382", "0.4253029" ]
0.6459412
0
Initialize with a user-supplied list of segments.
def __init__(self, segments, lemma=None, case=None):
    self.segments = segments
    if isinstance(self.segments, str):
        self.segments = [Segment.new_segment(s) for s in self.segments]
    self.lemma = lemma
    self.case = case
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def construct_segments(self):\n for strand in self.strand_list:\n strand.construct_segment()", "def set_segments(self, segments):\n self.send_command(Command.SET_SEGMENT_COUNT, [segments])", "def form_segment(self, node_oid):\n # init empty segment and stuff\n new_segment = Segment()\n new_segment.save()\n name = \"%s_seg_%s\" % (self.PREFIX, new_segment.id)\n node = Node.objects(id=node_oid)[0]\n list_id = DripCampaign.objects(id=node[\"drip_campaign_id\"])[0][\"list_id\"]\n node.update(set__segment_oid=new_segment.id, set__updated_at=datetime.utcnow())\n\n # gather all users that apply for this node after triggers on previous nodes\n all_euids = set()\n if node[\"initial\"]:\n all_euids = set(List.objects(list_id=list_id)[0][\"members_euid\"])\n else:\n for trg in Trigger.objects(node_to=node_oid):\n for euids, to_node_oid in self.segment_by_triggers(trg[\"node_from\"]):\n if to_node_oid == node_oid:\n all_euids.update(set(euids))\n\n # # intersect euids with current state of the list\n # # it might be the case that some people are removed from the list since previous email\n self.fetch_members_for_list(list_id)\n all_euids = all_euids & set(List.objects(list_id=list_id)[0][\"members_euid\"])\n\n all_euids = list(all_euids)\n\n # apply the user list to segment n stuff\n # if user list is empty, save only meta info and don't actually work with mailchimp\n if all_euids:\n segment_id = self.mw.create_segment(list_id, name)\n self.mw.update_segment_members(list_id, segment_id, all_euids)\n else:\n segment_id = None\n new_segment.update(set__segment_id=segment_id, set__name=name, members_euid=all_euids,\n set__updated_at=datetime.utcnow())", "def __load_segments(self):\r\n self.__segments = []\r\n if len(self.points) > 1:\r\n s = self.points[0]\r\n k = 1\r\n while k < len(self.points):\r\n e = self.points[k]\r\n self.__segments.append(Segment(s, e))\r\n s = e \r\n k += 1\r\n e = self.points[0]\r\n self.__segments.append(Segment(s, e))", "def setSegments(self, segments):\n for point, segment in zip(self.points, segments):\n point.set(segment.p1)", "def __init__(self, sets: List[ColdStartUserSet]):\n self.sets = sets", "def __init__(self, segments, display_res=\"1920x1080\", stream_id=None):\n self.segments = segments\n self.display_res = display_res\n self.stream_id = stream_id\n self.o22 = []\n self.mode = None", "def create_network_segments(self, tenant_id, network_id,\n network_name, segments):", "def _setup_splits(self):\n #ntot = self.reredux_conf['nperfile']\n ntot = self.reredux_conf['Ngals']\n npersplit = self.runconf['nper']\n\n self.beglist, self.endlist = get_splits(ntot, npersplit)", "def __init__(__self__, *,\n segment_name: Optional[pulumi.Input[str]] = None):\n if segment_name is not None:\n pulumi.set(__self__, \"segment_name\", segment_name)", "def _trainBySegments(self, divisions, trainingSet):\n # train the subdomain ROMs\n counter, remainder = divisions\n roms = self._trainSubdomainROMs(self._templateROM, counter, trainingSet, self._romGlobalAdjustments)\n # if there were leftover domain segments that didn't go with the rest, train those now\n if remainder:\n unclusteredROMs = self._trainSubdomainROMs(self._templateROM, remainder, trainingSet, self._romGlobalAdjustments)\n roms = np.hstack([roms, unclusteredROMs])\n self._roms = roms", "def __init__(self, segments='CCCVV', root_length=3):\n # residue should be at least 1 segment long\n assert(root_length < len(segments))\n\n self.segments = segments\n self.root_length = root_length\n self.residue_length = len(segments) - 
root_length", "def __init__(self):\n super().__init__()\n self._points = 0\n self._segments = []\n self.fill_list()\n # i = random.randint(0, len(self._segments) - 1)\n # self.set_text(self._segments[i])\n self.reset()", "def initialize_vasp_runs(self):\n\n\t\treference_polarization_path = self.get_extended_path('reference_polarization')\n\t\tdistorted_polarization_path = self.get_extended_path('distorted_polarization')\n\n\t\t#if not Path.exists(reference_polarization_path):\n\t\tself.create_new_vasp_run(reference_polarization_path, self.reference_structure)\n\n\t\t# if not Path.exists(distorted_polarization_path):\n\t\tself.create_new_vasp_run(distorted_polarization_path, self.distorted_structure)", "def add_segments(self, *segments):\n for s in segments:\n self._add_one(s)", "def prepareParrallelize(self,segs):\n\n angles = numpy.array([s.angle for s in segs ])\n angles[numpy.where(angles<0)] += _pi # we care about direction, not angle orientation\n clList = clusterValues(angles, 0.15, refScaleAbs='abs')\n\n for cl in clList:\n meanA = angles[list(cl)].mean()\n for i in cl:\n seg = segs[i]\n seg.newAngle = meanA if seg.angle>=0. else meanA-_pi", "def __init__(self, path_list):\n self.path_list = path_list", "def __init__(\n self,\n segments: Tuple[\"BaseSegment\", ...],\n # These are tuples of segments but we're expecting them to\n # be tuples of length 1. This is because we'll almost always\n # be doing tuple arithmetic with the results and constructing\n # 1-tuples on the fly is very easy to misread.\n start_bracket: Tuple[BaseSegment],\n end_bracket: Tuple[BaseSegment],\n pos_marker: Optional[PositionMarker] = None,\n uuid: Optional[UUID] = None,\n ):\n if not start_bracket or not end_bracket: # pragma: no cover\n raise ValueError(\n \"Attempted to construct Bracketed segment without specifying brackets.\"\n )\n self.start_bracket = start_bracket\n self.end_bracket = end_bracket\n super().__init__(segments=segments, pos_marker=pos_marker, uuid=uuid)", "def __init__(self, focalPoint, focalDist, angles, segments):\n self.focalPoint = focalPoint\n self.focalDist = focalDist\n self.angles = angles\n self.segments = segments", "def set_calculated_segments(self, total_lights, segments):\n self.set_segments(segments)\n self.set_lights_per_segment(int(total_lights / segments))", "def __init__(self, tag: str, *elements: Union[str, List[str]]):\n if type(tag) != str:\n raise TypeError(\"'tag' argument must be a str\")\n if tag == \"\":\n raise ValueError(\"The tag of a segment must not be empty.\")\n self.tag = tag\n\n # The data elements for this segment.\n # this is converted to a list (due to the fact that python creates a tuple\n # when passing a variable arguments list to a method)\n self.elements = list(elements)", "def __init__(self):\n self.s_sect = []", "def __init__(self, name, ssn, address, courses=None):\n super().__init__(name, ssn, address)\n if courses is None:\n courses = []\n if courses == isinstance(courses, list):\n self.courses = courses\n else:\n self.courses = list(courses)", "def initialize(self):\n self.path = []\n self.sectorClean = False", "def __init__(self,numSegments,startX,startY):\n \n self.numSegments = numSegments\n\n # Construct the body\n self.body = []\n\n for i in range(numSegments):\n self.body.append([startX-i, startY])", "def set_market_segments(self, segments):\r\n \"\"\"\r\n Q1-2. Implement this method, which takes an iterable of MarketSegments\r\n to which this Account will be attached. 
This method REPLACES all\r\n MarketSegment associations, so be sure to update each\r\n MarketSegment's internal representation of associated Accounts\r\n appropriately.\r\n \"\"\"\r\n for existing_segment in self._market_segments:\r\n # only need to remove the ones that aren't in the new list\r\n if existing_segment not in segments:\r\n existing_segment.remove_account(self)\r\n for segment in segments:\r\n # add segments, catch ValueErrors which means the segment was\r\n # already part of this account, therefor no followup action is\r\n # needed\r\n try:\r\n self._market_segments.append(segment)\r\n # add_ms_to_account needs to be False because we've already\r\n # added the segment to this account\r\n segment.add_account(self, add_ms_to_account=False)\r\n except ValueError:\r\n # this account was already associated to that segment,\r\n # continue on\r\n continue", "def __init__(self, word_string, feature_table):\n self.word_string = word_string\n self.feature_table = feature_table\n self.segments = [Segment(char, self.feature_table) for char in self.word_string]", "def segment(raw_sents:List[str], segment=\"jieba\") -> List[List[str]]:\n\t# segment_list = [\"pkuseg\", \"jieba\"]\n\t# if segment.strip() not in segment_list:\n\t# \treturn []\n\n\tseg_sents = []\n\tif segment == \"pkuseg\":\n\t\timport pkuseg\n\n\t\t## init the seg\n\t\tseg = pkuseg.pkuseg()\n\n\t\t## segment the sentence by pkuseg\n\t\tfor sent in raw_sents:\n\t\t\tres_seg = seg.cut(sent)\n\t\t\tseg_sents.append(res_seg)\n\t\t# print(seg_sents)\n\telif segment == \"jieba\":\n\t\timport jieba\n\t\tfor sent in raw_sents:\n\t\t\tres_seg = jieba.lcut(sent)\n\t\t\tsentence = \" \".join(res_seg)\n\t\t\tpattern4 = re.compile(\" +\", re.S)\n\t\t\tsentence = pattern4.sub(\" \", sentence)\n\t\t\tres_seg = sentence.split(\" \")\n\t\t\tseg_sents.append(res_seg)\n\n\treturn seg_sents", "def test_getting_segments(self):\n pass", "def populate_vertices(self, vertices_list):\n vertices = []\n for vertex in vertices_list:\n vertex_id = vertex[0]\n vertices.append(Vertex(vertex_id))\n self.vertices = vertices", "def create_snake(self):\n for position in SNAKE_STARTING_POSITIONS:\n self.add_segment(position)", "def __init__(self, N, S, students, leaders):\n self.N = N\n self.S = S\n self.G = int(math.ceil(N/S))\n self.partitions = []\n self.students = students\n self.leaders = leaders", "def getsegs (bounds, split):\n segmentslist=bisect_rectange(split, bounds[0], bounds[1], bounds[2], bounds[3])\n count=1\n segpass=0\n \n #Get list of segment ids currently in database\n query=\"\"\"select seg_id from segment;\"\"\"\n df = pd.read_sql_query(query,con=engine)\n segids=set(df.seg_id)\n \n while count < len(segmentslist):\n try:\n for i in segmentslist:\n segments=getsegmentinfo(i)\n \n \n for seg in segments:\n #If running function several times for different splits, this ignores existing segments and prints a message\n if seg.id in segids: \n segpass+=1\n if (segpass % 10 == 0): \n print (\"{} segments already exist\".format(segpass))\n #Else this is a new segment, so get details from the strava and geocodio apis and save them to a dataframe and eventually to the database\n else:\n location = geocodio_client.reverse((seg.start_latlng[0], seg.start_latlng[1]))\n zipcode=location['results'][0]['address_components']['zip']\n \n newrow = {'seg_id' : seg.id,\n 'resource_state': seg.resource_state,\n 'climb_category':seg.climb_category,\n 'climb_category_desc':seg.climb_category_desc,\n 'average_grade':seg.avg_grade,\n 'elev_difference': 
str(seg.elev_difference).split()[0],\n 'distance': str(seg.distance).split()[0],\n 'name' : seg.name,\n 'start_lat' : seg.start_latlng[0],\n 'start_long' : seg.start_latlng[1],\n 'end_lat' : seg.end_latlng[0],\n 'end_long' : seg.end_latlng[1],\n 'points' : seg.points,\n 'starred':seg.starred,\n 'zipcode':zipcode\n }\n df=pd.DataFrame(newrow, index=[0])\n \n try:\n #Save dataframe to database\n df.to_sql('segment', engine,index=False,if_exists='append')\n except:\n pass\n\n #Prints message which keeps track of number of sub bounds completed \n if (count % 10) == 0:\n print (\"Getting segments in bound {} of {}\".format(count, len(segmentslist)))\n count+=1\n except Exception as inst:\n print (inst) \n return None", "def __init__(self, *segments, **params):\n self.url = URL(*segments, **params)", "def submitlist(jb, ls):\n segstart, segend = calculatestartend(ls) # Get the segment id for the current segment\n seg = None\n opp = None\n with jb.lock: # Lock the segments dictionary\n segments = jb.segments\n if segstart in segments:\n seg, opp = segments.pop(segstart, None)\n elif segend in segments:\n seg, opp = segments.pop(segend, None)\n if seg:\n segments.pop(opp)\n else:\n segments[segstart] = (ls, segend)\n segments[segend] = (ls, segstart)\n if seg:\n reqq.put((\"merge\", (ls, seg)), )", "def new_segments_center_of_mass_set(self, segments_center_of_mass):\n if segments_center_of_mass.time.size != 1:\n raise IndexError(\"Segments center of mass should be from one frame only\")\n self.segments_center_of_mass = segments_center_of_mass\n\n # Remove previous actors from the scene\n for actor in self.segments_center_of_mass_actors:\n self.parent_window.ren.RemoveActor(actor)\n self.segments_center_of_mass_actors = list()\n\n # Create the geometry of a point (the coordinate) points = vtk.vtkPoints()\n for i in range(segments_center_of_mass.channel.size):\n # Create a mapper\n mapper = vtkPolyDataMapper()\n\n # Create an actor\n self.segments_center_of_mass_actors.append(vtkActor())\n self.segments_center_of_mass_actors[i].SetMapper(mapper)\n\n self.parent_window.ren.AddActor(self.segments_center_of_mass_actors[i])\n\n # Update marker position\n self.update_segments_center_of_mass(self.segments_center_of_mass)", "def __init__(__self__, *,\n domains: Optional[Sequence[str]] = None,\n verticals: Optional[Sequence[str]] = None):\n if domains is not None:\n pulumi.set(__self__, \"domains\", domains)\n if verticals is not None:\n pulumi.set(__self__, \"verticals\", verticals)", "def initialize(self, keys: List[str]):", "def create_from_bounds(self, lbs, ubs):\n self.base_vertices = (np.array([lbs])+np.array([ubs])).T/2\n self.base_vectors = np.diag((np.array(ubs)-np.array(lbs))/2)", "def _trainBySegments(self, divisions, trainingSet):\n # subdivide domain and train subdomain ROMs, as with the segmentation\n ## TODO can we increase the inheritance more here, or is this the minimum cutset?\n counter, remainder = divisions\n # store delimiters\n if len(remainder):\n self.raiseADebug('\"{}\" division(s) are being excluded from clustering consideration.'.format(len(remainder)))\n ## train ROMs for each segment\n roms = self._trainSubdomainROMs(self._templateROM, counter, trainingSet, self._romGlobalAdjustments)\n # collect ROM features (basic stats, etc)\n clusterFeatures = self._gatherClusterFeatures(roms, counter, trainingSet)\n # future: requested metrics\n ## TODO someday\n # store clustering info, unweighted\n self._clusterInfo['features'] = {'unscaled': copy.deepcopy(clusterFeatures)}\n # weight 
and scale data\n ## create hierarchy for cluster params\n features = sorted(clusterFeatures.keys())\n hierarchFeatures = defaultdict(list)\n for feature in features:\n _, metric, ident = feature.split('|', 2)\n # the same identifier might show up for multiple targets\n if ident not in hierarchFeatures[metric]:\n hierarchFeatures[metric].append(ident)\n ## weighting strategy, TODO make optional for the user\n weightingStrategy = 'uniform'\n clusterFeatures = self._weightAndScaleClusters(features, hierarchFeatures, clusterFeatures, weightingStrategy)\n self._clusterInfo['features']['scaled'] = copy.deepcopy(clusterFeatures)\n # perform clustering\n labels = self._classifyROMs(self._divisionClassifier, features, clusterFeatures)\n uniqueLabels = sorted(list(set(labels))) # note: keep these ordered! Many things hinge on this.\n self.raiseAMessage('Identified {} clusters while training clustered ROM \"{}\".'.format(len(uniqueLabels), self._romName))\n # if there were some segments that won't compare well (e.g. leftovers), handle those separately\n if len(remainder):\n unclusteredROMs = self._trainSubdomainROMs(self._templateROM, remainder, trainingSet, self._romGlobalAdjustments)\n else:\n unclusteredROMs = []\n # make cluster information dict\n self._clusterInfo['labels'] = labels\n ## clustered\n self._clusterInfo['map'] = dict((label, roms[labels == label]) for label in uniqueLabels)\n ## unclustered\n self._clusterInfo['map']['unclustered'] = unclusteredROMs\n # TODO what about the unclustered ones? We throw them out in truncated representation, of necessity.\n self._roms = list(self._clusterInfo['map'][label][0] for label in uniqueLabels)", "def onSegmentButton(self):\n markupsNode = slicer.mrmlScene.GetFirstNodeByName(\"MarkupsFiducial\")\n\n seedsFileName = self.fileNameSeedsLineEdit.text\n marginMask = int(self.marginMask.value)\n distance = int(self.distance.value)\n gamma = float(self.gammaSpinBox.value)\n regularizationDiameter = int(self.regularizationDiameter.value)\n minThreshold = int(self.minThresholdSlider.value)\n maxThreshold = int(self.maxThresholdSlider.value)\n \n self.markupsList = []\n if markupsNode != None:\n for i in range(markupsNode.GetNumberOfFiducials()):\n point_ras = [0, 0, 0]\n markupsNode.GetNthFiducialPosition(i, point_ras)\n name = markupsNode.GetNthFiducialLabel(i)\n label = int(markupsNode.GetNthControlPointDescription(i))\n self.markupsList.append([name, point_ras, label])\n \n if len(self.markupsList) == 0:\n fileName = self.seedsPath + self.fileNameSeedsLineEdit.text\n self.markupsList = self.loadMarkupsFromSeedFile(fileName)\n \n if len(self.markupsList) == 0:\n print(\"There is no fiducial markups !\")\n return\n \n self.logic.setGlobalPath(self.globalPath)\n self.logic.setSeedsFileName(seedsFileName)\n self.logic.setRemoveLastSegmentation(self.removeLastSegmentationCheckBox.isChecked())\n self.logic.setShowBackGround(self.showBackGroundCheckBox.isChecked())\n result = self.logic.run(self.inputSelector.currentNode(), self.labelColorsList, self.markupsList,\n marginMask, distance, gamma, regularizationDiameter, [minThreshold, maxThreshold])\n\n if result: # Run succeed\n # Set the segmentation file UI name with this seeds file name and the used paramaters\n segmentationFileName = getSegmentationFileName(seedsFileName, distance, gamma, marginMask, regularizationDiameter)\n self.saveSegmentationName.text = segmentationFileName \n self.outputVolume = result", "def fillData(self):\n self.users = c.getUserNames()\n self.userlist.SetItems(self.users)", "def 
__init__(self, name, ssn, address, courses_grades=None):\n super().__init__(name, ssn, address)\n if courses_grades is None:\n courses_grades = []\n if courses_grades == isinstance(courses_grades, list):\n self.courses_grades = courses_grades\n else:\n self.courses_grades = list(courses_grades)", "def __init__(self):\n self._create_options()\n self._create_sections()", "def __init__ (self, points):\n\n self.points = tuple (points)\n # maximum number of enitities in this mount\n self.amount = len (self.points)\n # list of all entities in this mount\n # indices correspond with self.points\n self.mounts = [None] * self.amount", "def set_segments_to_value(arr, segments, value=0):\n for segment in segments:\n arr[segment[0]:segment[1]] = value", "def __init__(self, name, accounts=None):\r\n self.name = name\r\n if accounts:\r\n self._accounts = accounts\r\n for account in accounts:\r\n # add_account_to_ms is False because we've already added the\r\n # account to this segment, don't want to do it again\r\n account.add_to_market_segment(self, add_account_to_ms=False)\r\n else:\r\n self._accounts = []\r\n check_for_existing_market_segment(self)", "def __init__(self):\n segment_number = 2\n list_digits = 4\n super().__init__(4, segment_number, list_digits, default_val=\"0 \")\n self.set_credit(self.get_credit())", "def __init__(self, name, hip, foot, *segments):\n super().__init__(name=name, joint=hip, appendage=foot, segments=segments)", "def test_creating_a_new_segment(self):\n pass", "def configure_segment(**kwargs):\n sessiontoken = kwargs['sessiontoken']\n proxy = kwargs['proxy']\n segment_name = kwargs[\"objectname\"]\n # Quick search to see if the segment exists of not.\n segment=search_nsx_json(proxy, sessiontoken, \"Segment\", segment_name)\n # If the segment exists, capture the path for the API call, and the existing configuration in JSON.\n if len(segment['results']) > 0:\n json_init=segment['results'][0]\n segment_path = segment['results'][0]['path']\n else:\n print(\"The segment does not exist. Please create a segment using 'new-segment'.\")\n sys.exit(1)\n # Establish a list of keys to keep - these represent the values we are willing/able to update.\n keep_list = ['display_name', 'connectivity_path', 'advanced_config', 'resource_type', 'subnets']\n # Construct a new JSON using just the keys we want to keep\n json_data = dict([(key, val) for key, val in \n json_init.items() if key in keep_list])\n # Update the json_data with the configuration specified by the user.\n if kwargs['connectivity'] is not None:\n json_data[\"advanced_config\"][\"connectivity\"] = f'{kwargs[\"connectivity\"]}'\n if kwargs['tier1_id'] is not None:\n if segment_path == \"/infra/tier-1s/cgw\":\n print(\"This is a fixed segment - you may not alter the connectivity path. Please create a 'flexible' segment.\")\n else:\n json_data[\"connectivity_path\"] = f'/infra/tier-1s/{kwargs[\"tier1_id\"]}'\n#\n # make the call to the API\n status = configure_segment_json(proxy, sessiontoken, segment_path, json_data)\n # present results.\n if status == 200:\n print(f'The following network has been modified: {segment_name}')\n vars = {\"proxy\":proxy, \"sessiontoken\":sessiontoken, \"object_type\":\"Segment\", \"object_id\":segment_name}\n search_nsx(**vars)\n else:\n print(\"The segment was not modified. 
Please check your syntax and try again.\")\n sys.exit(1)", "def __init__(self, subsection_arch, subsection_id, server_range,\n serviced_passengers):\n self.server_list = self.init_server_list(subsection_arch,\n subsection_id,\n server_range,\n serviced_passengers)\n self.has_space_in_a_server_queue = True\n self.queue_size = 0\n self.min_queue = self.server_list[0]\n self.online_server_count = 0", "def __init__(self, *points, width=1, color=colors.WHITE, conversion=True):\n if len(points) > 0: # Extracting the points arguments under the same list format\n if type(points[0]) == list:\n points = points[0]\n if len(points) == 1: points = points[0]\n if len(points) != 2: raise Exception(\"A segment must have 2 points.\")\n self.points = list(points)\n self.width = width\n self.color = color\n self.conversion = conversion", "def __init__(self, *args):\n this = _ida_segment.new_lock_segment(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(\n self, user_message: str, yaml_path: str, segment: Optional[str] = None\n ) -> None:\n super().__init__(\n user_message=user_message, yaml_path=yaml_path, segment=segment)", "def __init__(self, params):\r\n _params = {'Application': 'None',\r\n 'Algorithm':\r\n 'first: \"chooses first seq listed, corresponding to cluster seed for uclust\"',\r\n 'ChoiceF': first,\r\n 'ChoiceFRequiresSeqs': False\r\n }\r\n _params.update(params)\r\n RepSetPicker.__init__(self, _params)", "def __init__(self, params):\r\n _params = {'Application': 'None',\r\n 'Algorithm':\r\n 'first: \"chooses first seq listed, corresponding to cluster seed for uclust\"',\r\n 'ChoiceF': first,\r\n 'ChoiceFRequiresSeqs': False\r\n }\r\n _params.update(params)\r\n RepSetPicker.__init__(self, _params)", "def __init__(self, scn_line_list):\n self.scn_line_list = scn_line_list", "def __init__(self):\n segment_number = 2\n list_digits = 3\n super().__init__(6, segment_number, list_digits, \"000000\")", "def init_data(partitions_file):\n mapping = []\n\n drive_size = None\n for line in partitions_file:\n if drive_size is None:\n drive_size = parse_drive_size(line.rstrip())\n else:\n partitions_list = parse_partitions(line.rstrip())\n mapping.append((drive_size, partitions_list))\n drive_size = None\n\n return mapping", "def __init__(self):\n segment_number = 2\n list_digits = 2\n super().__init__(6, segment_number, list_digits, \"000000\")", "def get_segments(self):\n\t\tos.chdir(self.segment_path)\n\t\tfor path in glob.glob(\"%s/*.seg\" % self.segment_path):\n\t\t\t_file = os.path.split(path)[1]\n\t\t\tdae = DiscreetArchiveElement(self,_file,element_type='segment')\n\t\t\tself.elements.append(dae)\n\t\treturn True", "def __init__(self, shared_list):\n self.shared_list = shared_list", "def construct_sub_segment(self, start_frag_id, stop_frag_id):\n fragiter = iter_fragments(iter(self.fragment_list), start_frag_id, stop_frag_id)\n segment = self.construct_segment()\n for frag in fragiter:\n segment.add_fragment(frag, True)\n return segment", "def new_segment(**kwargs):\n sessiontoken = kwargs['sessiontoken']\n proxy = kwargs['proxy']\n if kwargs['objectname'] is None or kwargs['gateway'] is None:\n print(\"Please specify a name for the segment, and the gateway/network.\")\n sys.exit(1)\n if kwargs['segment_type'] == \"flexible\" and kwargs['tier1_id'] is None:\n print(\"Please specify either the segment type as 'fixed' (-st fixed) OR segment type as 'flexible' as well as the ID of the Tier1 for connectivity (-t1id TIER1ID). 
Use pyVMC -h for additional options.\")\n sys.exit(1)\n if kwargs['segment_type'] == \"fixed\" and kwargs['tier1_id'] is not None:\n print(\"Invalid configuration - 'fixed' segments may only be connected to the default CGW. To attach to a customer Tier1, please create a 'flexible' segment.\")\n sys.exit(1)\n rt_set = [None, \"ROUTED\", \"DISCONNECTED\"]\n if kwargs['segment_type'] == \"fixed\" and kwargs['routing_type'] not in rt_set:\n print(\"Invalid configuration. For a 'fixed' segment, the routing type must be left blank or set explicitly to 'ROUTED' or 'DISCONNECTED.'\")\n sys.exit(1)\n\n segment_name = kwargs[\"objectname\"]\n gateway = kwargs['gateway']\n\n # Search for segment to determine if it already exists\n segment=search_nsx_json(proxy, sessiontoken, \"Segment\", segment_name)\n if len(segment['results']) > 0:\n print(\"The segment already appears to exist.\")\n sys.exit(1)\n\n\n # Establish baseline json payload\n json_data = {\n \"display_name\":segment_name,\n \"id\":segment_name,\n \"advanced_config\":{\"connectivity\":\"ON\"},\n \"subnets\":[\n {\n \"gateway_address\": gateway\n }\n ]\n }\n #set segment type as either \"fixed\" or \"flexible\"\n segment_type = kwargs['segment_type']\n tier1_id = kwargs['tier1_id']\n\n if segment_type == \"fixed\":\n json_data[\"connectivity_path\"] = \"/infra/tier-1s/cgw\"\n if kwargs['routing_type'] == \"DISCONNECTED\":\n json_data[\"advanced_config\"][\"connectivity\"] = \"OFF\"\n else:\n json_data[\"advanced_config\"][\"connectivity\"] = \"ON\"\n elif segment_type == \"flexible\" and tier1_id is not None:\n json_data[\"connectivity_path\"] = f'/infra/tier-1s/{tier1_id}'\n else:\n print(\"Please specify either the segment type as 'fixed' (-st fixed) OR segment type as 'flexible' as well as the ID of the Tier1 for connectivity (-t1id TIER1ID). Use pyVMC -h for additional options.\")\n if kwargs['dhcp_range'] is not None:\n json_data[\"subnets\"][0][\"dhcp_ranges\"] = [f'{kwargs[\"dhcp_range\"]}']\n if kwargs['domain_name'] is not None:\n json_data[\"domain_name\"] = kwargs[\"domain_name\"]\n\n print(json.dumps(json_data, indent = 2))\n\n status = new_segment_json(proxy, sessiontoken, segment_name, segment_type, json_data)\n if status == 200:\n print(f'The following network has been created: {segment_name}')\n vars = {\"proxy\":proxy, \"sessiontoken\":sessiontoken, \"object_type\":\"Segment\", \"object_id\":segment_name}\n search_nsx(**vars)\n else:\n print(\"The segment was not created. 
Please check your syntax and try again.\")\n sys.exit(1)", "def __init__(self, coords, name=None):\n self.points = [\n Point(c[0], c[1], None if name is None else \"{}[{}]\".format(name, i))\n for i, c in enumerate(coords)\n ]\n self.line_segments = [\n LineSegment(p1, p2) for p1, p2 in zip(self.points[:-1], self.points[1:])\n ]\n self.line_segments.append(LineSegment(self.points[-1], self.points[0]))", "def fill_list(self):\n for i in range(0, constants.STARTING_WORDS):\n random_word = constants.LIBRARY[random.randint(0, len(constants.LIBRARY) - 1)]\n x = random.randint(1, constants.MAX_X - len(self.get_text()))\n y = random.randint(1, constants.MAX_Y - len(self.get_text()))\n position = Point(x, y)\n self.set_position(position)\n velocity = Point(0, 1)\n self._add_segment(random_word, position, velocity)\n print()", "def prepare_training_data(collection_of_positive_segments, collection_of_negative_segments):\n\n\n\tprint \"Preparing training data...\"\n\n\ttraining_vectors = []\n\ttraining_labels = []\n\n\n\tfor segment in collection_of_positive_segments.list_of_segments:\n\t\tvector = get_segment_feature_vector(segment)\n\n\t\ttraining_labels.append(\"Correct\")\n\t\ttraining_vectors.append(vector)\n\t\t\n\t\t\n\tfor segment in collection_of_negative_segments.list_of_segments:\n\t\tvector = get_segment_feature_vector(segment)\n\t\ttraining_labels.append(\"Incorrect\")\n\t\ttraining_vectors.append(vector)\n\t\t\n\t\n\ttraining_vectors = normalize_train_data(training_vectors)\n\t\n\t\t\n\t#data = VectorDataSet(training_vectors,L=training_labels)", "def assign_actual(segments_path, training_path):\n pass", "def parse_segments(self):\n segs = self.unixtext.split(\"$$\")\n for seg in segs:\n self.segments.append(TextProductSegment(seg, self))", "def _set_out_segments(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGListType(\"outgoing_interface\",out_segments.out_segments, yang_name=\"out-segments\", rest_name=\"out-segments\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='outgoing-interface', extensions={u'tailf-common': {u'callpoint': u'mpls-out-segment', u'cli-suppress-show-path': None}}), is_container='list', yang_name=\"out-segments\", rest_name=\"out-segments\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-out-segment', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"out_segments must be of a type compatible with list\"\"\",\n 'defined-type': \"list\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGListType(\"outgoing_interface\",out_segments.out_segments, yang_name=\"out-segments\", rest_name=\"out-segments\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='outgoing-interface', extensions={u'tailf-common': {u'callpoint': u'mpls-out-segment', u'cli-suppress-show-path': None}}), is_container='list', yang_name=\"out-segments\", rest_name=\"out-segments\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-out-segment', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', 
defining_module='brocade-mpls-operational', yang_type='list', is_config=False)\"\"\",\n })\n\n self.__out_segments = t\n if hasattr(self, '_set'):\n self._set()", "def init(self):\n self.mrs = []\n if self.args.block_ctrl is not None:\n self.block_ctrl_list = self.args.block_ctrl.split(\",\")\n # Verify User input as per 0,1,2 format.\n if \"\" in self.block_ctrl_list:\n raise SALError(\"Invalid Input:[%s] to block controller \"\n \"arguments given to scripts!!!\"\n % (self.block_ctrl_list))\n else:\n self.block_ctrl_list = \"\"\n # Create MR instance for controller index using --ctrl input.\n self.mrs.append(create_mradapter(ctrl_index=self.args.ctrl))\n self.ctrl_cnt = self.mrs[0].cli.controller_count()\n for index in range(0, self.ctrl_cnt):\n # Check for Block controller list.\n if str(index) not in self.block_ctrl_list:\n # Check for --Ctrl index given as arg to script.\n if index != self.args.ctrl:\n self.log.info(\"Creating MR instance for Controller-%d\"\n % (index))\n self.mrs.append(create_mradapter(ctrl_index=index))\n else:\n self.log.info(\"*****TC will not execute on Blocked \"\n \"Controller-%d*****\" % (index))\n for mr in self.mrs:\n if not mr.is_mr():\n raise SALError(\"This script is applicable only for MR \"\n \"controller cards\")", "def network_initialize(self, payments):\n for pt in payments:\n if pt.id1 not in self.users.keys():\n self.users[pt.id1] = set()\n if pt.id2 not in self.users.keys():\n self.users[pt.id2] = set()\n self.users[pt.id1].add(pt.id2)\n self.users[pt.id2].add(pt.id1)", "def create_hltsv_segment(config_db, default_host, hltsv_host, sfos, hlt_segments):\n config_rules = config_db.getObject(\"ConfigurationRuleBundle\",\n \"DefaultConfigurationRuleBundle\")\n efio_config = config_db.getObject(\"EFIOConfiguration\", \"EFIO-Configuration-1\")\n hltsv_segment = dal.Segment(\"HLT\")\n defrc_controller = config_db.getObject(\"RunControlTemplateApplication\",\n \"DefRC\")\n hltsv_segment.IsControlledBy = defrc_controller\n hltsv_app = create_hltsv_app(config_db, hltsv_host)\n\n sfo_apps = []\n for index, sfo_host in zip(range(1, len(sfos)+1), sfos):\n sfo_application = create_sfo_application(config_db, str(index),\n sfo_host)\n sfo_apps.append(sfo_application)\n \n hltsv_resources = [hltsv_app] + sfo_apps\n hltsv_segment.Resources = hltsv_resources\n\n top_aggregator_app = create_aggregator_app(config_db, \"top_aggregator.py\",\n default_host)\n hltsv_segment.Applications = [top_aggregator_app]\n\n hltsv_segment.Hosts = [default_host]\n\n #infrastructure applications\n is_server = config_db.getObject(\"InfrastructureTemplateApplication\",\n \"DF_IS\")\n oh_server = config_db.getObject(\"InfrastructureTemplateApplication\",\n \"DF_Histogramming\")\n hltsv_segment.Infrastructure = [is_server, oh_server]\n \n #Resources\n mon_is = config_db.getObject(\"MIGApplication\", \"TopMIG-IS\")\n mon_oh = config_db.getObject(\"MIGApplication\", \"TopMIG-OH\")\n hltsv_segment.Resources.append(mon_is)\n hltsv_segment.Resources.append(mon_oh)\n\n hltsv_segment.Segments = hlt_segments\n \n return hltsv_segment", "def initialize():\n\n with settings(prompts={'Password: ': 'test', 'Password (again): ': 'test'}):\n for user, group in USER_GROUPS:\n sudo(\"useradd %s -G %s,minv -g minv -N || true\" % (user, group))\n sudo(\"chmod g+rwx /home/%s\" % user)\n sudo('minv_ createuser %s -g %s' % (user, group), user=\"minv\")\n\n # upload script to create collections\n put(\n join(env.testdata_path, \"scripts/initial_collections.sh\"),\n \"\", mode=0755\n )\n sudo(\"cp 
initial_collections.sh /home/minv-app-administrator/\")\n\n # upload collection configs\n for conf in glob(join(env.testdata_path, \"configurations/*.conf\")):\n put(conf, \"\", mode=0444, use_sudo=True)\n sudo(\"cp %s /home/minv-app-administrator/\" % basename(conf))\n\n with cd(\"/home/minv-app-administrator/\"):\n sudo(\"chmod a+rx . *\")\n sudo(\n \"sh -l ./initial_collections.sh\",\n user=\"minv-app-administrator\"\n )", "def __init__(self, users=()):\n self.users = {str(x.id): x for x in users}", "def _cluster_segments_all_way(self, segmented_instances, labels, \\\n end_points, stats, cluster_thresh=0.5):\n\n #self.showme(segmented_instances, 'main img')\n segment_association_list = []\n max_num_end_points= 0\n\n # for each stem segment\n for i in range(0, len(labels)):\n # each end point in the current segment i\n if max_num_end_points < len(end_points[i]):\n max_num_end_points = len(end_points[i])\n for k in range(0, len(end_points[i])):\n angle_list=[]\n # find the segment that is most likely connected to segment i at end point[i][k]\n for j in range(0, len(labels)):\n # make sure we are not trying to connect the segment to itself\n if i!= j:\n # angle calculates the angle between the line stats['centroid'][i]-end_points[i][k]\n # and stats['centroid'][i]-stats['centroid'][j]\n\n angle = self._ang([stats['centroid'][i],end_points[i][k]], \\\n [stats['centroid'][i], stats['centroid'][j]] )\n # if the angle value is within the acceptable range of +/- angle_thresh\n if angle<=self.angle_thresh or angle>=360-self.angle_thresh:\n other_angle, other_seg_section, end_point_dist = self._get_best_fit(segmented_instances, \\\n len(labels), \\\n stats, end_points,\\\n i, j, k, pos_angle=angle<=self.angle_thresh)\n # if the best fit segment also has a small angle between its\n # end point-centroid line and centroid-centroid line,\n # add it to segments connected to segment i\n if other_angle!=None and other_angle<=self.angle_thresh:\n angle_list.append((j, other_seg_section, other_angle, end_point_dist, angle))\n #Sort the list of stem segments connected to i by end_point_dist\n angle_list = sorted(angle_list, key=lambda x:x[3])\n #Sorting by the Euclidian distance of the end_point_dist and the other_angle does not change end result\n #angle_list = sorted(angle_list, key=lambda x:(math.sqrt(x[3]**2.0+x[2]**2.0)))\n # the angle value reflects how far segment k is from the straight line\n # going through the centroids\n if len(angle_list)>0:\n # (i, j, k, l, angle between i and centroid line, angle between j and centroid line, distance between closest end points k in seg i and l in seg j)\n segment_association_list.append((i,angle_list[0][0],k, angle_list[0][1], angle_list[0][4], angle_list[0][2], angle_list[0][3]))\n\n\n # sort slope differences in an increasing order\n segment_association_list = sorted(segment_association_list,key=lambda x:(x[6]))\n\n # find best match by iteretively selecting the smallest difference\n # and adding it to the ith cluster\n cluster_list = []\n cluster = np.full(len(labels),None)\n colored_clusterImg = np.zeros(segmented_instances.shape).astype(np.uint8)\n #clusterImg = np.zeros(segmented_instances.shape).astype(np.uint8)\n\n # initialize cluster list to single clusters contianing only each individual segment\n for i in range(0, len(labels)):\n cluster[i]=i\n cluster_list.append([i])\n #self.showme(clusterImg, str(i))\n\n visited=np.full((len(labels),max_num_end_points), False)\n\n #cluster=np.frompyfunc(list,1,1)(cluster) # allows us to append to only the 
specified list end_points[i]\n new_cluster_num=0\n color_offset=len(labels)\n\n # for each pair of segments in our list of best fit segments\n for curr_tuple in segment_association_list:\n img = np.zeros(segmented_instances.shape)\n i = curr_tuple[0] # index of first segment\n j = curr_tuple[1] # index of second segment in the tuple\n i_section = curr_tuple[2] #end point number in segment i\n j_section = curr_tuple[3] #end point number in segment j\n angle = curr_tuple[4]\n other_angle = curr_tuple[5]\n end_point_dist = curr_tuple[6] #distance between the connecting end points of segments i and j\n img[segmented_instances== i]= 255\n img[segmented_instances== j]= 255\n if (visited[i][i_section]==False)and(visited[j][j_section]==False):\n #cv2.line(clusterImg,(end_points[i][i_section][0],end_points[i][i_section][1]),\\\n # (end_points[j][j_section][0], end_points[j][j_section][1]),150,2)\n #self.showme(clusterImg, str(i))\n visited[i][i_section]=True\n visited[j][j_section]=True\n cluster_num = cluster[i]\n if cluster[i]!=cluster[j]:\n other_cluster_num = cluster[j]\n cluster_list[cluster_num] = list(set(cluster_list[cluster_num]+\\\n copy.deepcopy(cluster_list[other_cluster_num])))\n # update cluster numbers for all segments moved into new cluster\n for seg in cluster_list[other_cluster_num]:\n cluster[seg]=cluster_num\n # update cluster numbers for clusters larger than cluster to be removed\n for idx in range(0, len(cluster)):\n if (cluster[idx]>other_cluster_num):\n cluster[idx]= cluster[idx]-1\n del cluster_list[other_cluster_num]\n\n\n #show clustered segments\n color = 0\n cluster_num = 0\n cluster_mask=[]\n\n for c in cluster_list:\n color = color+0.1\n cluster_mask.append(np.zeros(segmented_instances.shape).astype(np.uint8))\n\n for i in c:\n cluster_mask[cluster_num][(segmented_instances == labels[i])]=1\n colored_clusterImg[(segmented_instances == labels[i])]= int(color*255)\n \"\"\"if self.key in ['../data/images/image1672', '../data/images/image1289']:\n self.showme(colored_clusterImg)\"\"\"\n cluster_num +=1\n\n return cluster_mask, colored_clusterImg", "def build_subsets(self):\n self.all = nrn.SectionList()\n self.all.wholetree(sec=self.soma)", "def set_vendors(self, vendors_list):\n self.multiple_items_selection_from_kendo_dropdown(self.vendors_kendo_dropdown_locator, vendors_list)\n self.wait_for_ajax_spinner_load()", "def setInitialized( initialized, ants, subarray=DEFAULT ):\n antlist = helpers.makeList(ants)\n multiSubarray('antennaInitialized', subarray, initialized, antlist )", "def __init__(self, length=None, primary=None):\n if primary:\n self._primary_list = primary\n else:\n self._primary_list = [None] * length\n self._dot_bracket = None\n self._current_site = 0", "def __init__(__self__, *,\n vnet_subnet_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):\n if vnet_subnet_ids is not None:\n pulumi.set(__self__, \"vnet_subnet_ids\", vnet_subnet_ids)", "def __init__(self, uccsdslice, slice_index, angle_deg):\n super()\n # IMPLEMENTATION NOTE: This probably doesn't need to be a deepcopy since\n # the state is passed to seperate processes.\n self.uccsdslice = deepcopy(uccsdslice)\n self.slice_index = slice_index\n self.angle_deg = angle_deg\n self.angle = np.deg2rad(angle_deg)\n self.uccsdslice.update_angles([self.angle] * len(self.uccsdslice.angles))\n self.file_name = \"s{}_a{}\".format(slice_index, angle_deg)\n self.pulse_time = UCCSD_LIH_SLICE_TIMES[slice_index]\n self.lr = UCCSD_LIH_HYPERPARAMETERS[slice_index][\"lr\"]\n self.decay = 
UCCSD_LIH_HYPERPARAMETERS[slice_index][\"decay\"]", "def make_segments(date_start=None, date_end=None, date_freq=None,\n n_series=3, n_segments=2, seg_sep=None,\n means=None, stds=None, trends=None,\n amplitudes=None, phases=None):\n assert isinstance(n_series, int)\n assert isinstance(n_segments, int) and n_segments >= 1\n # TODO\n pass", "def __init__(self, pair_list=None):\n\n self.plugleads = []\n if pair_list is not None:\n self.add_many(pair_list)", "def initDefaults(self):\n return _libsbml.LineSegment_initDefaults(self)", "def _prepPointsForSegments(points):\n while 1:\n point = points[-1]\n if point.segmentType:\n break\n else:\n point = points.pop()\n points.insert(0, point)\n continue\n break", "def __init__(self, substrates=None, products=None):\n self.substrates = substrates or []\n self.products = products or []", "def init_managers(endpoints_file: Optional[Text]) -> None:", "def __init__(self, station_file: str, ride_file: str) -> None:\n self.all_stations = create_stations(station_file)\n self.all_rides = create_rides(ride_file, self.all_stations)\n self.visualizer = Visualizer()\n self.active_rides = []", "def init_subsections(self, customs_arch):\n section_list = []\n\n # Identify number of unique subsections in the server arcitecture.\n num_subsections = len(customs_arch['subsection'].unique())\n\n # Retrieve number of total servers and start an ID counter.\n num_servers = sum(customs_arch['max'])\n server_id = 1\n\n # Initialize each Subsection Class with a loop.\n for i in range(num_subsections):\n\n # Get the label of the subsection.\n subsection_id = customs_arch['subsection'].unique()[i]\n\n # Subset the master architecture into an architecture just for\n # for the subsection.\n subsection_arch = customs_arch[customs_arch['subsection'] == subsection_id]\n\n # Get server ID range.\n server_range = (server_id, server_id + subsection_arch.iloc[0]['max'])\n server_id = server_id + subsection_arch.iloc[0]['max']\n\n # Get the processed passenger queue from the Class Data Members list.\n serviced_passengers_list = self.outputs\n\n # Init a subsection and append to the list.\n section_list.append(Subsection(subsection_id,\n subsection_arch,\n server_range,\n serviced_passengers_list))\n\n return section_list", "def region_setup(self, slices, ipa_regions):\n self.ipa_regions = ipa_regions\n self.slices = slices", "def init_server_list(self, subsection_arch, subsection_id, server_range,\n output_list):\n\n # Init a list of servers to return.\n rtn = []\n\n # Loop through all servers in the arch.\n for i in range(server_range[0], server_range[1]):\n\n # Pass the ID of the server and Init a server.\n rtn.append(ServiceAgent(str(i), subsection_id, output_list))\n\n # Return the list.\n return rtn", "def __init__(self, list: List[DiagramView], start_button: StartButtonView):\n super().__init__()\n\n self.__init_ui(list, start_button)", "def setUserIDRefs( self, text ):\n self.user_id_list= text.split()", "def __init__(self, contact_loader):\n self.contacts_by_group_list = contact_loader.contacts_by_group_list\n self.contact_list = None", "def update_nets_with_segments(pcb_data: List[Dict[str, Any]], nets: List[Net]):\n segments = get_all_dicts_by_key(pcb_data, 'segment')\n for segment in segments:\n start: Coords = get_dict_by_key(segment['segment'], 'start')['start']\n start[1] = str(-1*float(start[1]))\n end: Coords = get_dict_by_key(segment['segment'], 'end')['end']\n end[1] = str(-1 * float(end[1]))\n width: str = get_dict_by_key(segment['segment'], 'width')['width']\n 
layer_data: str = get_dict_by_key(segment['segment'], 'layer')['layer']\n layers: List[Layer] = convert_to_layers(layer_data)\n new_segment: Segment = Segment(start=start, end=end, width=width, layers=layers)\n net_id: str = get_dict_by_key(segment['segment'], 'net')['net']\n for net in nets:\n if float(net.net_id) == float(net_id):\n net.segments.append(new_segment)", "def setPUsers(self, users):\n model = self.tvPUsers.get_model()\n model.clear()\n for user in users:\n model.append((user,))\n\n self.on_entPUser_changed(self.entPUser)\n self.on_tvPUsers_cursor_changed(self.tvPUsers)", "def __init__(self):\n self.g_sect = []", "def segment(args):\n from jcvi.formats.base import SetFile\n\n p = OptionParser(segment.__doc__)\n p.add_option(\n \"--chain\",\n default=1,\n type=\"int\",\n help=\"Allow next N genes to be chained\",\n )\n opts, args = p.parse_args(args)\n\n if len(args) != 2:\n sys.exit(not p.print_help())\n\n idsfile, bedfile = args\n bed = Bed(bedfile)\n order = bed.order\n ids = SetFile(idsfile)\n losses = Grouper()\n skip = opts.chain\n for i, a in enumerate(bed):\n a = a.accn\n for j in range(i + 1, i + 1 + skip):\n if j >= len(bed):\n break\n b = bed[j].accn\n if a in ids:\n losses.join(a, a)\n if a in ids and b in ids:\n losses.join(a, b)\n\n losses = list(losses)\n singletons = [x for x in losses if len(x) == 1]\n segments = [x for x in losses if len(x) > 1]\n ns, nm, nt = len(singletons), len(segments), len(losses)\n assert ns + nm == nt\n\n # Summary for all segments\n for x in sorted(singletons) + sorted(segments):\n print(\n \"\\t\".join(\n str(x)\n for x in (\"|\".join(sorted(x)), len(x), estimate_size(x, bed, order))\n )\n )\n\n # Find longest segment stretch\n if segments:\n mx, maxsegment = max([(len(x), x) for x in segments])\n print(\"Longest stretch: run of {0} genes\".format(mx), file=sys.stderr)\n print(\" {0}\".format(\"|\".join(sorted(maxsegment))), file=sys.stderr)\n seg_asize = sum(estimate_size(x, bed, order) for x in segments)\n seg_bsize = sum(\n estimate_size(x, bed, order, conservative=False) for x in segments\n )\n else:\n seg_asize = seg_bsize = 0\n\n sing_asize = sum(estimate_size(x, bed, order) for x in singletons)\n sing_bsize = sum(\n estimate_size(x, bed, order, conservative=False) for x in singletons\n )\n total_asize = sing_asize + seg_asize\n total_bsize = sing_bsize + seg_bsize\n print(\n \"Singleton ({0}): {1} - {2} bp\".format(ns, sing_asize, sing_bsize),\n file=sys.stderr,\n )\n print(\n \"Segment ({0}): {1} - {2} bp\".format(nm, seg_asize, seg_bsize), file=sys.stderr\n )\n print(\n \"Total ({0}): {1} - {2} bp\".format(nt, total_asize, total_bsize),\n file=sys.stderr,\n )\n print(\n \"Average ({0}): {1} bp\".format(nt, (total_asize + total_bsize) / 2),\n file=sys.stderr,\n )" ]
[ "0.6232747", "0.596336", "0.58711517", "0.5702215", "0.55154556", "0.5394213", "0.53759134", "0.53305984", "0.5308964", "0.529063", "0.5197425", "0.51839024", "0.5133967", "0.5053822", "0.50320536", "0.5010542", "0.50038457", "0.4999929", "0.49838173", "0.4962347", "0.49609384", "0.49214706", "0.48969644", "0.48933196", "0.48866424", "0.48850414", "0.48827687", "0.48644063", "0.4854759", "0.48456323", "0.48354244", "0.4816443", "0.48160887", "0.47955847", "0.47694954", "0.47653636", "0.4754155", "0.47535893", "0.47508985", "0.4740349", "0.47355124", "0.4712309", "0.47122893", "0.4693374", "0.4691195", "0.4674327", "0.46700785", "0.46695805", "0.4654817", "0.46472904", "0.46388838", "0.46344438", "0.463277", "0.462129", "0.4612358", "0.4597923", "0.4597923", "0.4596566", "0.4591489", "0.45808166", "0.45659292", "0.45634958", "0.4558433", "0.45572707", "0.4552204", "0.45516923", "0.4551272", "0.4541491", "0.45397142", "0.45340165", "0.45294172", "0.45291016", "0.45244905", "0.45177776", "0.45151123", "0.45150173", "0.45071426", "0.45061377", "0.45047173", "0.45044583", "0.4504141", "0.44990864", "0.4494506", "0.4488189", "0.4483834", "0.4480248", "0.44798985", "0.44797873", "0.44763216", "0.44566116", "0.44549328", "0.44518548", "0.44491574", "0.44479638", "0.44472665", "0.44457617", "0.44414386", "0.44399932", "0.44351396", "0.4433871" ]
0.6051361
1
Create a WordForm of the given CV shape with random segments.
def random_segs(cls, shape, lemma = None, case = None):\n    # For each C or V segment in `shape`, initialize a random Segment of the\n    # appropriate type. Initialize a new WordForm with all these Segments.\n    return cls([Segment(seg_type = seg) for seg in shape], lemma, case)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_word(self):\r\n\r\n template = self.word_constructions.get()\r\n word = \"\"\r\n for c in template:\r\n if c == \"v\":\r\n letter = self.get_letter(100)\r\n else:\r\n letter = self.get_letter(0)\r\n word += letter\r\n\r\n while not any(letter in self.vowels for letter in word):\r\n length = len(word)\r\n if length == 1:\r\n index = 0\r\n elif length == 2:\r\n index = random.randrange(0, 2)\r\n else:\r\n a = len(word) / 2\r\n index = a + random.randrange(-a / 2, a / 2)\r\n word = word[:index] + self.get_letter(100) + word[index + 1:]\r\n\r\n if random.random() > self.capital_chance:\r\n word = word.capitalize()\r\n self.words.append(word)\r\n self.word_count += 1\r\n return word", "def generateByWord(model, voc, maxlen=20, diversity=0.5, numwords=42):\n\n text, sym_indices, indices_sym = voc\n syms = set(text)\n start_index = random.randint(0, len(text) - maxlen - 1) \n generated = ''\n sentence = text[start_index: start_index + maxlen]\n \n #generated += sentence\n generated += ' '.join(sentence)\n print('----- Generating with seed: \"' + ' '.join(sentence) + '\"')\n sys.stdout.write(generated)\n\n for i in range(numwords):\n x = np.zeros((1, maxlen, len(syms)))\n for t, sym in enumerate(sentence):\n x[0, t, sym_indices[sym]] = 1.\n \n preds = model.predict(x, verbose=0)[0]\n next_index = sample(preds, diversity)\n next_sym = indices_sym[next_index]\n generated += ' '+next_sym\n sentence.append(next_sym)\n tmpsentence = sentence[1:]\n sentence = tmpsentence\n sys.stdout.write(next_sym+' ')\n sys.stdout.flush()\n print()", "def create(seed, model, tokenizer, temp=0.5):\n\n dictionary = [\"\"] + list(tokenizer.index_word.values())\n start = np.array(tokenizer.texts_to_sequences(seed)).reshape(1, -1)\n if seed[0] == '<start>':\n output = [seed[-1]]\n else:\n output = seed[:]\n\n for _ in range(45):\n weights = reweight_distribution(model.predict(start), temperature=temp)\n word = np.random.choice(dictionary, size=1, p=weights[0, :])[0]\n if word == '<end>': \n if len(output) > 10:\n break\n else:\n continue\n output.append(word)\n start = np.append(start[0, 1:], tokenizer.texts_to_sequences([word])).reshape(1, -1)\n return \" \".join(output)", "def create_word(self):\n return self.random.choice(CONSONANTS) + self.random.choice(VOWELS)", "def build_vocabulary(image_paths, vocab_size):\n n_image = len(image_paths)\n\n # Since want to sample tens of thousands of SIFT descriptors from different images, we\n # calculate the number of SIFT descriptors we need to sample from each image.\n n_each = int(np.ceil(40000 / n_image)) # You can adjust 10000 if more is desired\n\n # Initialize an array of features, which will store the sampled descriptors\n features = np.zeros((n_image * n_each, 128))\n j=0\n for i, path in enumerate(image_paths):\n # Load SIFT features from path\n descriptors = np.loadtxt(path, delimiter=',',dtype=float)\n\n # TODO: Randomly sample n_each features from descriptors, and store them in features\n #use the randomizer in numpy library to make n_each random index\n idx= np.array(np.random.randint(0,len(descriptors),n_each))\n\n # choose randomly n_each number of discriptor to train K-mean classifier\n for k in idx:\n\n features[j] = descriptors[k,:]\n j = j+1\n # TODO: pefrom k-means clustering to cluster sampled SIFT features into vocab_size regions.\n # You can use KMeans from sci-kit learn.\n # Reference: https://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html\n\n #use K_mean classifier to make Bag of visual words represantation for SIFT 
features\n kmeans = KMeans(n_clusters=250).fit(features)\n #kmeans= clustering = AgglomerativeClustering().fit(features)\n\n\n return kmeans", "def create_new_doc(self, doc: Doc, min_prob: float = 0.25) -> Doc:\n\n # print(\"running on\", doc[:10])\n\n if not self.form_frequencies:\n raise RuntimeError(\n \"Cannot truecase without a dictionary of form frequencies\")\n\n tokens = []\n spaces = []\n doctext = doc.text\n for tok in doc:\n toktext = tok.text\n\n # We only change casing for words in Title or UPPER\n if tok.is_alpha and toktext[0].isupper():\n cond1 = tok.is_upper and len(toktext) > 2 # word in uppercase\n cond2 = toktext[0].isupper(\n ) and not tok.is_sent_start # titled word\n if cond1 or cond2:\n token_lc = toktext.lower()\n if token_lc in self.form_frequencies:\n frequencies = self.form_frequencies[token_lc]\n if frequencies.get(toktext, 0) < min_prob:\n alternative = sorted(\n frequencies.keys(), key=lambda x: frequencies[x])[-1]\n\n # We do not change from Title to to UPPER\n if not tok.is_title or not alternative.isupper():\n toktext = alternative\n\n tokens.append(toktext)\n\n # Spacy needs to know whether the token is followed by a space\n if tok.i < len(doc)-1:\n spaces.append(doctext[tok.idx+len(tok)].isspace())\n else:\n spaces.append(False)\n\n # Creates a new document with the tokenised words and space information\n doc2 = Doc(self.model.vocab, words=tokens, spaces=spaces) #type: ignore\n # print(\"finished with doc\", doc2[:10])\n return doc2", "def create_wordcloud(self, text):\n text = ' '.join(f\"{word}\" for word in text)\n mask = np.array(Image.open(os.path.join(CURRDIR, \"cloud.png\")))\n wc = WordCloud(background_color=\"white\",\n max_words=200,\n mask=mask)\n wc.generate(text)\n wc.to_file(PATH_TO_SAVE_IMG, \"wordle.png\")", "def word(length, upper=False):\n letters = \"abcdefghijklmnopqrstuvwxyz\"\n if upper:\n letters = letters.upper()\n\n def gen(shape):\n lengths = _ints(length, shape)\n field_length = lengths.max()\n dtype = \"U{}\".format(field_length)\n\n result = np.empty(shape, dtype=dtype)\n flat = result.ravel()\n for i, l in enumerate(lengths):\n flat[i] = \"\".join( random.choice(letters) for _ in range(l) )\n return result\n\n return gen", "def create_random_text(word_count=10):\n sample_text_lst = TEXT_BASE_RUS.replace('\\n', '').split(' ')\n generate_text_lst = []\n for i in range(word_count):\n generate_text_lst.append(random.choice(sample_text_lst))\n generate_text = ' '.join(generate_text_lst)\n return generate_text", "def generate(model, voc, maxlen=20, diversity=0.5, numchars=100):\n\n text, char_indices, indices_char = voc\n chars = set(text)\n start_index = random.randint(0, len(text) - maxlen - 1) \n generated = ''\n sentence = text[start_index: start_index + maxlen]\n #print(\"Insert text to start from [min 20 chars]:\")\n #sentence = str(raw_input())\n #sentence = sentence[:maxlen]\n generated += sentence\n print('----- Generating with seed: \"' + sentence + '\"')\n sys.stdout.write(generated)\n\n for i in range(numchars):\n x = np.zeros((1, maxlen, len(chars)))\n for t, char in enumerate(sentence):\n x[0, t, char_indices[char]] = 1.\n \n preds = model.predict(x, verbose=0)[0]\n next_index = sample(preds, diversity)\n next_char = indices_char[next_index]\n generated += next_char\n sentence = sentence[1:] + next_char\n sys.stdout.write(next_char)\n sys.stdout.flush()\n print()", "def generate_sentence():\n markov_chain = makeMarkovDict(\"text.txt\")\n\n # Pick a random word to begin with.\n first_word = 
random.choice(markov_chain.keys()) # Illegall\n\n # print first_word\n # random_choice = random.randint(0, len(markov_chain.keys()))\n # index = 0\n # first_word = \"\"\n # for word in markov_chain:\n # print word\n # if index == random_choice:\n # first_word = word\n # break\n # index += 1\n\n # Based on that word, call function to chose the next word.\n # print markov_chain[first_word]\n # print word_selection(markov_chain[first_word])\n\n lenght_of_sentence = 10\n sentence = [first_word] # First word already in there\n for i in range(lenght_of_sentence):\n sentence.append(word_selection(markov_chain[sentence[i]]))\n # Sentence after loop: ['fish', 'red', 'fish', 'two', 'fish', 'red', 'fish', 'red', 'fish', 'two', 'fish']\n\n # Cap with letter and add period at the end.\n final_sentece = \" \".join(sentence) + \".\"\n return final_sentece.capitalize()", "def create_vocab(vocab_path='ORBvoc-synth.txt'):\n total_time = 10 # seconds\n num_frames = 20\n speed = 3.0\n vocab_builder = VocabularyBuilder()\n for seed in tqdm(range(100), total=100):\n image_builder = DemoImageBuilder(\n mode=ImageMode.MONOCULAR, seed=seed,\n length=total_time * speed\n )\n for idx in range(num_frames):\n time = total_time * idx / num_frames\n image = image_builder.create_frame(time)\n vocab_builder.add_image(image.pixels)\n vocab_builder.build_vocabulary(str(vocab_path))", "def word_cloud_generator(text: str, mask_image: Path, save_to_file=False) -> WordCloud:\n mask = imageio.imread(mask_image)\n word_cloud = WordCloud(colormap='prism', mask=mask, background_color='white')\n word_cloud = word_cloud.generate(text)\n if save_to_file:\n word_cloud.to_file('word_cloud.png')\n return word_cloud", "def generate_wordcloud(topic_description, use_mask='rectangle', store_to_file=False):\n\n # transform the topic description in frequencies\n topic_frequencies = get_word_frequencies(topic_description)\n\n if use_mask == 'oval':\n mask = numpy.array(Image.open(os.path.join(config.__resources_folder_path, \"oval.jpg\")))\n else:\n mask = numpy.array(Image.open(os.path.join(config.__resources_folder_path, \"rect.png\")))\n\n wc = WordCloud(background_color=\"white\", max_words=2000, mask=mask)\n # generate word cloud\n wc.generate_from_frequencies(topic_frequencies)\n\n if store_to_file:\n # store to file\n wc.to_file(os.path.join(config.__inputs_outputs_folder_path, \"wordcloud_{0}_{1}.png\".format(\n hash(str(topic_description)), use_mask)))\n\n # show\n plt.imshow(wc, interpolation='bilinear')\n plt.axis(\"off\")\n plt.show()", "def generate(self, count=15):\n\n sentence = []\n print(\"self.word_dict\", self.word_dict)\n for i in range(count):\n first_tuple = random.choice(list(self.word_dict.keys())) # first word for our sentence\n first_word = random.choice(first_tuple)\n sentence.append(first_word)\n second_word = self.word_dict[first_tuple]\n # print(\"second_word\", second_word)\n next_word = second_word.sample()\n # print(\"next_word\", next_word)\n # first_tuple = second_word\n sentence.append(next_word)\n # end_tuple =\n sentence = ' '.join(sentence)\n return sentence + \".\"\n # for i in range(len(self.token)):\n # val = list(self.word_dict.values())[i]\n # print(len(val))\n # # print(\"val\", val)\n # next_word = val.sample()\n # sentence.append(next_word)\n # sentence = ' '.join(sentence)\n # return sentence + \".\"", "def build_from_words(self, words):\n if isinstance(words, unicode):\n self.build(words)\n elif isinstance(words, list):\n flag = \"seg\"\n assert len(words) > 0\n\n word = words[0]\n if 
isinstance(word, unicode):\n flag = \"seg\"\n elif ((isinstance(word, list) or isinstance(word, tuple)) and\n len(word) == 2 and isinstance(word[0], unicode) and isinstance(word[1], unicode)):\n flag = \"pos\"\n elif ((isinstance(word, list) or isinstance(word, tuple)) and\n len(word) == 4 and isinstance(word[0], unicode) and isinstance(word[1], unicode)):\n flag = \"dp\"\n else:\n flag = \"unknown\"\n\n self._xml4nlp = Element('xml4nlp')\n self._note = SubElement(self._xml4nlp, 'note')\n self._doc = SubElement(self._xml4nlp, 'doc')\n\n para = SubElement(self._doc, 'para')\n sent = SubElement(para, 'sent')\n\n para.set(\"id\", \"0\")\n sent.set(\"id\", \"0\")\n\n self._clean_note()\n\n if flag == \"seg\":\n for i, word in enumerate(words):\n sent.append(Element('word', {\n 'id': unicode(i),\n 'cont': word\n }))\n sent.set('cont', (\"\".join(words)))\n self._set_word_on_note()\n elif flag == \"pos\":\n for i, word_pos in enumerate(words):\n word, pos = word_pos\n sent.append(Element('word', {\n 'id': unicode(i),\n 'cont': word,\n 'pos': pos\n }))\n sent.set('cont', (\"\".join([word[0] for word in words])))\n self._set_pos_on_note()\n elif flag == \"dp\":\n for i, rep in enumerate(words):\n word, pos, head, dep_rel = rep\n sent.append(Element('word', {\n 'id': unicode(i),\n 'cont': word,\n 'pos': pos,\n 'parent': str(int(head) - 1),\n 'relation': dep_rel\n }))\n sent.set('cont', (\"\".join([word[0] for word in words])))\n self._set_parser_on_note()\n\n self.dom = self._xml4nlp", "def generate_words(text='', train_path=None, case_sensitive=True, epochs=20, classifier=nlup.BinaryAveragedPerceptron, **kwargs):\n if train_path:\n generate_sentences.detector = Detector(slurp(train_path), epochs=epochs, nocase=not case_sensitive)\n # generate_sentences.detector = SentenceDetector(text=text, nocase=not case_sensitive, epochs=epochs, classifier=classifier)\n return iter(generate_sentences.detector.segments(text))", "def surface_labelled_data_preparation_pipeline(word_list: [str]):\n X = []\n\n for word in word_list:\n segments = word.split('-')\n segment_features = []\n for i in range(len(segments)):\n features = {}\n\n segment_length = len(segments[i])\n features['length'] = segment_length\n\n features['segment.lower()'] = segments[i].lower()\n features['pos_in_word'] = i\n\n if segment_length % 2 == 0:\n features['even'] = 1\n else:\n features['odd'] = 1\n\n features['begin'] = segments[i][0]\n features['end'] = segments[i][len(segments[i]) - 1]\n\n try:\n features['prev_segment'] = segments[i - 1]\n except IndexError:\n features['prev_segment'] = ''\n # continue\n\n try:\n features['next_segment'] = segments[i + 1]\n except IndexError:\n features['next_segment'] = ''\n\n if segments[0].isupper():\n features['start_upper'] = 1\n else:\n features['start_lower'] = 1\n\n if segments[0] in 'aeiou':\n features['first_vowel'] = 1\n else:\n features['first_const'] = 1\n\n segment_features.append(features)\n\n X.append(segment_features)\n\n return X", "def makeFeatureVec(words, model, num_features):\n featureVec = np.zeros((num_features,),dtype=\"float32\")\n num_words = 0.\n index2word_set = set(model.wv.index2word)\n for word in words:\n if word in index2word_set:\n num_words += 1\n featureVec = np.add(featureVec,model[word]) \n featureVec = np.divide(featureVec,num_words)\n return featureVec", "def gen_words(self, doc):\n pattern = re.compile(u'[\\\\s\\\\d,.<>/?:;\\'\\\"[\\\\]{}()\\\\|~!@#$%^&*\\\\-_=+a-zA-Z,。《》、?:;“”‘’{}【】()…¥!—┄-]+')\n doc = re.sub(pattern, ' ', doc)\n suffix_indexes = 
index_of_sorted_suffix(doc, self.max_word_len)\n word_cands = {}\n # compute frequency and neighbors\n for suf in suffix_indexes:\n word = doc[suf[0]:suf[1]]\n if word not in word_cands:\n word_cands[word] = WordInfo(word)\n word_cands[word].update(doc[suf[0] - 1:suf[0]], doc[suf[1]:suf[1] + 1])\n # compute probability and entropy\n length = len(doc)\n for k in word_cands:\n word_cands[k].compute(length)\n word_cands[k].compute_pp(self.pos_prop)\n # compute aggregation of words whose length > 1\n values = sorted(word_cands.values(), key=lambda x: len(x.text))\n for v in values:\n if len(v.text) == 1:\n continue\n v.compute_cohesion(word_cands)\n\n return sorted(values, key=lambda v: v.freq, reverse=True)", "def make_text(markov_chains):\n\n random_num = generate_random_number(markov_chains.keys())\n\n random_text = []\n\n start_words = generate_start_words(random_num, markov_chains.keys())\n \n random_text.extend(start_words)\n\n\n for i in range(500):\n word_tuple = (random_text[-2],random_text[-1])\n next_word = add_next_word(word_tuple, markov_chains)\n random_text.append(next_word)\n\n return random_text", "def word2vec_model(sentences, size=100, min_count=5, window=5,\n negative=5, cbow=True, iterations=5, seed=0,\n workers=1):\n if cbow is True:\n sg = 0\n else:\n sg = 1\n model = Word2Vec(size=size, window=window,\n min_count=min_count, workers=workers,\n sg=sg, negative=negative, seed=seed)\n\n model.build_vocab(sentences)\n\n model.train(sentences, total_examples=model.corpus_count,\n epochs=iterations)\n return model", "def create_random_tags(count=100):\n all_words = words.words('en')\n selected_words = []\n picker = ColorPicker(reset=True)\n colors = picker._get_colors()\n while count > 0:\n word = random.choice(all_words)\n selected_words.insert(0, word)\n all_words.remove(word)\n count += -1\n del all_words\n for word in selected_words:\n color = colors.next()\n tag = Tag(slug=slugify(word), tag=word, color=color)\n tag.save()", "def generate_sample(sentences, vocab, window):\n for sentence in sentences:\n word_vocabs = [vocab[w] for w in sentence if w in vocab and\n vocab[w]['prob'] > np.random.rand()]\n\n for index, word in enumerate(word_vocabs):\n center = word['index']\n reduced_window = np.random.randint(1, window + 1)\n\n # words before the center word\n for context in word_vocabs[max(0, index - reduced_window):index]:\n target = context['index']\n yield center, target\n\n # words after the center word\n for context in word_vocabs[(index + 1):(index + 1 + reduced_window)]:\n target = context['index']\n yield center, target", "def generate_text_owc(model: Dict[str, Set[str]], n: int) -> str:\n # ACCUMULATOR: a list of the randomly-generated words so far\n words_so_far = []\n # We've provided this template as a starting point; you may modify it as necessary.\n words_so_far.append(generate_new_word(model))\n for x in range(0, n-1):\n key = words_so_far[x]\n new_word = generate_next_word(model,key)\n if new_word == \".\":\n words_so_far[x] = words_so_far[x]+'.'\n new_word= generate_new_word(model)\n elif new_word == {}:\n new_word = generate_new_word(model)\n words_so_far.append(new_word)\n\n return str.join(' ', words_so_far)", "def generate_words(num_words, word_len, grid, reject_func=is_overlapping):\n height, width = len(grid), len(grid[0])\n restrictions = position_restrictions(word_len, height, width)\n word_hashes = set()\n words_positions = []\n words = []\n while len(word_hashes) < num_words:\n cardinal = random.choice(list(restrictions.keys()))\n (min_h, max_h), 
(min_w, max_w) = restrictions[cardinal]\n x0, y0 = random.randint(min_h, max_h), random.randint(min_w, max_w)\n x, y = DIRECTIONS[cardinal]\n positions = [(x0 + x * i, y0 + y * i) for i in range(word_len)]\n if (word_hash := (cardinal, (x0, y0))) not in word_hashes \\\n and not reject_func(p_set := set(positions), words_positions):\n words.append(\"\".join(str(grid[x][y]) for x, y in positions))\n word_hashes.add(word_hash)\n words_positions.append(p_set)\n return words", "def build_vocab(sentences_list, vocab_size, visual_fld):\n words = [word for sentence in sentences_list for word in sentence]\n utils.safe_mkdir(visual_fld)\n with open(os.path.join(visual_fld, 'vocab.tsv'), 'w') as fd:\n dictionary = {}\n index_dictionary = {}\n count = [('UNK', -1)]\n count.extend(Counter(words).most_common(vocab_size - 1))\n for index, (word, _) in enumerate(count):\n dictionary[word] = index\n index_dictionary[index] = word\n fd.write(word + '\\n')\n\n return dictionary, index_dictionary", "def generate_words(self, count=100):\n\n with self.open_text_data() as f:\n result = self.read_words(f, count=count)\n return result", "def create_word(char_list):", "def fill_list(self):\n for i in range(0, constants.STARTING_WORDS):\n random_word = constants.LIBRARY[random.randint(0, len(constants.LIBRARY) - 1)]\n x = random.randint(1, constants.MAX_X - len(self.get_text()))\n y = random.randint(1, constants.MAX_Y - len(self.get_text()))\n position = Point(x, y)\n self.set_position(position)\n velocity = Point(0, 1)\n self._add_segment(random_word, position, velocity)\n print()", "def build_vocab(words, vocab_size, visual_fld=None):\n utils.safe_mkdir(visual_fld)\n file = open(os.path.join(visual_fld, 'vocab.tsv'), 'w',encoding='utf8')\n\n dictionary = dict()\n count = [('UNK', -1)]\n index = 0\n count.extend(Counter(words).most_common(vocab_size - 1))\n\n for word, _ in count:\n dictionary[word] = index\n index += 1\n file.write(word + '\\n')\n\n index_dictionary = dict(zip(dictionary.values(), dictionary.keys()))\n file.close()\n return dictionary, index_dictionary", "def random_shape(gts, reference_shape, pca_model):\n\n def synthesize(lms):\n return detect.synthesize_detection(pca_model, menpo.shape.PointCloud(\n lms).bounding_box()).points.astype(np.float32)\n\n bb, = tf.py_func(synthesize, [gts], [tf.float32])\n shape = align_reference_shape(reference_shape, bb)\n shape.set_shape(reference_shape.get_shape())\n\n return shape", "def sample( self, n, words = 100, n_views = 3 ):\n\n # Initialise each view to be 0s\n docs = [ sc.zeros( (n, self.W) ) for v in xrange( n_views ) ]\n\n for i in xrange( n ):\n # Draw topic weights once for a document\n weights = dirichlet( self.alphas )\n\n # Draw the words/k for each view\n for v in xrange( n_views ):\n words_ = words/n_views\n # Get the topic counts\n cnts = multinomial( words_, weights )\n for (k, cnt) in zip( xrange( self.K ), cnts ):\n freq = multinomial( cnt, self.topics.T[k] )/float(words_)\n # Get the word frequencies for this document\n docs[v][i] += freq\n\n return docs", "def generate_sample(index_words, context_window_size):\n for index, center in enumerate(index_words):\n context = random.randint(1, context_window_size)\n # get a random target before the center word\n for target in index_words[max(0, index - context): index]:\n yield center, target\n # get a random target after the center wrod\n for target in index_words[index + 1: index + context + 1]:\n yield center, target", "def words_to_edges_v(words, word_threshold=DEFAULT_MIN_WORDS_VERTICAL):\n 
# Find words that share the same left, right, or centerpoints\n by_x0 = cluster_objects(words, \"x0\", 1)\n by_x1 = cluster_objects(words, \"x1\", 1)\n by_center = cluster_objects(words, lambda x: (x[\"x0\"] + x[\"x1\"]) / 2, 1)\n clusters = by_x0 + by_x1 + by_center\n\n # Find the points that align with the most words\n sorted_clusters = sorted(clusters, key=lambda x: -len(x))\n large_clusters = filter(lambda x: len(x) >= word_threshold, sorted_clusters)\n\n # For each of those points, find the bboxes fitting all matching words\n bboxes = list(map(objects_to_bbox, large_clusters))\n\n # Iterate through those bboxes, condensing overlapping bboxes\n condensed_bboxes = []\n for bbox in bboxes:\n overlap = False\n for c in condensed_bboxes:\n if get_bbox_overlap(bbox, c):\n overlap = True\n break\n if not overlap:\n condensed_bboxes.append(bbox)\n\n if len(condensed_bboxes) == 0:\n return []\n\n condensed_rects = map(bbox_to_rect, condensed_bboxes)\n sorted_rects = list(sorted(condensed_rects, key=itemgetter(\"x0\")))\n\n max_x1 = max(map(itemgetter(\"x1\"), sorted_rects))\n min_top = min(map(itemgetter(\"top\"), sorted_rects))\n max_bottom = max(map(itemgetter(\"bottom\"), sorted_rects))\n\n # Describe all the left-hand edges of each text cluster\n edges = [\n {\n \"x0\": b[\"x0\"],\n \"x1\": b[\"x0\"],\n \"top\": min_top,\n \"bottom\": max_bottom,\n \"height\": max_bottom - min_top,\n \"orientation\": \"v\",\n }\n for b in sorted_rects\n ] + [\n {\n \"x0\": max_x1,\n \"x1\": max_x1,\n \"top\": min_top,\n \"bottom\": max_bottom,\n \"height\": max_bottom - min_top,\n \"orientation\": \"v\",\n }\n ]\n\n return edges", "def createParagraph(self, nsentences=0, words=[]):\n ww = []\n for w in words:\n w_ = w.lower()\n if w_ in self.plain_words:\n ww.append(w_)\n words = ww\n if not nsentences:\n nsentences = n.random.randint(3,10)\n sentences = []\n ri = n.random.randint\n for i in range(nsentences):\n sentences.append(self.createSentence(ri(1,4),ri(1,4),ri(1,4),ri(1,4),prep=n.random.randint(0,2)))\n\n w = set()\n s1 = []\n s2 = []\n for s in sentences:\n have_word = 0\n for ww in words:\n if ww in s:\n w.add(ww)\n have_word = 1\n if have_word:\n s1.append(s)\n else:\n s2.append(s)\n w2 = set(words)\n w_ = w2.difference(w)\n while s2 and w_:\n ss = self.createSentence(prep=n.random.randint(0,2))\n w__ = w_.copy()\n for w in w__:\n if w in ss:\n s1.append(ss)\n s2.pop()\n w_.remove(w)\n paragraph = \". \".join(s1+s2)\n return paragraph", "def gen_fake_topic_word(topic_word_shape, fake_idxs):\n if topic_word_shape[0] != len(fake_idxs):\n raise ValueError(\"topic num isn't equal\")\n fake_topic_word = np.full(topic_word_shape, -1, dtype=np.int32)\n for tidx in range(topic_word_shape[0]):\n tmp = len(fake_idxs[0])\n for widx in fake_idxs[tidx]:\n fake_topic_word[tidx][widx] = tmp\n tmp -= 1\n return fake_topic_word", "def makeFeatureVec(words, model, num_features):\n\t# Initialize an empty numpy array (for speed) \n\tfeatureVec = np.zeros((num_features,), dtype=\"float32\")\n\t# Initialize a counter (number of words)\n\tnwords = 0.\n\t \n\t# Index2word is a list that contains the names of the words in the model's vocabulary. 
\n\tindex2word_set = set(model.index2word)\n\t# \n\t# Loop over each word in the review and, if it is in the model's vocaublary, add \n\t# its feature vector to the total \n\tfor word in words:\n\t\tif word in index2word_set:\n\t\t\tnwords = nwords + 1.\n\t\t\tfeatureVec = np.add(featureVec,model[word])\n\t# \n\t# Divide the result by the number of words to get the average \n\tfeatureVec = np.divide(featureVec,nwords)\n\treturn featureVec", "def build_doc_sense_vec(self):\n\t\twith codecs.open(self.vocab_file, encoding='utf-8', mode='r') as infile:\n\t\t\tline = infile.readline()\n\t\t\ti = 0\n\t\t\twhile line:\n\t\t\t\tword = line.split()[0]\n\t\t\t\tif not self.word2IdVocabulary.has_key(word):\n\t\t\t\t\t# print i, word\n\t\t\t\t\t# else:\n\t\t\t\t\tself.word2IdVocabulary[word] = i\n\t\t\t\tif not self.id2WordVocabulary.has_key(i):\n\t\t\t\t\tself.id2WordVocabulary[i] = word\n\t\t\t\tline = infile.readline()\n\t\t\t\ti += 1\n\t\t\tself.vocab_num = len(self.word2IdVocabulary)\n\t\t\tprint \"vocabulary number:\" + str(self.vocab_num)\n\n\t\twith codecs.open(self.vec_file, encoding='utf-8', mode='r') as vecfile:\n\t\t\twith codecs.open(self.vec_out_file, encoding='utf-8', mode='a+') as vec_outfile:\n\n\t\t\t\tfor i, line in enumerate(vecfile):\n\t\t\t\t\tif i % 10000 == 0:\n\t\t\t\t\t\tprint i\n\t\t\t\t\t# if i > 72:\n\t\t\t\t\t# \tbreak\n\t\t\t\t\tif i == 0:\n\t\t\t\t\t\ta, b, c = map(int, line.split()[:3])\n\t\t\t\t\t\tprint('Number of sememes: {}\\n'\n\t\t\t\t\t\t\t 'Number of words: {}\\n'\n\t\t\t\t\t\t\t 'Dimension of vectors: {}'.format(a, b, c))\n\t\t\t\t\telif i > 462667:\n\t\t\t\t\t\tsline = line.strip('\\n').split()\n\t\t\t\t\t\tword = sline[0]\n\t\t\t\t\t\tvector_list = []\n\t\t\t\t\t\tvector_list.append(sline[1:])\n\t\t\t\t\t\tvector_array = np.array(vector_list)\n\t\t\t\t\t\tword_id = self.word2IdVocabulary[word]\n\t\t\t\t\t\tif not self.vectors.has_key(word_id):\n\t\t\t\t\t\t\tself.vectors[word_id] = vector_array\n\t\t\t\t\t\t# vector_mean = np.mean(vector_array, axis=0)\n\t\t\t\t\t\tif not self.vector_mean.has_key(word_id):\n\t\t\t\t\t\t\tself.vector_mean[word_id] = vector_array\n\t\t\t\t\t\t# vec_outfile.write(line)\n\t\t\t\t\telif i > 462887:\n\t\t\t\t\t\tbreak\n\t\t\t\t\telse:\n\t\t\t\t\t\tsline = line.strip('\\n').split()\n\t\t\t\t\t\tword = sline[0]\n\t\t\t\t\t\tsense_num = int(sline[1])\n\t\t\t\t\t\tvectors = sline[2:sense_num*c+2] # (sense_num*c+2)\n\t\t\t\t\t\tvector_list = []\n\t\t\t\t\t\tfor start in range(0, len(vectors), c):\n\t\t\t\t\t\t\tvector_list.append(list(map(float, vectors[start: start+c])))\n\t\t\t\t\t\tvector_array = np.array(vector_list)\n\t\t\t\t\t\tword_id = self.word2IdVocabulary[word]\n\t\t\t\t\t\tif not self.vectors.has_key(word_id):\n\t\t\t\t\t\t\tself.vectors[word_id] = vector_array\n\t\t\t\t\t\tvector_mean = np.mean(vector_array, axis=0)\n\t\t\t\t\t\tif not self.vector_mean.has_key(word_id):\n\t\t\t\t\t\t\tself.vector_mean[word_id] = vector_mean\n\t\t\t\t\t\t'''j = 0\n\t\t\t\t\t\tfor each_sense_vec in vector_array:\n\t\t\t\t\t\t\tif len(vector_array) > 1:\n\t\t\t\t\t\t\t\tnew_line = word + '_' + str(j) + ' ' + np.array2string(each_sense_vec, max_line_width=2000,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tformatter={'float_kind': lambda x: '%6f' % x})[1:-1] + '\\n'\n\t\t\t\t\t\t\t\tj += 1\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tnew_line = word + ' ' + np.array2string(each_sense_vec, max_line_width=2000,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t formatter={'float_kind': lambda\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t x: '%6f' % x})[1:-1] + 
'\\n'\n\n\t\t\t\t\t\t\tvec_outfile.write(new_line)'''\n\n\t\twith codecs.open(self.doc_file, encoding='utf-8', mode='r') as docfile:\n\t\t\twith codecs.open(self.doc_out_file, encoding='utf-8', mode='a+') as doc_outfile:\n\t\t\t\twith codecs.open(self.vec_out_file_bydoc, encoding='utf-8', mode='a+') as vec_outfile_bydoc:\n\t\t\t\t\tprint \"Processing document file......\"\n\t\t\t\t\tline = docfile.readline().strip('\\n')\n\t\t\t\t\twhile line:\n\t\t\t\t\t\twords = line.split()\n\t\t\t\t\t\tnew_words = [x for x in words]\n\t\t\t\t\t\tfor i in range(len(words)):\n\t\t\t\t\t\t\tword_id = self.word2IdVocabulary[words[i]]\n\t\t\t\t\t\t\tsense_vecs = self.vectors[word_id]\n\t\t\t\t\t\t\tsense_num = len(sense_vecs)\n\t\t\t\t\t\t\tif sense_num > 1:\n\t\t\t\t\t\t\t\tcontext_words = []\n\t\t\t\t\t\t\t\tfor x in range(i-int(self.context_num), i+int(self.context_num)+1):\n\t\t\t\t\t\t\t\t\tif x != i and 0 <= x < len(words):\n\t\t\t\t\t\t\t\t\t\tcontext_words.append(words[x])\n\t\t\t\t\t\t\t\tsense_index = self.select_attention(context_words, sense_vecs)\n\t\t\t\t\t\t\t\tword_vec_i = sense_vecs[sense_index]\n\t\t\t\t\t\t\t\tnew_wordi = words[i] + '_' + str(sense_index)\n\t\t\t\t\t\t\t\tself.vector_word_doc[new_wordi.encode('utf-8')] = word_vec_i\n\t\t\t\t\t\t\t\tnew_words[i] = new_wordi\n\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tword_vec_i = sense_vecs[0]\n\t\t\t\t\t\t\t\tself.vector_word_doc[words[i].encode('utf-8')] = word_vec_i\n\t\t\t\t\t\t\tvec_outfile_bydoc.write(new_words[i] + ' ' + np.array2string(word_vec_i, max_line_width=2000,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t formatter={'float_kind': lambda x: '%6f' % x})[1:-1] + '\\n')\n\n\t\t\t\t\t\tdoc_outfile.write(' '.join(new_words) + '\\n')\n\n\t\t\t\t\t\tline = docfile.readline()\n\n\t\treturn self.vector_word_doc", "def make_image():\n # get the mask\n twitter_mask = np.array(Image.open('resource/twitter-mask.png'))\n\n wc = WordCloud(background_color='white', max_words=100, mask=twitter_mask, contour_width=3,\n contour_color='steelblue')\n\n # generate word cloud\n wc.generate_from_frequencies(get_word_frequency())\n\n # store to file\n wc.to_file('/tmp/twitter.png')\n\n # show\n frame = cv2.imread('/tmp/twitter.png')\n cv2.imshow('figure', frame)\n cv2.waitKey(60000)\n cv2.destroyAllWindows()", "def get_random_words_from_wordnik(part_of_speech, limit):\n words = words_api.getRandomWords(includePartOfSpeech=part_of_speech, limit=limit)\n\n random_words = []\n for word in words:\n random_words.append(word.word)\n # pprint(random_words)\n return random_words", "def generateWord(self, parameters=None):\n\t\t# Initial set-up\n\t\tvowels = ['a', 'e', 'i', 'o', 'u']\n\t\tspecialVowels = ['y']\n\n\t\tconsonants = ['b', 'c', 'd', 'f', 'g', 'h', 'k', 'l', 'm', 'n', 'p', 'r', 's', 't']\n\t\tspecialConsonants = ['j', 'q', 'v', 'w', 'x', 'z']\n\n\t\tnewLetterFraction = 5\n\t\tvowelChance = 50 #percent\n\n\t\t#Determine how many words we're going to have to generate\n\t\trepeats = 1\n\t\tif parameters and len(parameters) > 0:\n\t\t\trepeats = SharedFunctions.parseInt(parameters[0], 1, 1, 25)\n\n\t\twords = []\n\t\tfor i in xrange(0, repeats):\n\t\t\tword = u\"\"\n\t\t\tcurrentVowelChance = vowelChance\n\t\t\tcurrentNewLetterFraction = newLetterFraction\n\t\t\tconsonantCount = 0\n\t\t\twhile random.randint(0, currentNewLetterFraction) <= 6:\n\t\t\t\tif random.randint(1, 100) <= currentVowelChance:\n\t\t\t\t\tconsonantCount = 0\n\t\t\t\t\t#vowel. 
Check if we're going to add a special or normal vowel\n\t\t\t\t\tif random.randint(1, 100) <= 10:\n\t\t\t\t\t\tword += random.choice(specialVowels)\n\t\t\t\t\t\tcurrentVowelChance -= 30\n\t\t\t\t\telse:\n\t\t\t\t\t\tword += random.choice(vowels)\n\t\t\t\t\t\tcurrentVowelChance -= 20\n\t\t\t\telse:\n\t\t\t\t\tconsonantCount += 1\n\t\t\t\t\t#consonant, same deal\n\t\t\t\t\tif random.randint(1, 100) <= 25:\n\t\t\t\t\t\tword += random.choice(specialConsonants)\n\t\t\t\t\t\tcurrentVowelChance += 30\n\t\t\t\t\telse:\n\t\t\t\t\t\tword += random.choice(consonants)\n\t\t\t\t\t\tcurrentVowelChance += 20\n\t\t\t\t\tif consonantCount > 3:\n\t\t\t\t\t\tcurrentVowelChance = 100\n\t\t\t\tcurrentNewLetterFraction += 1\n\n\t\t\t#Enough letters added. Finish up\n\t\t\tword = word[0].upper() + word[1:]\n\t\t\twords.append(word)\n\n\t\t#Enough words generated, let's return the result\n\t\treturn u\", \".join(words)", "def createSentence(self, n=0, v=0, o=0, p=0,prep=True):\n sentence = ''\n if not n:\n n = np.random.randint(1, 5)\n if not v:\n v = np.random.randint(1, 5)\n if not o:\n o = np.random.randint(1, 5)\n sentence += self.createPhrase(nwords=n) + ' '\n if sentence[:-1] not in ('mi', 'sina'):\n sentence += 'li '\n sentence += self.createPhrase(nwords=v) + ' e '\n sentence += self.createPhrase(nwords=o)\n if prep:\n if not p:\n p = np.random.randint(1, 5)\n sentence += ' ' + np.random.choice(self.prepositions) + ' ' + self.createPhrase(nwords=p)\n return sentence", "def create_vocabulary(sentences, path):\n print('creating vocab..')\n\n word_dict = dict(); vocabulary = dict()\n for sentence in sentences:\n for word in nltk.word_tokenize(sentence):\n if word not in word_dict:\n word_dict[word] = ''\n word_dict['<s>'] = ''\n word_dict['</s>'] = ''\n\n with open(path, encoding=\"utf8\") as f:\n for line in f:\n word, vec = line.split(' ', 1)\n if word in word_dict:\n vocabulary[word] = np.fromstring(vec, sep=' ')\n\n print('vocabulary was created successfully!')\n return vocabulary", "def generate(net, z, maxlen=50, im=None, init=None, use_end=True):\n caption = lm_tools.sample(net, z['word_dict'], z['index_dict'], num=maxlen, Im=im, initial=init, use_end=use_end)\n print ' '.join(caption)", "def generate(handle, occurrence_threshold): # verbose, static creation\n tokenizer = Tokenizer(occurrence_threshold=occurrence_threshold)\n tokenizer.generate(handle)\n return tokenizer", "def buildVocabulary(paragraphs, verbose=True):\n vocabulary = []\n \n for p in paragraphs:\n for word in p.split():\n vocabulary.append(word)\n\n vocabulary = set(vocabulary)\n if verbose:\n print('Built vocabulary of %d unique words'%len(vocabulary))\n \n return list(vocabulary)", "def create_wordcloud(processed_text, filename):\n\n\n pyplot.clf()\n wordcloud = WordCloud(background_color='white', max_font_size=40, relative_scaling=.5).generate(' '.join(processed_text))\n pyplot.imshow(wordcloud)\n pyplot.axis('off')\n\n pyplot.savefig(filename)", "def card_generator(mw, prob_dist_dict, gensim_model):\n\n # First things first: make sure that the word is actually in the word2vec vocab.\n # word_vectors = gensim_model.wv\n if mw not in gensim_model.wv.vocab:\n return False\n\n # Generate five categories with the weighted probabilities based on their frequency in the gold standard data.\n five_semrels_list = select_five_categories(prob_dist_dict)\n five_semrels = pd.Series(five_semrels_list)\n\n # Count the number of instances of each semrel category in that list.\n semrels_counts = dict( five_semrels.value_counts() )\n\n # Generate 
the semantic relations dictionary.\n srdict = sr.make_semrel_dict(mw)\n\n # Rejig five_semrels_list, if need be, to one whose labels are compatible with the cardinality of the sets available\n # in srdict.\n good_five_labels = get_good_label_distrib(srdict, semrels_counts)\n\n # Now we just populate a list with the required number of each kind of word!\n # First, initialise list to contain the five final Taboo words (yay!)\n tws = []\n\n # Go through good_five_labels and, for the labels that aren't 'collocation', access their list in the dictionary and\n # randomly select however many out of it.\n for label, count in good_five_labels.items():\n if label != 'collocation':\n tws.extend( rd.sample( tuple( srdict[label] ), count ) )\n\n # Now, take the number of collocations needed and return the most similar words according to gensim, removing the\n # words that are forbidden (i.e. the main word and also the other words that are already in tws)\n forbidden_words = set(tws + [mw])\n num_coll = good_five_labels['collocation']\n collocates = sr.get_collocations(mw, forbidden_words, gensim_model, num_collocates = num_coll)\n\n # If there are more collocates than needed, randomly select num_coll of them and add to tws. Else just add list to tws.\n if len(collocates) > num_coll:\n tws.extend( rd.sample( tuple(collocates), num_coll ) )\n else:\n tws.extend(collocates)\n\n return {mw: tws}", "def paragraph_selection(sample, mode):\n # predefined maximum length of paragraph\n MAX_P_LEN = 500\n # predefined splitter\n splitter = u'<splitter>'\n # topN of related paragraph to choose\n topN = 3\n doc_id = None\n if 'answer_docs' in sample and len(sample['answer_docs']) > 0:\n doc_id = sample['answer_docs'][0]\n if doc_id >= len(sample['documents']):\n # Data error, answer doc ID > number of documents, this sample\n # will be filtered by dataset.py\n return\n for d_idx, doc in enumerate(sample['documents']):\n if 'segmented_paragraphs_scores' not in doc:\n continue\n status = dup_remove(doc)\n segmented_title = doc[\"segmented_title\"]\n title_len = len(segmented_title)\n para_id = None\n if doc_id is not None:\n para_id = sample['documents'][doc_id]['most_related_para']\n total_len = title_len + sum(doc['paragraphs_length'])\n # add splitter\n para_num = len(doc[\"segmented_paragraphs\"])\n total_len += para_num\n if total_len <= MAX_P_LEN:\n incre_len = title_len\n total_segmented_content = copy.deepcopy(segmented_title)\n for p_idx, segmented_para in enumerate(doc[\"segmented_paragraphs\"]):\n if doc_id == d_idx and para_id > p_idx:\n incre_len += len([splitter] + segmented_para)\n if doc_id == d_idx and para_id == p_idx:\n incre_len += 1\n total_segmented_content += [splitter] + segmented_para\n if doc_id == d_idx:\n answer_start = incre_len + sample['answer_spans'][0][0]\n answer_end = incre_len + sample['answer_spans'][0][1]\n sample['answer_spans'][0][0] = answer_start\n sample['answer_spans'][0][1] = answer_end\n doc[\"segmented_paragraphs\"] = [total_segmented_content]\n doc[\"segmented_paragraphs_scores\"] = [1.0]\n doc['paragraphs_length'] = [total_len]\n doc['paragraphs'] = [''.join(total_segmented_content)]\n doc['most_related_para'] = 0\n continue\n # find topN paragraph id\n para_infos = []\n for p_idx, (para_tokens, para_scores) in \\\n enumerate(zip(doc['segmented_paragraphs'], doc['segmented_paragraphs_scores'])):\n para_infos.append((para_tokens, para_scores, len(para_tokens), p_idx))\n para_infos.sort(key=lambda x: (-x[1], x[2]))\n topN_idx = []\n for para_info in para_infos[:topN]:\n 
topN_idx.append(para_info[-1])\n final_idx = []\n total_len = title_len\n if doc_id == d_idx:\n if mode == \"train\":\n final_idx.append(para_id)\n total_len = title_len + 1 + doc['paragraphs_length'][para_id]\n for id in topN_idx:\n if total_len > MAX_P_LEN:\n break\n if doc_id == d_idx and id == para_id and mode == \"train\":\n continue\n total_len += 1 + doc['paragraphs_length'][id] \n final_idx.append(id)\n total_segmented_content = copy.deepcopy(segmented_title)\n final_idx.sort()\n incre_len = title_len\n for id in final_idx:\n if doc_id == d_idx and id < para_id:\n incre_len += 1 + doc['paragraphs_length'][id]\n if doc_id == d_idx and id == para_id:\n incre_len += 1\n total_segmented_content += [splitter] + doc['segmented_paragraphs'][id]\n if doc_id == d_idx:\n answer_start = incre_len + sample['answer_spans'][0][0]\n answer_end = incre_len + sample['answer_spans'][0][1]\n sample['answer_spans'][0][0] = answer_start\n sample['answer_spans'][0][1] = answer_end\n doc[\"segmented_paragraphs\"] = [total_segmented_content]\n doc[\"segmented_paragraphs_scores\"] = [1.0]\n doc['paragraphs_length'] = [total_len]\n doc['paragraphs'] = [''.join(total_segmented_content)]\n doc['most_related_para'] = 0", "def build_model():\n \n #english trained optimized pipeline for word embedding\n nlp = spacy.load(\"en_core_web_md\") # this model will give you 300D\n \n pipeline = Pipeline([\n ('features', FeatureUnion([\n ('text_pipeline', Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer()),\n ])),\n \n ('embeddings_pipeline', Pipeline([\n ('vect_trans',SpacyVectorTransformer(nlp)),\n ('reduce_dim', TruncatedSVD(50)),\n ])),\n \n ])),\n \n ('clf', MultiOutputClassifier(RandomForestClassifier()))\n ])\n \n parameters = {\n 'features__text_pipeline__vect__max_df': (0.5, 0.75, 1.0),\n 'features__embeddings_pipeline__reduce_dim__n_components':(50,60,70,100,120,130,150)\n }\n cv = GridSearchCV(pipeline, param_grid=parameters,cv=2)\n \n return cv", "def build_data(self, data_folder, cv=10, clean_string=False):\n revs = []\n # pos_file = loadmodel(data_folder[0])\n # neg_file = loadmodel(data_folder[1])\n pos_texts = loadmodel(data_folder[0]).get(\"content\")\n neg_texts = loadmodel(data_folder[1]).get(\"content\")\n vocab = defaultdict(float)\n happyList = [ \":-)\", \":)\", \":D\", \":o)\", \":]\", \":3\", \":c)\", \":>\", \"=]\", \"8)\", \"=)\", \":}\", \":^)\", \":?)\", \":-)\", \": )\", \": D\", \": o)\", \":]\", \": 3\", \":c)\", \":>\", \"= ]\", \"8 )\", \"= )\", \": }\", \":^)\", \":?)\" ]\n sadList = [ \">:[\", \":-(\", \":(\", \":-c\", \":c\", \":-<\", \":?C\", \":<\", \":-[\", \":[\", \":{\",\">:[\", \":-(\", \": (\", \":-c\", \": c\", \": -<\", \": ?C\", \": <\", \": -[\", \": [\", \": {\" ]\n for line in pos_texts:\n rev = []\n rev.append(line.strip())\n\n if clean_string:\n orig_rev = self.dc.clean_str(\" \".join(rev))\n else:\n orig_rev = \" \".join(rev).lower()\n #print orig_rev\n words = set(orig_rev.split())\n for word in words:\n if word in happyList or word in sadList:\n pass\n else:\n vocab[word] += 1\n datum = {\"y\":1,\n \"text\": orig_rev,\n \"num_words\": len(orig_rev.split()),\n \"split\": np.random.randint(0,cv)}\n revs.append(datum)\n\n for line in neg_texts:\n rev = []\n rev.append(line.strip())\n if clean_string:\n orig_rev = self.dc.clean_str(\" \".join(rev))\n else:\n orig_rev = \" \".join(rev).lower()\n words = set(orig_rev.split())\n for word in words:\n if word in happyList or word in sadList:\n pass\n else:\n vocab[word] += 1\n datum = 
{\"y\":0,\n \"text\": orig_rev,\n \"num_words\": len(orig_rev.split()),\n \"split\": np.random.randint(0,cv)}\n revs.append(datum)\n return revs, vocab", "def generate(size, data_dim=5, n_phrase_labels=4, n_words=3,\n n_phrase_words=3, n_phrases=5, label_noise=0.,\n min_sent_len=5, max_sent_len=5, tag_end=True):\n assert n_words < 256\n assert max_sent_len >= n_phrase_words\n global dictionary, phrases\n\n # generate dictionary\n dictionary = uniform(-1.0, 1.0, size=(n_words, data_dim))\n\n # generate n_phrases unique word sequences of length n_phrase_words\n print \"Generating %d phrases\" % n_phrases\n phrases = []\n phrase_labels = []\n while len(phrases) != n_phrases:\n phrases = np.unique(np.array([\"\".join(map(chr, randint(n_words, size=n_phrase_words)))\n for i in xrange(n_phrases)], dtype=np.object))\n assert np.unique(map(len, phrases)) == n_phrase_words\n phrase_labels = 1+randint(n_phrase_labels-1, size=n_phrases)\n\n # generate 'sentences'\n print \"Generating %d sentences\" % sum(size)\n Xind = []\n Y = []\n for i in xrange(sum(size)):\n while True:\n sent_len = randint(min_sent_len, max_sent_len+1)\n sent = \"\".join(map(chr, randint(n_words, size=sent_len)))\n if contains_any_phrase(sent, phrases):\n print \".\",\n break\n Y.append(np.zeros(sent_len,dtype=np.int))\n Xind.append(sent)\n\n # generate labels for dataset\n print \"Generating labels for the sentences...\"\n for phrase, plabel in zip(phrases, phrase_labels):\n for idx, sent in enumerate(Xind):\n start = 0\n while True:\n sidx = sent.find(phrase, start)\n if sidx < 0:\n break\n if tag_end:\n Y[idx][sidx+len(phrase)-1] = plabel\n else:\n Y[idx][sidx] = plabel\n start += 1\n\n print \"Trafo...\"\n # transform dataset to code\n if data_dim > 1:\n X = [[dictionary[ord(c)] for c in sent] for sent in Xind]\n else:\n X = [[ord(c) for c in sent] for sent in Xind]\n\n Xtrain, Xtest = X[:size[0]], X[size[0]:]\n Ytrain, Ytest = Y[:size[0]], Y[size[0]:]\n\n # training label noise\n for sent in Ytrain:\n mask = uniform(size=sent.size) < label_noise\n sent[mask] = randint(n_phrase_labels, size=mask.sum())\n print \"Done.\"\n\n return Xtrain, Xtest, Ytrain, Ytest", "def feature_vecs_DOC(train_pos, train_neg, test_pos, test_neg):\n # Doc2Vec requires LabeledSentence objects as input.\n # Turn the datasets from lists of words to lists of LabeledSentence objects.\n # YOUR CODE HERE\n labeled_train_pos = []\n labeled_train_neg = []\n labeled_test_pos = []\n labeled_test_neg = []\n i = 0\n for line in train_pos:\n labeled_train_pos.append(LabeledSentence(line, ['TRAIN_POS_%i' % i]))\n i += 1\n i = 0\n for line in train_neg:\n labeled_train_neg.append(LabeledSentence(line, ['TRAIN_NEG_%i' % i]))\n i += 1\n i = 0\n for line in test_pos:\n labeled_test_pos.append(LabeledSentence(line, ['TEST_POS_%i' % i]))\n i += 1\n i = 0\n for line in test_neg:\n labeled_test_neg.append(LabeledSentence(line, ['TEST_NEG_%i' % i]))\n i += 1\n\n # Initialize model\n model = Doc2Vec(min_count=1, window=10, size=100, sample=1e-4, negative=5, workers=4)\n sentences = labeled_train_pos + labeled_train_neg + labeled_test_pos + labeled_test_neg\n model.build_vocab(sentences)\n\n # Train the model\n # This may take a bit to run \n for i in range(5):\n print \"Training iteration %d\" % (i)\n random.shuffle(sentences)\n model.train(sentences)\n\n # Use the docvecs function to extract the feature vectors for the training and test data\n # YOUR CODE HERE\n train_pos_vec = []\n train_neg_vec = []\n test_pos_vec = []\n test_neg_vec = []\n for j in 
range(len(train_pos)):\n train_pos_vec.append(model.docvecs['TRAIN_POS_%i' % j])\n for j in range(len(train_neg)):\n train_neg_vec.append(model.docvecs['TRAIN_NEG_%i' % j])\n for j in range(len(test_pos)):\n test_pos_vec.append(model.docvecs['TEST_POS_%i' % j])\n for j in range(len(test_neg)):\n test_neg_vec.append(model.docvecs['TEST_NEG_%i' % j])\n\n # Return the four feature vectors\n return train_pos_vec, train_neg_vec, test_pos_vec, test_neg_vec", "def build_Wordv(word2vec_dict, k):\r\n vocab_size = len(word2vec_dict)\r\n word2id_dict = dict()\r\n W = np.zeros(shape=(vocab_size + 1, k))\r\n W[0] = np.zeros(k)\r\n i = 1\r\n for word in word2vec_dict:\r\n # print type(word), ' | ', word\r\n W[i] = word2vec_dict[word]\r\n # print type(W[i]), \" | \", W[i]\r\n word2id_dict[word] = i\r\n i += 1\r\n return W, word2id_dict", "def creating_feature_vector():\r\n\twordlist = []\r\n\tlabel = \"\"\r\n\tfw = open(\"feature_vector.txt\", \"w+\", encoding = \"utf-8\")\r\n\twith open(\"D:\\\\Python_Prac\\\\wordstag\\\\modules\\\\HI_EN_TRAIN.txt\", \"r\", encoding = \"utf-8\") as f:\r\n\t\tfor line in f:\r\n\t\t\twordlist.append(line)\r\n\t\tfor index, line in enumerate(wordlist):\r\n\t\t\tif line == \"\\n\":\r\n\t\t\t\tcontinue\r\n\t\t\tcontext = line.split(\"\\t\")\r\n\t\t\tlabel = context[1]\r\n\t\t\tfeature_vector = label+\" \"\r\n\t\t\tngram_vector = ngram_frequency(str(context[0]))\r\n\t\t\tfor vector in ngram_vector:\r\n\t\t\t\tfeature_vector += str(vector)+\" \"\r\n\t\t\tfeature_vector += str(is_english(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(is_hindi(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(is_abbr(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(med_in_english(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(med_in_hindi(context[0]))+\" \"\r\n\t\t\tbefore = [0,0,0]\r\n\t\t\tafter = [0,0,0]\r\n\t\t\tfor i in range(3):\r\n\t\t\t\tif (index-i) < 0 or (index-i+1) > len(wordlist)-1:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tbefore[2-i] = get_word_context(wordlist[index-i+1].split(\"\\t\")[0])\r\n\t\t\tfor i in range(3):\r\n\t\t\t\tif (index+i+1) > len(wordlist)-1:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tafter[2-i] = get_word_context(wordlist[index+i+1].split(\"\\t\")[0])\r\n\t\t\tfor i in before:\r\n\t\t\t\tfeature_vector += str(i)+\" \"\r\n\t\t\tfor i in after:\r\n\t\t\t\tfeature_vector += str(i)+\" \"\r\n\t\t\tfeature_vector += \"\\n\"\r\n\t\t\tfw.write(feature_vector)\r\n\t\t\tprint(\"Proceeding...\"+str(index+1)+\" of 16683\")\r\n\r\n\tfw.close()", "def word2vec_generation(self, utterance, with_punctuations):\n vector = []\n\n #words = self.text_preparation(utterance)\n\n words = utterance\n\n #model_ = Word2Vec.load('model.bin')\n #if not self.is_word_in_word2vec_vocabulary(utterance, model_):\n # self.retrain_model([words])\n\n if with_punctuations:\n new_model = Word2Vec.load('./model/model_word2vec.bin')\n else:\n new_model = Word2Vec.load('./model/model_no_punctuation_word2vec.bin')\n\n\n\n # TODO: how generate word2vec vectors for each utterance using the vocabularies in Word2vec model?\n\n #First: average of Word2Vec vectors in each utterance\n for w in words:\n vector.append(new_model.wv[w])\n\n return np.mean(vector, axis=0)", "def build_data_cv(file, split_dict, label_dict, clean_string=False):\n revs = []\n f = open(file)\n vocab = defaultdict(float)\n \n for index, line in enumerate(f.readlines()): \n rev = []\n rev.append(line.strip())\n if clean_string:\n orig_rev = clean_str(\" \".join(rev))\n else:\n orig_rev = \" \".join(rev)\n words = set(orig_rev.split())\n for word in 
words:\n vocab[word] += 1\n datum = {\"y\":label_dict[index], \n \"text\": orig_rev, \n \"num_words\": len(orig_rev.split()),\n \"split\": split_dict[index]}#1 or 2\n revs.append(datum)\n\n return revs, vocab", "def pregroup_draw(words, cups, **params):\n textpad = params.get('textpad', (.1, .2))\n textpad_words = params.get('textpad_words', (0, .1))\n space = params.get('space', .5)\n width = params.get('width', 2.)\n fontsize = params.get('fontsize', None)\n\n backend = TikzBackend(use_tikzstyles=params.get('use_tikzstyles', None))\\\n if params.get('to_tikz', False)\\\n else MatBackend(figsize=params.get('figsize', None))\n\n def draw_triangles(words):\n scan = []\n for i, word in enumerate(words.boxes):\n for j, _ in enumerate(word.cod):\n x_wire = (space + width) * i\\\n + (width / (len(word.cod) + 1)) * (j + 1)\n scan.append(x_wire)\n if params.get('draw_types', True):\n backend.draw_text(\n str(word.cod[j]), x_wire + textpad[0], -textpad[1],\n fontsize=params.get('fontsize_types', fontsize))\n backend.draw_polygon(\n ((space + width) * i, 0),\n ((space + width) * i + width, 0),\n ((space + width) * i + width / 2, 1),\n color=DEFAULT.color)\n backend.draw_text(\n str(word), (space + width) * i + width / 2 + textpad_words[0],\n textpad_words[1], ha='center', fontsize=fontsize)\n return scan\n\n def draw_cups_and_wires(cups, scan):\n for j, off in [(j, off)\n for j, s in enumerate(cups) for off in s.offsets]:\n middle = (scan[off] + scan[off + 1]) / 2\n backend.draw_wire((scan[off], 0), (middle, - j - 1), bend_in=True)\n backend.draw_wire(\n (scan[off + 1], 0), (middle, - j - 1), bend_in=True)\n scan = scan[:off] + scan[off + 2:]\n for i, _ in enumerate(cups[-1].cod if cups else words.cod):\n label = str(cups[-1].cod[i]) if cups else \"\"\n backend.draw_wire((scan[i], 0), (scan[i], - (len(cups) or 1) - 1))\n if params.get('draw_types', True):\n backend.draw_text(\n label, scan[i] + textpad[0], - (len(cups) or 1) - space,\n fontsize=params.get('fontsize_types', fontsize))\n\n scan = draw_triangles(words.normal_form())\n draw_cups_and_wires(cups, scan)\n backend.output(\n params.get('path', None),\n tikz_options=params.get('tikz_options', None),\n xlim=(0, (space + width) * len(words.boxes) - space),\n ylim=(- len(cups) - space, 1),\n margins=params.get('margins', DEFAULT.margins),\n aspect=params.get('aspect', DEFAULT.aspect))", "def build_new_text(tgram, start_words, max_words):\n out_words = []\n for i in range(max_words - 2):\n if start_words in tgram:\n next_word = random.choice(tgram[start_words])\n out_words.append(next_word)\n start_words = start_words.split()\n start_words = start_words[1] + \" \" + next_word\n else:\n break\n out_words = \" \".join(out_words)\n return out_words", "def createPhrase(self, nsy=0, nwords=0, vowel=False):\n phrase = ''\n if nwords:\n if nsy == 0:\n nsy = n.random.randint(1,7)\n for i in range(nwords):\n w = n.random.choice(self.plain_words)\n phrase += w + ' '\n else:\n if nsy == 0:\n nsy = n.random.randint(1,7)\n nsy_ = 0\n while nsy_ < nsy:\n w = n.random.choice(self.plain_words)\n s = self._getSyllables(w)\n l = len(s)\n if w[0] in self.vowels and vowel: # elision\n l -= 1\n if nsy_+l > nsy:\n continue\n nsy_ += l\n phrase += w + ' '\n if w[-1] in self.vowels:\n vowel = 1\n else:\n vowel = 0\n p = phrase[:-1]\n p_ = p.split()\n nsy_ = sum([len(self._getSyllables(i)) for i in p_])\n if len(p_) > 2 and nsy_ > 4:\n if n.random.random() < .2: # use pi in 1/5 of sentences\n # print('\\n', p, self._countSyllables(p))\n where = 
n.random.randint(len(p_)-2)\n p_.insert(where+1, 'pi')\n if p_[where+2][0] not in self.vowels or (p_[where+2][0] in self.vowels and p_[where][-1] in self.vowels):\n # choose a words with two or three syllables\n # and make them one syllable shorter\n p__ = ' '.join(p_)\n while self._countSyllables(p__) != self._countSyllables(p):\n sy__ = n.array([len(self._getSyllables(i)) for i in p_])\n if n.all(sy__ == 1):\n if self._countSyllables(' '.join(p_)) < nsy:\n where_ = n.random.randint(len(p_))\n p_.insert(where_, n.random.choice(self.plain_words))\n elif self._countSyllables(' '.join(p_)) > nsy:\n chosen = n.random.randint(0, len(p_))\n while p_[chosen] == 'pi':\n\n chosen = n.random.randint(0, len(p_))\n p_.pop(chosen)\n else:\n ok = (sy__ > 1).nonzero()[0]\n chosen = n.random.choice(ok)\n size = sy__[chosen]\n w = n.random.choice(self.words_nsy[size-2])\n p_[chosen] = w\n p__ = ' '.join(p_)\n # print(' '.join(p_), self._countSyllables(' '.join(p_)))\n pp = ' '.join(p_)\n\n return pp", "def generate_text(model, w2vmodel, nb_epoch, length=75, max_seq_length=20, seed=\"Rain drop drop top\"):\n global sample\n generated = ''\n sequences = seed\n\n generated += seed\n\n #clean seed\n seed=re.sub(r'<[^<]+?>', '', seed)\n #remove encoding characters like \\x86\n seed=re.sub(r'[^\\x00-\\x7f]','',seed)\n seed=re.sub(r'\\#','',seed)\n #remove punctuation\n seed=re.sub(r'[^A-Za-z0-9\\s]','',seed)\n\n #shorten if longer than max_seq_length\n seed = seed.split(' ')[:max_seq_length]\n\n word_ix_list = []\n for word in seed:\n try:\n word = word_to_ix(word,w2vmodel)\n except:\n #since we're using -1 as a null word (why we also pad with the not in vocab index), we'll use that for words that aren't in the word2vec model\n print('Warning: {0} not contained in training vocabulary. It will be ignored when computing output.'.format(word))\n word = word_to_ix('_UNSEEN_',w2vmodel)\n word_ix_list.append(word)\n\n #pad word_list with the unseen word2vec if shorter than max_seq_length\n word_ix_list = [word_to_ix('_UNSEEN_',w2vmodel)] * (max_seq_length-len(word_ix_list)) + word_ix_list\n\n for temp in [0.2, 0.5, .75, 1.0]:\n print('temperature: ', temp)\n for word in range(length):\n #reshape wordlist\n word_ix_list = np.asarray(word_ix_list).reshape(1,max_seq_length)\n\n #prediction = model.predict(x=word_ix_list)\n #next_ix = np.argmax(prediction)\n prediction = model.predict(x=word_ix_list,verbose=0)[0]\n next_ix = sample(prediction, temp)\n predicted_word = ix_to_word(next_ix,w2vmodel)\n\n generated += (' ' + predicted_word) #add predicted word to the generated output\n\n #remove first word from the word list to reduce the array for the max sequence length for the model\n word_ix_list = np.append(word_ix_list,next_ix)\n word_ix_list.shape\n word_ix_list = np.delete(word_ix_list,0,0)\n print(generated)\n print('-----')\n #print(generated)\n return", "def create_instances_from_document(\n all_documents, document_index, max_seq_length, short_seq_prob,\n masked_lm_prob, max_predictions_per_seq, vocab_words, rng):\n document = all_documents[document_index]\n\n # Account for [CLS], [SEP], [SEP]\n max_num_tokens = max_seq_length - 3\n\n # We *usually* want to fill up the entire sequence since we are padding\n # to `max_seq_length` anyways, so short sequences are generally wasted\n # computation. 
However, we *sometimes*\n # (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter\n # sequences to minimize the mismatch between pre-training and fine-tuning.\n # The `target_seq_length` is just a rough target however, whereas\n # `max_seq_length` is a hard limit.\n target_seq_length = max_num_tokens\n if rng.random() < short_seq_prob:\n target_seq_length = rng.randint(2, max_num_tokens)\n\n # We DON'T just concatenate all of the tokens from a document into a long\n # sequence and choose an arbitrary split point because this would make the\n # next sentence prediction task too easy. Instead, we split the input into\n # segments \"A\" and \"B\" based on the actual \"sentences\" provided by the user\n # input.\n instances = []\n current_chunk = []\n current_length = 0\n i = 0\n while i < len(document):\n segment = document[i]\n current_chunk.append(segment)\n current_length += len(segment)\n if i == len(document) - 1 or current_length >= target_seq_length:\n if current_chunk:\n # `a_end` is how many segments from `current_chunk` go into the `A`\n # (first) sentence.\n a_end = 1\n if len(current_chunk) >= 2:\n a_end = rng.randint(1, len(current_chunk) - 1)\n\n tokens_a = []\n for j in range(a_end):\n tokens_a.extend(current_chunk[j])\n\n tokens_b = []\n # Random next\n is_random_next = False\n if len(current_chunk) == 1 or rng.random() < 0.5:\n is_random_next = True\n target_b_length = target_seq_length - len(tokens_a)\n\n # This should rarely go for more than one iteration for large\n # corpora. However, just to be careful, we try to make sure that\n # the random document is not the same as the document\n # we're processing.\n for _ in range(10):\n random_document_index = rng.randint(\n 0, len(all_documents) - 1)\n if random_document_index != document_index:\n break\n\n random_document = all_documents[random_document_index]\n random_start = rng.randint(0, len(random_document) - 1)\n for j in range(random_start, len(random_document)):\n tokens_b.extend(random_document[j])\n if len(tokens_b) >= target_b_length:\n break\n # We didn't actually use these segments so we \"put them back\" so\n # they don't go to waste.\n num_unused_segments = len(current_chunk) - a_end\n i -= num_unused_segments\n # Actual next\n else:\n is_random_next = False\n for j in range(a_end, len(current_chunk)):\n tokens_b.extend(current_chunk[j])\n truncate_seq_pair(tokens_a, tokens_b, None,\n max_num_tokens, rng)\n if len(tokens_a) < 1 or len(tokens_b) < 1:\n current_chunk = []\n current_length = 0\n i += 1\n continue\n assert len(tokens_a) >= 1, tokens_a\n assert len(tokens_b) >= 1, tokens_b\n\n tokens = []\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in tokens_a:\n tokens.append(token)\n segment_ids.append(0)\n\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n for token in tokens_b:\n tokens.append(token)\n segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n (tokens, masked_lm_positions,\n masked_lm_labels) = create_masked_lm_predictions(\n tokens, masked_lm_prob, max_predictions_per_seq, vocab_words, rng)\n instance = TrainingInstance(\n tokens=tokens,\n segment_ids=segment_ids,\n is_random_next=is_random_next,\n masked_lm_positions=masked_lm_positions,\n masked_lm_labels=masked_lm_labels)\n instances.append(instance)\n current_chunk = []\n current_length = 0\n i += 1\n\n return instances", "def make_text(sent, begin, end):\n lemmas = [sent.morps[begin].lemma(), ]\n for idx in range(begin+1, end):\n if sent.mid2wid[idx-1] != 
sent.mid2wid[idx]: # if go over word boundary\n # insert space between words\n lemmas.append(' ')\n lemmas.append(sent.morps[idx].lemma())\n return ''.join(lemmas)", "def voc_rand_crop(feature, label, height, width):\n i, j, h, w = torchvision.transforms.RandomCrop.get_params(\n feature, output_size=(height, width))\n \n feature = torchvision.transforms.functional.crop(feature, i, j, h, w)\n label = torchvision.transforms.functional.crop(label, i, j, h, w) \n\n return feature, label", "def initialize_out_of_vocab_words(dimension, choice='zero'):\r\n if choice == 'random':\r\n \"\"\"Returns a random vector of size dimension where mean is 0 and standard deviation is 1.\"\"\"\r\n return np.random.normal(size=dimension)\r\n elif choice == 'zero':\r\n \"\"\"Returns a vector of zeros of size dimension.\"\"\"\r\n return np.zeros(shape=dimension)", "def construct_embedding(self):\n i = 0\n self.load_dicts()\n embedding_shape = (max(self.word2idx.values()) + 1,\n self.embedding_size)\n self.embedding = np.zeros(embedding_shape)\n\n with open(self.config.word_vec_fi_glove, 'r') as fi:\n for line in fi:\n word_vec = line.split(\" \")[1:]\n self.embedding[i, :] = np.array(word_vec, dtype=np.float32)\n i += 1\n\n self.write_embedding()", "def generate_bar_example(\n num_topics=10, num_documents=500, num_words_per_doc=100, alpha=1, beta=1, seed=None\n):\n\n width = 5\n\n vocab_size = width * width\n rng = random.Random()\n if seed is not None:\n rng.seed(seed)\n\n zeros = [[0 for i in range(width)] for j in range(width)]\n topic_squares = [zeros for i in range(num_topics)]\n for i in range(width):\n for j in range(width):\n topic_squares[i][i][j] = 1.0 / width\n for i in range(width):\n for j in range(width):\n topic_squares[width + i][j][i] = 1.0 / width\n topics = []\n for k in range(num_topics):\n topics.append(list(_itertools.chain(*topic_squares[k])))\n\n def weighted_choice(probs):\n total = sum(probs)\n r = rng.uniform(0, total)\n upto = 0\n for i, w in enumerate(probs):\n if upto + w > r:\n return i\n upto += w\n assert False, \"Shouldn't get here\"\n\n documents = []\n thetas = []\n for d in range(num_documents):\n doc = [0 for i in range(width * width)]\n topic_dist = [rng.gammavariate(1, 1) for k in range(num_topics)]\n topic_dist = [z / sum(topic_dist) for z in topic_dist]\n for i in range(num_words_per_doc):\n k = weighted_choice(topic_dist)\n w = weighted_choice(topics[k])\n doc[w] += 1\n thetas.append(topic_dist)\n documents.append(doc)\n\n sparse_documents = []\n for d in documents:\n sd = {}\n for i in range(width):\n for j in range(width):\n k = str(i) + \",\" + str(j)\n sd[k] = d[i * width + j]\n sparse_documents.append(sd)\n bow_documents = turicreate.SArray(sparse_documents)\n return bow_documents", "def build_vocab(vocab_size, text_vector):\n vocab = Counter()\n for text in text_vector:\n for word in text.split(' '):\n vocab[word.lower()]+=1\n vocab = dict(vocab.most_common(vocab_size))\n return vocab", "def surface_labelled_data_preparation(word_dictionary: {str, str}):\n X = []\n Y = []\n words = []\n\n for word in word_dictionary:\n segments = word.split('-')\n labels = word_dictionary[word].split('-')\n segment_features = []\n for i in range(len(segments)):\n features = {}\n\n segment_length = len(segments[i])\n features['length'] = segment_length\n\n features['segment.lower()'] = segments[i].lower()\n features['pos_in_word'] = i\n\n if segment_length % 2 == 0:\n features['even'] = 1\n else:\n features['odd'] = 1\n\n features['begin'] = segments[i][0]\n features['end'] = 
segments[i][len(segments[i]) - 1]\n\n try:\n features['prev_segment'] = segments[i - 1]\n except IndexError:\n features['prev_segment'] = ''\n # continue\n\n try:\n features['next_segment'] = segments[i + 1]\n except IndexError:\n features['next_segment'] = ''\n\n if segments[0].isupper():\n features['start_upper'] = 1\n else:\n features['start_lower'] = 1\n\n if segments[0] in 'aeiou':\n features['first_vowel'] = 1\n else:\n features['first_const'] = 1\n\n segment_features.append(features)\n words.append(segments)\n\n X.append(segment_features)\n Y.append(labels)\n words.append(word)\n\n return X, Y, words", "def make_embedding(path, words, indices):\n #root = '/'.join(path.split('/')[0:-1])\n #all_paths = [root+'/'+x for x in os.listdir(root)] #'/'.join(path.split('/')[0:-1]))\n #for path in all_paths:\n vec_path = 'data/'+path.split('/')[-1]+'_'+mode\n print(vec_path)\n if os.path.exists(vec_path+'.npy'):\n np_vecs = np.load(vec_path+'.npy')\n else:\n words_len = len(words)\n vecs = []\n if mode == 'word':\n f = load_model('wiki.en.bin')\n for i, w in enumerate(words):\n if mode == 'word':\n vec = f.get_word_vector(w)\n else:\n vec = eye[indices[w]]\n vecs.append(vec) \n if i % 10000 == 0:\n print(\"{} / {}\".format(i, words_len))\n np_vecs = np.asarray(vecs, dtype=np.int8)\n np.save(vec_path, np_vecs)\n return np_vecs", "def build_vocab(captions, threshold=0):\n\n\tcounter = Counter()\n\tfor caption in captions:\n\t\ttokens = caption.lower().split('/') \n\t\tcounter.update(tokens)\n\n\n\t# If the word frequency is less than 'threshold', then the word is discarded.\n\twords = [word for word, cnt in counter.items() if cnt >= threshold]\n\t# Creates a vocab wrapper and add some special tokens.\n\tvocab = Vocabulary()\n\tvocab.add_word('<pad>')\n\tvocab.add_word('<start>')\n\tvocab.add_word('<end>')\n\tvocab.add_word('<unk>')\n\n\t# Adds the words to the vocabulary.\n\tfor i, word in enumerate(words):\n\t\tvocab.add_word(word)\n\treturn vocab", "def generate_wordcloud(dict_, title='WordCloud', PATH=None):\n wordcloud = WordCloud(min_font_size=10).generate_from_frequencies(dict_)\n plt.figure(figsize = (8, 8), facecolor = None) \n plt.imshow(wordcloud) \n plt.axis(\"off\") \n plt.title(title, size = 24)\n plt.tight_layout(pad = 0) \n if PATH:\n plt.savefig(PATH, bbox_inches=\"tight\", transparent=True)\n plt.show()", "def _create_vocab(captions):\n print(\"Creating vocabulary.\")\n min_word_count = 4\n word_counts_output_file = '/Users/lzg/Desktop/image_caption/word_count.txt'\n counter = Counter()\n for c in captions:\n counter.update(c)\n print(\"Total words:\", len(counter))\n\n # Filter uncommon words and sort by descending count.\n word_counts = [x for x in counter.items() if x[1] >= min_word_count]\n word_counts.sort(key=lambda x: x[1], reverse=True)\n print(\"Words in vocabulary:\", len(word_counts))\n\n # Write out the word counts file.\n with tf.gfile.FastGFile(word_counts_output_file, \"w\") as f:\n f.write(\"\\n\".join([\"%s %d\" % (w, c) for w, c in word_counts]))\n print(\"Wrote vocabulary file:\", word_counts_output_file)\n\n # Create the vocabulary dictionary.\n reverse_vocab = [x[0] for x in word_counts]\n unk_id = len(reverse_vocab)\n vocab_dict = dict([(x, y) for (y, x) in enumerate(reverse_vocab)])\n # vocab = Vocabulary(vocab_dict, unk_id)\n\n return vocab_dict, unk_id", "def wordcloud_maker(df, stopwords = None):\n all_clean = \" \".join(review for review in df.clean)\n wordcloud = WordCloud(stopwords = stopwords, background_color=\"white\").generate(all_clean)\n 
plt.imshow(wordcloud, interpolation='bilinear')\n plt.axis(\"off\")\n plt.show()", "def _make_feature_vec(self, word_list):\n\n # Pre-initialize an empty numpy array (for speed)\n feature_vec = np.zeros((self.num_features,), dtype=\"float32\")\n\n # index2word is a list that contains the names of the words in\n # the model's vocabulary. Convert it to a set, for speed.\n index2word_set = set(self.w2v_model.index2word)\n\n # Loop over each word in the word_list and, if it is in the model's\n # vocabulary, add its feature vector to the total\n nwords = 0\n for word in word_list:\n # NOTE: Careful there, if all words are in caps in the article,\n # this function will return nan values and blow up the forest.\n word = word.lower()\n if word in index2word_set:\n nwords += 1\n feature_vec = np.add(feature_vec, self.w2v_model[word])\n\n # Divide the result by the number of words to get the average\n feature_vec = np.divide(feature_vec, nwords)\n return feature_vec", "def createDwords(self, start: ghidra.program.model.address.Address, count: int) -> None:\n ...", "def make_cloud(\n self,\n image_path: str,\n word_frequency: Dict[str, int],\n font_path: str,\n background_color: str = \"white\",\n width: int = 800,\n height: int = 600,\n **kwargs,\n ) -> WordCloud:\n word_cloud = WordCloud(\n font_path=font_path, background_color=background_color, width=width, height=height, **kwargs\n )\n word_cloud.generate_from_frequencies(word_frequency)\n word_cloud.to_file(image_path)\n print(f\"Saved image to {image_path}\")\n\n return word_cloud", "def build_model():\n \n pipeline = Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(RandomForestClassifier()))\n ])\n \n parameters = {\n 'vect__ngram_range': ((1, 1), (1, 2)),\n 'clf__estimator__min_samples_split': [2, 4],\n }\n \n cv = GridSearchCV(pipeline, param_grid=parameters)\n\n return cv", "def fd(self, sent, index, length):\n context = lambda idx, field: sent[index + idx][field] \\\n if index+idx >= 0 and index + idx < length \\\n else \"<s>\" if index+idx < 0 \\\n else \"</s>\"\n\n ## tokens in a 5 token window x_{i-2}..x_{i+2}\n word_unigram_cur = numify(context(0, WORD))\n word_unigram_pre = numify(context(-1, WORD))\n word_unigram_2pre = numify(context(-2, WORD))\n word_unigram_post = numify(context(1, WORD))\n word_unigram_2post = numify(context(2, WORD))\n\n ## token bigrams in a 5 token window\n word_bigram_pre_cur = \"/\".join([word_unigram_pre, word_unigram_cur])\n word_bigram_cur_post = \"/\".join([word_unigram_cur, word_unigram_post])\n\n ## pos in a 5 token window\n pos_cur = context(0, POS)\n pos_pre = context(-1, POS)\n pos_post = context(1, POS)\n pos_2pre = context(-2, POS)\n pos_2post = context(2, POS)\n\n ## pos bigrams in a 3 token window\n pos_bigram_pre_cur = \"/\".join([pos_pre, pos_cur])\n pos_bigram_cur_post = \"/\".join([pos_cur, pos_post])\n #pre_pre_pos_bigram = \"/\".join([pre_pre_pos, pre_pos])\n #post_post_pos_bigram = \"/\".join([post_pos, post_post_pos])\n\n pos_posw_cur = \"/\".join([word_unigram_cur, pos_cur])\n\n ## Word shape features (5 token window)\n shape_istitle_cur = word_unigram_cur.istitle()\n shape_isdigit_cur = context(0, WORD).isdigit()\n shape_isupper_cur = word_unigram_cur.isupper()\n shape_hyphen_cur = \"-\" in word_unigram_cur[1:-1]\n shape_isalnum_cur = context(0, WORD).isalnum()\n #shape_mixedcase_cur = self.mixedcase.match(context(0, WORD)) != None\n\n shape_istitle_pre = word_unigram_pre.istitle()\n shape_isdigit_pre = 
context(-1, WORD).isdigit()\n shape_isupper_pre = word_unigram_pre.isupper()\n shape_hyphen_pre = \"-\" in word_unigram_pre[1:-1]\n shape_isalnum_pre = context(-1, WORD).isalnum()\n #shape_mixedcase_pre = self.mixedcase.match(context(-1, WORD)) != None\n\n shape_istitle_2pre = word_unigram_2pre.istitle()\n shape_isdigit_2pre = context(-2, WORD).isdigit()\n shape_isupper_2pre = word_unigram_2pre.isupper()\n shape_hyphen_2pre = \"-\" in word_unigram_2pre[1:-1]\n shape_isalnum_2pre = context(-2, WORD).isalnum()\n #shape_mixedcase_2pre = self.mixedcase.match(context(-2, WORD)) != None\n\n shape_istitle_post = word_unigram_post.istitle()\n shape_isdigit_post = context(1, WORD).isdigit()\n shape_isupper_post = word_unigram_post.isupper()\n shape_hypen_post = \"-\" in word_unigram_post[1:-1]\n shape_isalnum_post = context(1, WORD).isalnum()\n #shape_mixedcase_post = self.mixedcase.match(context(1, WORD)) != None\n\n shape_istitle_2post = word_unigram_2post.istitle()\n shape_isdigit_2post = context(2, WORD).isdigit()\n shape_isupper_2post = word_unigram_2post.isupper()\n shape_hypen_2post = \"-\" in word_unigram_2post[1:-1]\n shape_isalnum_2post = context(2, WORD).isalnum()\n #shape_mixedcase_2post = self.mixedcase.match(context(2, WORD)) != None\n\n ## 2-4 suffixes in a 3 token window\n suffix_1_cur = word_unigram_cur[-1:]\n suffix_2_cur = word_unigram_cur[-2:]\n suffix_3_cur = word_unigram_cur[-3:]\n suffix_4_cur = word_unigram_cur[-4:]\n\n suffix_1_pre = word_unigram_pre[-1:]\n suffix_2_pre = word_unigram_pre[-2:]\n suffix_3_pre = word_unigram_pre[-3:]\n suffix_4_pre = word_unigram_pre[-4:]\n\n suffix_1_post = word_unigram_post[-1:]\n suffix_2_post = word_unigram_post[-2:]\n suffix_3_post = word_unigram_post[-3:]\n suffix_4_post = word_unigram_post[-4:]\n\n ## 3-4 prefixes in a 3 token window\n prefix_3_cur = word_unigram_cur[:3]\n prefix_4_cur = word_unigram_cur[:4]\n\n prefix_3_pre = word_unigram_pre[:3]\n prefix_4_pre = word_unigram_pre[:4]\n\n prefix_3_post = word_unigram_post[:3]\n prefix_4_post = word_unigram_post[:4]\n\n ## Noun phrase in a 3 token window\n syn_np_cur = context(0, NP)\n syn_npw_cur = \"/\".join([syn_np_cur, word_unigram_cur])\n syn_np_pre = context(-1, NP)\n syn_np_post = context(1, NP)\n\n ## Extract features from local scope\n features = locals()\n del features[\"context\"]\n del features[\"sent\"]\n del features[\"index\"]\n del features[\"length\"]\n del features[\"self\"]\n features = features.items()\n\n features.extend(self.brown_extractor(\"brown_%d_cur\", context(0, WORD)))\n features.extend(self.brown_extractor(\"brown_%d_pre\", context(-1, WORD)))\n features.extend(self.brown_extractor(\"brown_%d_2pre\", context(-2, WORD)))\n features.extend(self.brown_extractor(\"brown_%d_post\", context(1, WORD)))\n features.extend(self.brown_extractor(\"brown_%d_2post\", context(2, WORD))) \n\n return features", "def generate_babble_text(self):\n markov_chain_output = []\n for n in range(self.number_of_sentences):\n sentence_length = random.randint(self.min_sentence_length, self.max_sentence_length)\n markov_chain_output.append(self.markov_chain.generate_sentence(sentence_length))\n\n random.shuffle(markov_chain_output)\n\n to_display = ''\n for i in markov_chain_output:\n to_display += i + '\\n'\n\n # Clears any old text in the display, then inserts the newly created text\n self.display.delete('1.0', tk.END)\n self.display.insert('1.0', to_display)", "def trainingModel4wmd(corpus):\n model = Word2Vec(corpus, workers = nCores, size = 100, window = 300,\n min_count = 2, iter 
= 250)\n # model = Word2Vec(corpus)\n\n # use the following if we want to normalize the vectors\n model.init_sims(replace=True)\n\n return model", "def definition(self, definition):\n result = Words()\n result.words = DefinitionHelper.words_for_definition(definition)\n return result", "def build_data_cv(self, data_folder, cv=10, clean_string=True):\n revs = []\n pos_file = data_folder[0]\n neg_file = data_folder[1]\n vocab = defaultdict(float)\n with open(pos_file, \"rb\") as f:\n for line in f:\n rev = []\n rev.append(line.strip())\n if clean_string:\n orig_rev = self.dc.clean_str(\" \".join(rev))\n else:\n orig_rev = \" \".join(rev).lower()\n words = set(orig_rev.split())\n for word in words:\n vocab[word] += 1\n datum = {\"y\":1,\n \"text\": orig_rev,\n \"num_words\": len(orig_rev.split()),\n \"split\": np.random.randint(0,cv)}\n revs.append(datum)\n with open(neg_file, \"rb\") as f:\n for line in f:\n rev = []\n rev.append(line.strip())\n if clean_string:\n orig_rev = self.dc.clean_str(\" \".join(rev))\n else:\n orig_rev = \" \".join(rev).lower()\n words = set(orig_rev.split())\n for word in words:\n vocab[word] += 1\n datum = {\"y\":0,\n \"text\": orig_rev,\n \"num_words\": len(orig_rev.split()),\n \"split\": np.random.randint(0,cv)}\n revs.append(datum)\n return revs, vocab", "def make_words(self,lm):\n if \" \" in self.corpus[0] and \" \" in self.corpus[1]: \n print \"assuming BLICK\"\n self.corpus = [convert_to_disc(i) for i in self.corpus]\n else:\n self.disc = 1\n print \"assuming Disc\" \n if not os.path.isfile(self.f): ##check if it already exists\n print \"generating 10 million words\"\n outfile = open(self.f, \"w\")\n outfile.write(\"word,blick,ngram,Real,T,disc\\n\")\n for word in self.corpus:\n write_row_of_bigmatch(word, self.disc, outfile, lm, \"Real\", \"1\")\n while len(self.wordlist)<10000000: \n words = lm.generate(100)\n for word in words:\n if word not in self.wordlist and len(word) < 9: #keep only words less than len9\n write_row_of_bigmatch(word, self.disc, outfile, lm, \"Simulated\", \"0\")\n self.wordlist[word] = 0\n return", "def make_csd(shape, scale, npart, show_plot=False):\r\n if shape == 0:\r\n rads = [scale + 0 * x for x in range(npart)]\r\n else:\r\n rads = lognorm.rvs(shape, scale=scale, size=npart)\r\n with open('diameters.txt', 'w') as fout:\r\n for rad in rads:\r\n fout.write('{0}\\n'.format(rad))\r\n if shape == 0:\r\n xpos = linspace(scale / 2, scale * 2, 100)\r\n else:\r\n xpos = linspace(lognorm.ppf(0.01, shape, scale=scale),\r\n lognorm.ppf(0.99, shape, scale=scale), 100)\r\n plt.plot(xpos, lognorm.pdf(xpos, shape, scale=scale))\r\n plt.hist(rads, normed=True)\r\n plt.savefig('packing_histogram.png')\r\n plt.savefig('packing_histogram.pdf')\r\n if show_plot:\r\n plt.show()", "def construct_embedding(captions, cbow=True):\n\n # List of characters to filter out of the caption string\n chars_to_remove = [\"\\n\", \">\", \"--\"]\n\n for char in chars_to_remove:\n captions = captions.replace(char, \" \")\n\n # Perform some necessary tokenization\n data = [word_tokenize(word.lower()) for word in sent_tokenize(captions)]\n\n # Filter out punctuation from the data\n data_minus_punctuation = remove_punctuation(data)\n\n # Filter out stop words from the data\n data_minus_stop_words = remove_stop_words(data_minus_punctuation)\n\n # Filter out any surviving single character list elements\n fully_formatted_data = [\n [word for word in item if len(word) > 1] for item in data_minus_stop_words\n ]\n\n # Train the Word2Vec model using the specified 
architecture\n if cbow:\n print(\"UTILIZING CBOW ARCHITECTURE\")\n model = Word2Vec(fully_formatted_data, min_count=1, size=100, window=5)\n else:\n print(\"UTILIZING SKIPGRAM ARCHITECTURE\")\n model = Word2Vec(fully_formatted_data, min_count=1, size=100, window=5, sg=1)\n\n return model", "def test_topic_model_generator_dimensions( ):\n N = 100\n D = 1000\n K = 10\n W = 100\n\n tm = TopicModel.generate( K, D )\n assert( tm.topics.shape == (D, K) )\n assert( tm.weights.shape == (K,) )\n\n docs = tm.sample( N, words = W )\n # Each document is a column\n assert( docs.shape == (N, D) ) \n # Each doc should have 100 words\n assert( sc.all(docs.sum(1) == W) )", "def generate_corpus(model, sample):\r\n \r\n dl_corpus = []\r\n for word in sample:\r\n if word in model:\r\n dl_corpus.append(model[word])\r\n else:\r\n dl_corpus.append([0]*VECTOR_DIM)\r\n\r\n return [dl_corpus]", "def build_vocab(cleaned_captions):\n # QUESTION 1.1\n # Here we Build a vocabulary\n\n # create a vocab instance\n vocab = Vocabulary()\n\n words = dict()\n for caption in cleaned_captions: # iterate through all cleaned_caption\n for word in caption.split(): # iterate over all words in a caption\n # add the token words to vocabulary if and only if the count of word is more than MIN_FREQUENCY i.e. 3\n if word not in words.keys():\n words[word] = 1\n else:\n words[word] += 1\n if words[word] > MIN_FREQUENCY:\n vocab.add_word(word)\n\n vocab.add_word('<pad>')\n vocab.add_word('<start>')\n vocab.add_word('<end>')\n vocab.add_word('<unk>')\n\n print(vocab.idx)\n\n return vocab", "def model(self, doc_list=None):\r\n\r\n # eta => prior for the per-topic word distribution\r\n eta = torch.ones(self.V)\r\n\r\n with pyro.plate(\"topics\", self.K):\r\n\r\n # Beta => per topic word distribution\r\n Beta = pyro.sample(f\"beta\", dist.Dirichlet(eta))\r\n\r\n # alpha => prior for the per-doc topic vector\r\n alpha = torch.ones(self.K) / self.K\r\n\r\n X_List, Theta = [], []\r\n for d in pyro.plate(\"documents\", self.D, subsample_size=self.S):\r\n\r\n # theta => per-doc topic vector\r\n theta = pyro.sample(f\"theta_{d}\", dist.Dirichlet(alpha))\r\n\r\n doc = None if doc_list is None else doc_list[d]\r\n\r\n with pyro.plate(f\"words_{d}\", self.N[d]):\r\n\r\n # assign a topic\r\n z_assignment = pyro.sample(\r\n f\"z_assignment_{d}\",\r\n dist.Categorical(theta)\r\n )\r\n\r\n # from that topic vec, select a word\r\n X = pyro.sample(\r\n f\"w_{d}\",\r\n dist.Categorical(Beta[z_assignment]),\r\n obs=doc\r\n )\r\n\r\n X_List.append(X)\r\n Theta.append(theta)\r\n\r\n Theta = torch.stack(Theta)\r\n\r\n return X_List, Beta, Theta", "def generate_wordcloud_from_probabilities_and_words(prob, words, return_image=True, wordcloud_instance=None,\n **wordcloud_kwargs):\n\n if len(prob) != len(words):\n raise ValueError('`prob` and `words` must have the name length')\n if hasattr(prob, 'ndim') and prob.ndim != 1:\n raise ValueError('`prob` must be a 1D array or sequence')\n if hasattr(words, 'ndim') and words.ndim != 1:\n raise ValueError('`words` must be a 1D array or sequence')\n\n weights = dict(zip(words, prob))\n\n return generate_wordcloud_from_weights(weights, return_image=return_image,\n wordcloud_instance=wordcloud_instance, **wordcloud_kwargs)", "def generate(self, seed_text, next_words=20, T=0.9):\n\n index_to_word = {index: word for word, index in self.tokenizer.word_index.items()}\n\n for _ in range(next_words):\n token_list = self.tokenizer.texts_to_sequences([seed_text])[0]\n token_list = pad_sequences([token_list], 
maxlen=self.max_sequence_len, padding='pre')\n\n probas = self.model.predict(token_list, verbose=0)\n probas = np.array(probas[0][1:])\n probas = probas ** (1.0 / T)\n probas /= np.sum(probas)\n predicted = np.random.choice(range(1,self.total_words), p=probas)\n \n seed_text += \" \" + (index_to_word[predicted] if predicted != 0 else '')\n\n return seed_text", "def random_bbox(self, shape, margin, bbox_shape):\r\n img_height = shape\r\n img_width = shape\r\n height = bbox_shape\r\n width = bbox_shape\r\n ver_margin = margin\r\n hor_margin = margin\r\n maxt = img_height - ver_margin - height\r\n maxl = img_width - hor_margin - width\r\n t = np.random.randint(low = ver_margin, high = maxt)\r\n l = np.random.randint(low = hor_margin, high = maxl)\r\n h = height\r\n w = width\r\n return (t, l, h, w)", "def random_bbox(self, shape, margin, bbox_shape):\r\n img_height = shape\r\n img_width = shape\r\n height = bbox_shape\r\n width = bbox_shape\r\n ver_margin = margin\r\n hor_margin = margin\r\n maxt = img_height - ver_margin - height\r\n maxl = img_width - hor_margin - width\r\n t = np.random.randint(low = ver_margin, high = maxt)\r\n l = np.random.randint(low = hor_margin, high = maxl)\r\n h = height\r\n w = width\r\n return (t, l, h, w)", "def create_vocabs(self):\r\n print('Creating vocabs...')\r\n\r\n # Update surface_char2id\r\n unique_surfaces = set(chain(*[sentence.surface_words for sentence in self.sentences]))\r\n unique_chars = set(chain(*[surface for surface in unique_surfaces]))\r\n for ch in unique_chars:\r\n self.surface_char2id[ch] = len(self.surface_char2id)\r\n\r\n # Update lemma_char2id\r\n unique_lemmas = set(chain(*[sentence.lemmas for sentence in self.sentences]))\r\n unique_chars = set(chain(*[lemma for lemma in unique_lemmas]))\r\n for ch in unique_chars:\r\n self.lemma_char2id[ch] = len(self.lemma_char2id)\r\n\r\n # Update transformation2id\r\n for sentence in self.sentences:\r\n for transformation in sentence.transformations:\r\n for _t in transformation:\r\n if _t not in self.transformation2id:\r\n self.transformation2id[_t] = len(self.transformation2id)\r\n\r\n # Update morph_tag2id\r\n unique_morph_tags = list(chain(*[sentence.morph_tags for sentence in self.sentences]))\r\n unique_tags = set(chain(*[morph_tag for morph_tag in unique_morph_tags]))\r\n for tag in unique_tags:\r\n self.morph_tag2id[tag] = len(self.morph_tag2id)\r\n print('Surface Chars={}, Lemma Chars={}, Transformations={}, tags={}'.format(\r\n len(self.surface_char2id), len(self.lemma_char2id), len(self.transformation2id), len(self.morph_tag2id)\r\n ))", "def build_geometry(self, depth=None, maxcount=20000):\n self.G.init()\n self.word_generator = partial(self.G.traverse, depth=depth, maxcount=maxcount)\n self.get_vertices()\n self.get_edges()\n self.get_faces()\n return self", "def build_train_data(self,data_folder, cv=10, clean_string=False):\n revs = []\n\n vocab = defaultdict(float)\n print data_folder\n with codecs.open( data_folder, 'rb') as fi:\n for line in fi.readlines():\n line = line.decode('utf-8')\n parts = line.split(\"\\n\")[0].split(\"\\t\")\n if len(parts) > 1:\n sent = parts[1]\n rev = []\n rev.append(sent.strip())\n\n if clean_string:\n orig_rev = self.dc.clean_str(\" \".join(rev))\n else:\n orig_rev = \" \".join(rev).lower()\n #print orig_rev\n words = set(orig_rev.split())\n for word in words:\n vocab[word.lower()] += 1\n if len(orig_rev.split()) < 50 :\n\n datum = {\"y\":int(parts[0]),\n \"text\": orig_rev,\n \"num_words\": len(orig_rev.split()),\n \"split\": 
np.random.randint(0,cv)}\n revs.append(datum)\n # else:\n # print orig_rev\n\n\n return revs, vocab", "def create_vocabulary(vocabulary_path, words, max_vocabulary_size, normalize_digits=True):\n if not gfile.Exists(vocabulary_path):\n print(\"Creating vocabulary %s with max size %d\" % (vocabulary_path, max_vocabulary_size))\n vocab = {}\n counter = 0\n for w in words:\n counter += 1\n if counter % 10000 == 0:\n print(\" processing word %d = %s\" % (counter, w))\n word = re.sub(_DIGIT_RE, \"0\", w) if normalize_digits else w\n if word in vocab:\n vocab[word] += 1\n else:\n vocab[word] = 1\n vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get, reverse=True)\n if len(vocab_list) > max_vocabulary_size:\n vocab_list = vocab_list[:max_vocabulary_size]\n with gfile.GFile(vocabulary_path, mode=\"w\") as vocab_file:\n for w in vocab_list:\n vocab_file.write(w + \"\\n\")", "def generate_new_word(model: Dict[str, Set[str]]) -> str:\n all_keys = list(dict.keys(model))\n return random.choice(all_keys)" ]
[ "0.61004114", "0.5693294", "0.55114466", "0.5438077", "0.53612614", "0.5311946", "0.52376354", "0.51894677", "0.5161035", "0.5152379", "0.5143327", "0.5127287", "0.51046485", "0.50831926", "0.50809175", "0.5080614", "0.5072338", "0.5021304", "0.50183684", "0.4984602", "0.4969572", "0.49440196", "0.49193308", "0.4914714", "0.49089342", "0.48999873", "0.48745552", "0.4864325", "0.48638293", "0.48628196", "0.4858256", "0.48582435", "0.48566815", "0.48537022", "0.48470083", "0.4842336", "0.48402998", "0.48179993", "0.48127428", "0.47894543", "0.47893265", "0.47797585", "0.47764012", "0.47720045", "0.4766227", "0.47613114", "0.47586912", "0.47573754", "0.47396976", "0.472817", "0.47280297", "0.4728022", "0.47201937", "0.47177866", "0.47123986", "0.47071484", "0.4698746", "0.46918735", "0.46873325", "0.4686019", "0.46832922", "0.46824035", "0.4681813", "0.4680935", "0.4674394", "0.46738964", "0.4671416", "0.46671373", "0.4660146", "0.4658207", "0.46517506", "0.46463352", "0.46456817", "0.46431527", "0.46391335", "0.46357048", "0.46354827", "0.4632805", "0.46302158", "0.46293816", "0.46283782", "0.46273026", "0.46172655", "0.46105552", "0.46042913", "0.46026146", "0.46016905", "0.45895168", "0.458653", "0.45864525", "0.458507", "0.4579809", "0.45790774", "0.45758802", "0.45758802", "0.45736805", "0.45717034", "0.45672423", "0.45604864", "0.455573" ]
0.72299457
0
Add the suffix vowel.
def add_suffix(self, suffix):
    # Append the suffix vowel to this WordForm.
    self.segments.append(Segment.new_segment(suffix))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_suffix(word, suffix):\n suffix, sep, rest = suffix.partition(' ')\n expanded = _add_suffix(word, suffix)\n return expanded + sep + rest", "def get_vowel_names():", "def _replace_suffix(self, word, suffix, replacement):\n assert word.endswith(suffix), \"Given word doesn't end with given suffix\"\n if suffix == \"\":\n return word + replacement\n else:\n return word[: -len(suffix)] + replacement", "def translate(self):\n\t\tvowels = \"aeiou\"\n\n\t\tif (self.word[0] not in vowels) and (self.word[1] in vowels):\n\t\t\tnew_word = self.word[1:] + self.word[0] + \"ay\"\n\t\telif self.word[0] in vowels:\n\t\t\tnew_word = self.word + \"way\"\n\t\telse:\n\t\t\tnew_word = self.word[2:] + self.word[:2] + \"ay\"\n\n\t\tprint(new_word)", "def pig_word(self, original):\n word = original.lower()\n if word[0] in \"aeiou\":\n new_word = word + 'ay'\n else:\n new_word = word[1:] + word[0] + 'ay'\n return new_word", "def stem(self, word):\n word = word.lower()\n\n if word in self.stopwords:\n return word\n\n step1_success = False\n\n r1, r2 = self._r1r2_standard(word, self.__vowels)\n rv = self._rv_standard(word, self.__vowels)\n\n # STEP 0: Attached pronoun\n for suffix in self.__step0_suffixes:\n if not (word.endswith(suffix) and rv.endswith(suffix)):\n continue\n\n if (\n rv[: -len(suffix)].endswith(\n (\n \"ando\",\n \"ar\",\n \"er\",\n \"iendo\",\n \"ir\",\n )\n )\n ) or (\n rv[: -len(suffix)].endswith(\"yendo\")\n and word[: -len(suffix)].endswith(\"uyendo\")\n ):\n\n word = self.__replace_accented(word[: -len(suffix)])\n r1 = self.__replace_accented(r1[: -len(suffix)])\n r2 = self.__replace_accented(r2[: -len(suffix)])\n rv = self.__replace_accented(rv[: -len(suffix)])\n break\n\n # STEP 1: Standard suffix removal\n for suffix in self.__step1_suffixes:\n if not word.endswith(suffix):\n continue\n\n if suffix == \"amente\" and r1.endswith(suffix):\n step1_success = True\n word = word[:-6]\n r2 = r2[:-6]\n rv = rv[:-6]\n\n if r2.endswith(\"iv\"):\n word = word[:-2]\n r2 = r2[:-2]\n rv = rv[:-2]\n\n if r2.endswith(\"at\"):\n word = word[:-2]\n rv = rv[:-2]\n\n elif r2.endswith((\"os\", \"ic\", \"ad\")):\n word = word[:-2]\n rv = rv[:-2]\n\n elif r2.endswith(suffix):\n step1_success = True\n if suffix in (\n \"adora\",\n \"ador\",\n \"acion\",\n \"adoras\",\n \"adores\",\n \"aciones\",\n \"ante\",\n \"antes\",\n \"ancia\",\n \"ancias\",\n ):\n word = word[: -len(suffix)]\n r2 = r2[: -len(suffix)]\n rv = rv[: -len(suffix)]\n\n if r2.endswith(\"ic\"):\n word = word[:-2]\n rv = rv[:-2]\n\n elif suffix in (\"logia\", \"logias\"):\n word = suffix_replace(word, suffix, \"log\")\n rv = suffix_replace(rv, suffix, \"log\")\n\n elif suffix in (\"ucion\", \"uciones\"):\n word = suffix_replace(word, suffix, \"u\")\n rv = suffix_replace(rv, suffix, \"u\")\n\n elif suffix in (\"encia\", \"encias\"):\n word = suffix_replace(word, suffix, \"ente\")\n rv = suffix_replace(rv, suffix, \"ente\")\n\n elif suffix == \"mente\":\n word = word[: -len(suffix)]\n r2 = r2[: -len(suffix)]\n rv = rv[: -len(suffix)]\n\n if r2.endswith((\"ante\", \"able\", \"ible\")):\n word = word[:-4]\n rv = rv[:-4]\n\n elif suffix in (\"idad\", \"idades\"):\n word = word[: -len(suffix)]\n r2 = r2[: -len(suffix)]\n rv = rv[: -len(suffix)]\n\n for pre_suff in (\"abil\", \"ic\", \"iv\"):\n if r2.endswith(pre_suff):\n word = word[: -len(pre_suff)]\n rv = rv[: -len(pre_suff)]\n\n elif suffix in (\"ivo\", \"iva\", \"ivos\", \"ivas\"):\n word = word[: -len(suffix)]\n r2 = r2[: -len(suffix)]\n rv = rv[: -len(suffix)]\n if r2.endswith(\"at\"):\n 
word = word[:-2]\n rv = rv[:-2]\n else:\n word = word[: -len(suffix)]\n rv = rv[: -len(suffix)]\n break\n\n # STEP 2a: Verb suffixes beginning 'y'\n if not step1_success:\n for suffix in self.__step2a_suffixes:\n if rv.endswith(suffix) and word[-len(suffix) - 1 : -len(suffix)] == \"u\":\n word = word[: -len(suffix)]\n rv = rv[: -len(suffix)]\n break\n\n # STEP 2b: Other verb suffixes\n for suffix in self.__step2b_suffixes:\n if rv.endswith(suffix):\n word = word[: -len(suffix)]\n rv = rv[: -len(suffix)]\n if suffix in (\"en\", \"es\", \"eis\", \"emos\"):\n if word.endswith(\"gu\"):\n word = word[:-1]\n\n if rv.endswith(\"gu\"):\n rv = rv[:-1]\n break\n\n # STEP 3: Residual suffix\n for suffix in self.__step3_suffixes:\n if rv.endswith(suffix):\n word = word[: -len(suffix)]\n if suffix in (\"e\", \"\\xE9\"):\n rv = rv[: -len(suffix)]\n\n if word[-2:] == \"gu\" and rv.endswith(\"u\"):\n word = word[:-1]\n break\n\n word = self.__replace_accented(word)\n\n return word", "def create_extended_name(y: str, p: str) -> str:\n final_letter = y[-1]\n if final_letter == \"e\":\n extended_name = y + \"x\" + p\n elif final_letter in [\"a\", \"i\", \"o\", \"u\"]:\n extended_name = y[:-1] + \"ex\" + p\n elif final_letter == \"x\":\n if y[-2] == \"e\":\n extended_name = y + p\n else:\n extended_name = y + \"ex\" + p\n return extended_name", "def _ends_with_vowel(self, letter_group: str) -> bool:\n if len(letter_group) == 0:\n return False\n return self._contains_vowels(letter_group[-1])", "def find_vowels(s):\n \"*** YOUR CODE HERE ***\"", "def add_suffix(name: str, suffix: str):\n return f'{name}_{suffix}'", "def addSuffixes(self, alist):\n for i, (word, filename) in enumerate(alist):\n withsuffix = self._findVideoFile(filename)\n alist[i] = (word, withsuffix)\n return alist", "def upper_vowel(s):\n for k, v in REPLACED_MAP.iteritems():\n s = s.replace(k, v)\n return s", "def last_char_to_vowel(word):\n assert isinstance(word, str)\n # We iterate over characters of the word, because the last might be a\n # punctuation, perhaps.\n for last in reversed(word):\n last = last.lower()\n for ch, prev in ((\"a\", \"a/+£\"),\n (\"e\", \"eébcçdgptvwz&*:.\"),\n (\"o\", \"ohk€å\"),\n (\"ä\", \"äflmnrsx§\"),\n (\"ö\", \"ö\"),\n (\"i\", \"ij%$\"),\n (\"u\", \"uq,\"),\n (\"y\", \"yü\")):\n if last in prev:\n return ch\n return \"e\"", "def suffix():\r\n\r\n return _random.choice(\r\n [\r\n 'Sr.', 'Jr.', 'II', 'III', 'IV', 'V'\r\n ]\r\n )", "def reverse_vowels(s):\n\n phrase = \"\"\n vowels = []\n for letter in s:\n if letter.lower() in \"aeiou\":\n phrase += \"~\"\n vowels.append(letter)\n else: \n phrase += letter\n \n index = 0\n new_phrase = \"\"\n vowels = vowels[-1:-len(vowels)-1:-1]\n \n for letter in phrase:\n\n if letter == \"~\":\n new_phrase += vowels[index]\n index += 1\n else:\n new_phrase += letter\n\n return new_phrase", "def find_vowel(text: str) -> str:\r\n\r\n vowel = text.count('a') + text.count('o') + text.count('u') +\\\r\n text.count('i') + text.count('e') + text.count(\"y\") +\\\r\n text.count('A') + text.count('O') + text.count('U') +\\\r\n text.count('I') + text.count('E') + text.count('Y')\r\n\r\n return(vowel)", "def _suffix(self) -> str:\n return \"\"", "def suffix_replace(original, old, new):\n ...", "def add_suffix(in_image,\n suffix_str):\n bandnames = in_image.bandNames().map(lambda elem: ee.String(elem).toLowerCase().cat('_').cat(suffix_str))\n nb = bandnames.length()\n return in_image.select(ee.List.sequence(0, ee.Number(nb).subtract(1)), bandnames)", "def FindSuffix(self):\n 
self.numSuffixes = 0\n self.forceStress = 0\n resultslist = []\n for f in self.suffixes.finditer(self.wd):\n resultslist.append((f.group(), f.start()))\n if not resultslist: return\n # make sure *end* of word is in list! otherwise, 'DESP erate'\n if resultslist[-1][1] + len(resultslist[-1][0]) < len(self.wd):\n return\n resultslist.reverse()\n for res in resultslist:\n # if no vowel left before, false suffix ('singing')\n # n.b.: will choke on 'quest' etc! put in dictionary, I guess\n if not sre.search('[aeiouy]', self.wd[:res[1]]): break\n if res[0] == 'ing' and self.wd[res[1]-1] == self.wd[res[1]-2]:\n self.sylBounds.append(res[1] - 1) # freq special case\n else: self.sylBounds.append(res[1]) # sorted later\n self.wd = self.wd[:res[1]]\n self.numSuffixes += 1\n if res[0] in STRESSSUFFIX:\n self.forceStress = 0 - len(self.sylBounds)\n if res[0] in MULTISUFFIX:\n # tricky bit! it *happens* that secondary division in all these\n # comes after its first character; NOT inevitable!\n # also does not allow for 3-syl: 'ically' (which are reliable!)\n self.sylBounds.append(res[1]+1)\n self.numSuffixes += 1", "def generate_vowel():\n return random.sample(['a', 'e', 'i', 'o', 'u', 'y'], 1)", "def gerundify(verb):\n if verb.endswith(\"e\"):\n verb = verb[:-1]\n\n if random() < 0.4:\n if (\n not verb.startswith(\"a\")\n and not verb.startswith(\"e\")\n and not verb.startswith(\"i\")\n and not verb.startswith(\"o\")\n and not verb.startswith(\"u\")\n ):\n verb = \"a-\" + verb\n\n return verb + \"ing\"", "def apply_sinalefa(self):\n syllables_sinalefa = []\n index = 0\n while index < len(self.word_syllables):\n try:\n # checking if there is sinalefa\n if self.are_vowels(syllables_sinalefa[-1][-1], self.word_syllables[index][0]):\n merged_syllables = ''.join([syllables_sinalefa[-1], self.word_syllables[index]])\n # replacing the last syllable with the merged syllable\n syllables_sinalefa.pop(-1)\n syllables_sinalefa.append(merged_syllables)\n else:\n syllables_sinalefa.append(self.word_syllables[index])\n except IndexError:\n # we reached the last word\n syllables_sinalefa.append(self.word_syllables[index])\n finally:\n index += 1\n\n return '-'.join(syllables_sinalefa)", "def is_suffix(v,s):\n c = len(v)-1\n n = len(s)\n return c + v[c] == 2*n", "def removesuffix(self, x) -> String:\n pass", "def vowel_with_for(character):\r\n\tif character in vowels:\r\n\t\tprint(\"Entered character is vowel..!\")\r\n\telse:\r\n\t\tprint(\"Not a Vowel\")", "def step1c(self, word):\r\n\r\n if word.endswith('y'):\r\n result = word.rfind('y')\r\n base = word[:result]\r\n if self.containsVowel(base):\r\n word = base\r\n word += 'i'\r\n return word", "def is_suffix(suffix: str, word: str):\n return word.endswith(suffix)", "def stem(self, word):\n word = word.lower()\n\n if word in self.__special_words:\n return self.__special_words[word]\n\n # Map the different apostrophe characters to a single consistent one\n word = (word.replace(u(\"\\u2019\"), u(\"\\x27\"))\n .replace(u(\"\\u2018\"), u(\"\\x27\"))\n .replace(u(\"\\u201B\"), u(\"\\x27\")))\n\n if word.startswith(u(\"\\x27\")):\n word = word[1:]\n\n if word.startswith(\"y\"):\n word = \"\".join((\"Y\", word[1:]))\n\n for i in range(1, len(word)):\n if word[i - 1] in self.__vowels and word[i] == \"y\":\n word = \"\".join((word[:i], \"Y\", word[i + 1:]))\n\n step1a_vowel_found = False\n step1b_vowel_found = False\n\n r1 = \"\"\n r2 = \"\"\n\n if word.startswith((\"gener\", \"commun\", \"arsen\")):\n if word.startswith((\"gener\", \"arsen\")):\n r1 = word[5:]\n else:\n r1 = 
word[6:]\n\n for i in range(1, len(r1)):\n if r1[i] not in self.__vowels and r1[i - 1] in self.__vowels:\n r2 = r1[i + 1:]\n break\n else:\n r1, r2 = self._r1r2_standard(word, self.__vowels)\n\n # STEP 0\n for suffix in self.__step0_suffixes:\n if word.endswith(suffix):\n word = word[:-len(suffix)]\n r1 = r1[:-len(suffix)]\n r2 = r2[:-len(suffix)]\n break\n\n # STEP 1a\n for suffix in self.__step1a_suffixes:\n if word.endswith(suffix):\n\n if suffix == \"sses\":\n word = word[:-2]\n r1 = r1[:-2]\n r2 = r2[:-2]\n\n elif suffix in (\"ied\", \"ies\"):\n if len(word[:-len(suffix)]) > 1:\n word = word[:-2]\n r1 = r1[:-2]\n r2 = r2[:-2]\n else:\n word = word[:-1]\n r1 = r1[:-1]\n r2 = r2[:-1]\n\n elif suffix == \"s\":\n for letter in word[:-2]:\n if letter in self.__vowels:\n step1a_vowel_found = True\n break\n\n if step1a_vowel_found:\n word = word[:-1]\n r1 = r1[:-1]\n r2 = r2[:-1]\n break\n\n # STEP 1b\n for suffix in self.__step1b_suffixes:\n if word.endswith(suffix):\n if suffix in (\"eed\", \"eedly\"):\n\n if r1.endswith(suffix):\n word = \"\".join((word[:-len(suffix)], \"ee\"))\n\n if len(r1) >= len(suffix):\n r1 = \"\".join((r1[:-len(suffix)], \"ee\"))\n else:\n r1 = \"\"\n\n if len(r2) >= len(suffix):\n r2 = \"\".join((r2[:-len(suffix)], \"ee\"))\n else:\n r2 = \"\"\n else:\n for letter in word[:-len(suffix)]:\n if letter in self.__vowels:\n step1b_vowel_found = True\n break\n\n if step1b_vowel_found:\n word = word[:-len(suffix)]\n r1 = r1[:-len(suffix)]\n r2 = r2[:-len(suffix)]\n\n if word.endswith((\"at\", \"bl\", \"iz\")):\n word = \"\".join((word, \"e\"))\n r1 = \"\".join((r1, \"e\"))\n\n if len(word) > 5 or len(r1) >= 3:\n r2 = \"\".join((r2, \"e\"))\n\n elif word.endswith(self.__double_consonants):\n word = word[:-1]\n r1 = r1[:-1]\n r2 = r2[:-1]\n\n elif ((r1 == \"\" and len(word) >= 3 and\n word[-1] not in self.__vowels and\n word[-1] not in \"wxY\" and\n word[-2] in self.__vowels and\n word[-3] not in self.__vowels)\n or\n (r1 == \"\" and len(word) == 2 and\n word[0] in self.__vowels and\n word[1] not in self.__vowels)):\n\n word = \"\".join((word, \"e\"))\n\n if len(r1) > 0:\n r1 = \"\".join((r1, \"e\"))\n\n if len(r2) > 0:\n r2 = \"\".join((r2, \"e\"))\n break\n\n # STEP 1c\n if (len(word) > 2\n and word[-1] in \"yY\"\n and word[-2] not in self.__vowels):\n word = \"\".join((word[:-1], \"i\"))\n if len(r1) >= 1:\n r1 = \"\".join((r1[:-1], \"i\"))\n else:\n r1 = \"\"\n\n if len(r2) >= 1:\n r2 = \"\".join((r2[:-1], \"i\"))\n else:\n r2 = \"\"\n\n # STEP 2\n for suffix in self.__step2_suffixes:\n if word.endswith(suffix):\n if r1.endswith(suffix):\n if suffix == \"tional\":\n word = word[:-2]\n r1 = r1[:-2]\n r2 = r2[:-2]\n\n elif suffix in (\"enci\", \"anci\", \"abli\"):\n word = \"\".join((word[:-1], \"e\"))\n\n if len(r1) >= 1:\n r1 = \"\".join((r1[:-1], \"e\"))\n else:\n r1 = \"\"\n\n if len(r2) >= 1:\n r2 = \"\".join((r2[:-1], \"e\"))\n else:\n r2 = \"\"\n\n elif suffix == \"entli\":\n word = word[:-2]\n r1 = r1[:-2]\n r2 = r2[:-2]\n\n elif suffix in (\"izer\", \"ization\"):\n word = \"\".join((word[:-len(suffix)], \"ize\"))\n\n if len(r1) >= len(suffix):\n r1 = \"\".join((r1[:-len(suffix)], \"ize\"))\n else:\n r1 = \"\"\n\n if len(r2) >= len(suffix):\n r2 = \"\".join((r2[:-len(suffix)], \"ize\"))\n else:\n r2 = \"\"\n\n elif suffix in (\"ational\", \"ation\", \"ator\"):\n word = \"\".join((word[:-len(suffix)], \"ate\"))\n\n if len(r1) >= len(suffix):\n r1 = \"\".join((r1[:-len(suffix)], \"ate\"))\n else:\n r1 = \"\"\n\n if len(r2) >= len(suffix):\n r2 = 
\"\".join((r2[:-len(suffix)], \"ate\"))\n else:\n r2 = \"e\"\n\n elif suffix in (\"alism\", \"aliti\", \"alli\"):\n word = \"\".join((word[:-len(suffix)], \"al\"))\n\n if len(r1) >= len(suffix):\n r1 = \"\".join((r1[:-len(suffix)], \"al\"))\n else:\n r1 = \"\"\n\n if len(r2) >= len(suffix):\n r2 = \"\".join((r2[:-len(suffix)], \"al\"))\n else:\n r2 = \"\"\n\n elif suffix == \"fulness\":\n word = word[:-4]\n r1 = r1[:-4]\n r2 = r2[:-4]\n\n elif suffix in (\"ousli\", \"ousness\"):\n word = \"\".join((word[:-len(suffix)], \"ous\"))\n\n if len(r1) >= len(suffix):\n r1 = \"\".join((r1[:-len(suffix)], \"ous\"))\n else:\n r1 = \"\"\n\n if len(r2) >= len(suffix):\n r2 = \"\".join((r2[:-len(suffix)], \"ous\"))\n else:\n r2 = \"\"\n\n elif suffix in (\"iveness\", \"iviti\"):\n word = \"\".join((word[:-len(suffix)], \"ive\"))\n\n if len(r1) >= len(suffix):\n r1 = \"\".join((r1[:-len(suffix)], \"ive\"))\n else:\n r1 = \"\"\n\n if len(r2) >= len(suffix):\n r2 = \"\".join((r2[:-len(suffix)], \"ive\"))\n else:\n r2 = \"e\"\n\n elif suffix in (\"biliti\", \"bli\"):\n word = \"\".join((word[:-len(suffix)], \"ble\"))\n\n if len(r1) >= len(suffix):\n r1 = \"\".join((r1[:-len(suffix)], \"ble\"))\n else:\n r1 = \"\"\n\n if len(r2) >= len(suffix):\n r2 = \"\".join((r2[:-len(suffix)], \"ble\"))\n else:\n r2 = \"\"\n\n elif suffix == \"ogi\" and word[-4] == \"l\":\n word = word[:-1]\n r1 = r1[:-1]\n r2 = r2[:-1]\n\n elif suffix in (\"fulli\", \"lessli\"):\n word = word[:-2]\n r1 = r1[:-2]\n r2 = r2[:-2]\n\n elif suffix == \"li\" and word[-3] in self.__li_ending:\n word = word[:-2]\n r1 = r1[:-2]\n r2 = r2[:-2]\n break\n\n # STEP 3\n for suffix in self.__step3_suffixes:\n if word.endswith(suffix):\n if r1.endswith(suffix):\n if suffix == \"tional\":\n word = word[:-2]\n r1 = r1[:-2]\n r2 = r2[:-2]\n\n elif suffix == \"ational\":\n word = \"\".join((word[:-len(suffix)], \"ate\"))\n\n if len(r1) >= len(suffix):\n r1 = \"\".join((r1[:-len(suffix)], \"ate\"))\n else:\n r1 = \"\"\n\n if len(r2) >= len(suffix):\n r2 = \"\".join((r2[:-len(suffix)], \"ate\"))\n else:\n r2 = \"\"\n\n elif suffix == \"alize\":\n word = word[:-3]\n r1 = r1[:-3]\n r2 = r2[:-3]\n\n elif suffix in (\"icate\", \"iciti\", \"ical\"):\n word = \"\".join((word[:-len(suffix)], \"ic\"))\n\n if len(r1) >= len(suffix):\n r1 = \"\".join((r1[:-len(suffix)], \"ic\"))\n else:\n r1 = \"\"\n\n if len(r2) >= len(suffix):\n r2 = \"\".join((r2[:-len(suffix)], \"ic\"))\n else:\n r2 = \"\"\n\n elif suffix in (\"ful\", \"ness\"):\n word = word[:-len(suffix)]\n r1 = r1[:-len(suffix)]\n r2 = r2[:-len(suffix)]\n\n elif suffix == \"ative\" and r2.endswith(suffix):\n word = word[:-5]\n r1 = r1[:-5]\n r2 = r2[:-5]\n break\n\n # STEP 4\n for suffix in self.__step4_suffixes:\n if word.endswith(suffix):\n if r2.endswith(suffix):\n if suffix == \"ion\":\n if word[-4] in \"st\":\n word = word[:-3]\n r1 = r1[:-3]\n r2 = r2[:-3]\n else:\n word = word[:-len(suffix)]\n r1 = r1[:-len(suffix)]\n r2 = r2[:-len(suffix)]\n break\n\n # STEP 5\n if r2.endswith(\"l\") and word[-2] == \"l\":\n word = word[:-1]\n elif r2.endswith(\"e\"):\n word = word[:-1]\n elif r1.endswith(\"e\"):\n if len(word) >= 4 and (word[-2] in self.__vowels or\n word[-2] in \"wxY\" or\n word[-3] not in self.__vowels or\n word[-4] in self.__vowels):\n word = word[:-1]\n\n word = word.replace(\"Y\", \"y\")\n return word", "def is_vowel(self, letter):\n\n if letter in (\"a\", \"e\", \"i\", \"o\", \"u\", \"A\", \"E\", \"I\", \"O\", \"U\"):\n return True\n return False", "def replace_vowels(word):\n variants = []\n for 
c in word:\n if c in vowels:\n for vowel in vowels:\n variants.append(word.replace(c, vowel))\n return variants", "def form_ing(word):\n\n # last char of the word\n last = word[-1]\n\n if last == 'e':\n return word[:-1] + 'ing'\n elif last == 'r':\n if word[-2] == 'a': \n return word + \"ring\"\n elif last in ['b', 'd', 'g', 'm', 'n', 'p', 't']:\n if _is_vowel(word[-2]) and not (_is_vowel(word[-3])):\n return word + word[-1] + \"ing\"\n\n return word + \"ing\"", "def test_add_filename_suffix(self):\r\n self.assertEqual(add_filename_suffix('/foo/bar/baz.txt', 'z'),\r\n 'bazz.txt')\r\n self.assertEqual(add_filename_suffix('baz.txt', 'z'),\r\n 'bazz.txt')\r\n self.assertEqual(add_filename_suffix('/foo/bar/baz', 'z'),\r\n 'bazz')\r\n self.assertEqual(add_filename_suffix('baz', 'z'),\r\n 'bazz')\r\n self.assertEqual(add_filename_suffix('/baz.fasta.txt', 'z'),\r\n 'baz.fastaz.txt')\r\n self.assertEqual(add_filename_suffix('baz.fasta.txt', 'z'),\r\n 'baz.fastaz.txt')\r\n self.assertEqual(add_filename_suffix('/foo/', 'z'), 'z')", "def __add_filename_suffix(filename, suffix):\n return \"{}{}.pdf\".format(filename.split(\".pdf\", 1)[0], suffix)", "def suffix(self, e, remaining, rate):\n return f' {e + 1}/{self.max} [{remaining} remaining, {round(rate, 2)} iter/s]'", "def remove_vowels(phrase):\n vowels = ['a', 'e', 'i', 'o', 'u', 'A', 'E', 'I', 'O', 'U']\n cons_word = \"\".join([char for char in phrase if char not in vowels])\n return cons_word", "def suffix(rem):\n if rem == 0:\n suf = ''\n else:\n if rem <= 600: #Class A suffix -- only letters.\n rem = rem - 1\n suf = base34[rem // 25]\n if rem % 25 > 0:\n suf = suf + base34[rem % 25 - 1]# second class A letter, if present.\n else: #rems > 600 : First digit of suffix is a number. Second digit may be blank, letter, or number.\n rem = rem - 601\n suf = base10[rem // 35]\n if rem % 35 > 0:\n suf = suf + base34[rem % 35 - 1]\n return suf", "def _iendswith(string, suffix):\n return string.lower().endswith(suffix)", "def add_possessive(results, form, poss):\n if not poss:\n return results\n\n # Add possessive suffix\n suffixes = nounspecs.possessive_suffixes[poss]\n if isinstance(suffixes, str):\n suffixes = [suffixes]\n results2 = []\n for suffix in suffixes:\n for v in results:\n parts = list(x for x in v)\n if suffix[0] != \"@\":\n for x in suffix:\n if x == \"A\":\n p = \"\".join(parts)\n m = re.search(\"([aouAOU])[^yäöYÄÖ]*$\", p)\n if m:\n parts.append(\"a\")\n else:\n parts.append(\"ä\")\n else:\n parts.append(x)\n v = \"\".join(parts)\n else:\n if form not in (\n \"ine-sg\", \"ine-pl\", \"ela-sg\", \"ela-pl\",\n \"all-sg\", \"all-pl\", \"ade-sg\", \"ade-pl\",\n \"abl-sg\", \"abl-pl\", \"tra-sg\", \"tra-pl\",\n \"ess-sg\", \"ess-pl\", \"abe-sg\", \"abe-pl\",\n \"ptv-sg\", \"ptv-pl\", \"cmt\",\n \"inf1-long\", \"inf2\", \"inf3\", \"inf4\", \"inf5\"):\n continue\n if len(v) < 2 or v[-1] not in \"aeiouyäö\":\n continue\n if v[-2] == v[-1]:\n continue\n v += v[-1]\n v += suffix[1:]\n if v:\n results2.append(v)\n return results2", "def add_suffix_to_filename(filename, suffix):\n name, ext = os.path.splitext(filename)\n return ''.join([name, suffix, ext])", "def add_edge(self, u, v, weight, pre_start, pre_end, suff_start, suff_end):\n \n self.add_node(u)\n self.add_node(v)\n \n if u not in self.prefix[v] and v not in self.suffix[u]:\n self.edges = self.edges + 1\n \n if u not in self.prefix[v]:\n self.prefix[v][u] = [weight, pre_start, pre_end]\n \n if v not in self.suffix[u]:\n self.suffix[u][v] = [weight, suff_start, suff_end]", "def replace_suffix 
(name, new_suffix):\n assert isinstance(name, basestring)\n assert isinstance(new_suffix, basestring)\n split = os.path.splitext (name)\n return split [0] + new_suffix", "def add_extra_words():\n\n with open(ES_STOPWORDS_FILE, \"r\", encoding=\"utf-8\") as temp_file:\n for word in temp_file.read().splitlines():\n STOP_WORDS.add(word)\n\n with open(EN_STOPWORDS_FILE, \"r\", encoding=\"utf-8\") as temp_file:\n for word in temp_file.read().splitlines():\n STOP_WORDS.add(word)\n\n extra_words = list()\n\n for word in STOP_WORDS:\n extra_words.append(word.title())\n extra_words.append(word.upper())\n\n for word in extra_words:\n STOP_WORDS.add(word)", "def add_filename_suffix(filepath, suffix):\r\n root, extension = splitext(basename(filepath))\r\n return root + suffix + extension", "def censor_vowels(word):\n\n chars = []\n\n for letter in word:\n if letter in \"aeiou\":\n chars.append(\"*\")\n chars.append(letter)\n\n return \"\".join(vowels)", "def addSuffixLink(self, suffix_link):\n self.suffix_link = suffix_link", "def is_vowel(text):\n return text.lower() in AVRO_VOWELS", "def endswith(a, suffix, start=0, end=None):\n return _vec_string(\n a, bool_, 'endswith', [suffix, start] + _clean_args(end))", "def suffix(string, suffix, sep = '_'):\n if suffix == 'production':\n suffixed = string\n else:\n suffixed = string + sep + suffix\n return suffixed", "def suffix(self, suffix):\n\n self._suffix = suffix", "def suffix(self, suffix):\n\n self._suffix = suffix", "def suffix(self, suffix):\n\n self._suffix = suffix", "def suffix(self, suffix):\n\n self._suffix = suffix", "def verb_lemma(word):\n if word.endswith(\"ed\"):\n if word[:-2].endswith(\"v\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"at\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"it\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"et\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"ut\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"ac\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"i\"):\n return word[:-3].lower() + \"y\"\n elif word[:-2].endswith(\"ir\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"ag\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"nc\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"nu\"):\n return word[:-2].lower() + \"e\"\n else:\n return word[:-2].lower() \n elif word.endswith(\"ing\"):\n if word[:-3].endswith(\"v\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"at\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"it\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"et\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"ut\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"ac\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"i\"):\n return word[:-4].lower() + \"y\"\n elif word[:-3].endswith(\"ir\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"ag\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"nc\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"nu\"):\n return word[:-3].lower() + \"e\"\n else:\n return word[:-3].lower()\n elif re.match(r\"(does|did|done)\", word):\n return (\"do\")\n elif re.match(r\"(is|are|am|was|will|were|been)\", word):\n return (\"be\")\n elif word == (\"'s\"):\n return (\"be\")\n elif re.match(r\"(had|has|'ve)\", word):\n return (\"have\")\n else:\n return word.lower()", 
"def number_of_vowels(find_vowels):\n v = [\"a\", \"e\", \"i\", \"o\", \"u\", \"y\"]\n new_vowels = find_vowels.lower()\n vowels = \"\"\n for x in new_vowels:\n if x in v:\n vowels += x\n return len(vowels)", "def test_suffix():\n transformer = hug.transform.suffix({\".js\": int, \".txt\": str})\n\n class FakeRequest(object):\n path = \"hey.js\"\n\n request = FakeRequest()\n assert transformer(\"1\", request) == 1\n\n request.path = \"hey.txt\"\n assert transformer(2, request) == \"2\"\n\n request.path = \"hey.undefined\"\n transformer({\"data\": \"value\"}, request) == {\"data\": \"value\"}", "def setSuffix(self, value):\n return self._set(suffix=value)", "def reverse_vowels(s):\n\n vowels = {'a', 'e', 'i', 'o', 'u', 'A', 'E', 'I', 'O', 'U'}\n vowel_indices = { i if s[i] in vowels else None for i in range(len(s)) }\n vowel_indices = tuple(filter(lambda x: x != None, vowel_indices))\n \n new_str = \"\"\n for i in range(len(s)):\n if i in vowel_indices:\n new_str += s[vowel_indices[-vowel_indices.index(i) - 1]]\n else:\n new_str += s[i]\n return new_str", "def censor_vowels(word):\n\n chars = []\n\n for letter in word:\n if letter in \"aeiou\":\n chars.append(\"*\")\n else:\n chars.append(letter)\n \n return \"\".join(chars)", "def suffix ( self ) :\n return self.__suffix", "def suffix ( self ) :\n return self.__suffix", "def get_letter(self, vowel_need):\r\n\r\n return self.letters.get(vowel_need, self.vowels)", "def __truediv__(self, suffix: str) -> FSSpecTarget:\n return replace(self, root_path=os.path.join(self.root_path, suffix))", "def get_suffix(cls, raw_disable: RawDisable) -> str:\n variations = raw_disable.parent_test.variations\n\n maybe_variation_node = raw_disable.node.find(f'.//{cls.VARIATION_TAG}')\n if maybe_variation_node is None:\n return ''\n\n variation = maybe_variation_node.text\n if variation not in variations:\n raise DisableNodeProcessingException(f'could not find {variation!r} in defined variations; skipping node')\n\n idx = variations.index(variation)\n suffix = f'_{idx}'\n return suffix", "def is_english_vowel(c):\n # y was included in the vowel set guided by the tests.\n return c in 'aeiouyAEIOUY'", "def enc_suffix(suf):\n if len(suf) == 0:\n return 0\n r0 = base34.find(suf[0])\n if len(suf) == 1:\n r1 = 0\n else:\n r1 = base34.find(suf[1]) + 1\n if r0 < 24: # first char is a letter, use base 25\n return r0 * 25 + r1 + 1\n else: # first is a number -- base 35.\n return r0 * 35 + r1 - 239", "def makePigLatin(word): \n m = len(word)\n vowels = \"a\", \"e\", \"i\", \"o\", \"u\", \"y\" \n # short words are not converted \n if m<3 or word==\"the\":\n return word\n else:\n for i in vowels:\n if word.find(i) < m and word.find(i) != -1:\n m = word.find(i)\n if m==0:\n return word+\"way\" \n else:\n return word[m:]+word[:m]+\"ay\"", "def prefix_suffix_modify():\n prefixes = \"JKLMNOPQ\"\n suffix = \"ack\"\n for letter in prefixes:\n if letter == \"O\" or letter == \"Q\":\n print(letter + \"u\" + suffix)\n else:\n print(letter + suffix)", "def SpecialCodes(self):\n if sre.search(r\"[^aeiouy]e\\b\", self.wd): # nonsyllabic final e after C\n if ((not self.isPlural or self.wd[-2] not in SIBILANTS) and\n (not self.isPast or self.wd[-2] not in 'dt')):\n self.wd = self.wd[:-1] + encode(self.wd[-1])\n if not sre.search(r\"[aeiouy]\", self.wd): # any vowel left??\n self.wd = self.wd[:-1] + 'e' # undo the encoding\n self.wd = self.CiVcomb.sub(handleCiV, self.wd)\n self.wd = self.CCpair.sub(handleCC, self.wd)\n self.wd = self.VyVcomb.sub(handleVyV, self.wd)", "def _step1b(self, 
word):\n # this NLTK-only block extends the original algorithm, so that\n # 'spied'->'spi' but 'died'->'die' etc\n if self.mode == self.NLTK_EXTENSIONS:\n if word.endswith(\"ied\"):\n if len(word) == 4:\n return self._replace_suffix(word, \"ied\", \"ie\")\n else:\n return self._replace_suffix(word, \"ied\", \"i\")\n\n # (m>0) EED -> EE\n if word.endswith(\"eed\"):\n stem = self._replace_suffix(word, \"eed\", \"\")\n if self._measure(stem) > 0:\n return stem + \"ee\"\n else:\n return word\n\n rule_2_or_3_succeeded = False\n\n for suffix in [\"ed\", \"ing\"]:\n if word.endswith(suffix):\n intermediate_stem = self._replace_suffix(word, suffix, \"\")\n if self._contains_vowel(intermediate_stem):\n rule_2_or_3_succeeded = True\n break\n\n if not rule_2_or_3_succeeded:\n return word\n\n return self._apply_rule_list(\n intermediate_stem,\n [\n (\"at\", \"ate\", None), # AT -> ATE\n (\"bl\", \"ble\", None), # BL -> BLE\n (\"iz\", \"ize\", None), # IZ -> IZE\n # (*d and not (*L or *S or *Z))\n # -> single letter\n (\n \"*d\",\n intermediate_stem[-1],\n lambda stem: intermediate_stem[-1] not in (\"l\", \"s\", \"z\"),\n ),\n # (m=1 and *o) -> E\n (\n \"\",\n \"e\",\n lambda stem: (self._measure(stem) == 1 and self._ends_cvc(stem)),\n ),\n ],\n )", "def has_more_vowels(word):\n\n# If the phrase is over half vowels, it should return True:\n\n # intialize a vowel count variable \n # Loop through the letters of the word:\n # if the letter is in the set of vowels, increment the vowel count\n # if vowel count is greater than length of the word divided by 2, return True\n # else return false\n\n\n vowel_count = 0\n\n for letter in word:\n if letter.lower() in {\"a\", \"e\", \"i\", \"o\", \"u\"}:\n vowel_count += 1\n\n if vowel_count > (len(word) / 2):\n return True\n\n return False", "def vowels(self):\n vas = []\n file = self.read()\n words = re.sub(\"[aeiouAEIOU]\",\" \", file).split(\" \")\n for h_u in words:\n if h_u != \"\":\n vas.append(h_u)\n self.print(vas)\n self.write(vas)\n logging.debug(\"Starting with to\")\n return vas", "def main():\n word = input(\"Give me a word! 
\\n\\n\")\n vowels = ['a', 'e', 'i', 'o', 'u']\n if word[0].lower() in vowels:\n print(f\"\\n\\nPig latin: {word}way\")\n else:\n print(f\"\\n\\nPig latin: {word[1:]}{word[0]}ay\")", "def wemo_entity_suffix_fixture():\n return \"\"", "def removeVowels(self, S: str) -> str:\n \n vowel_list = ['a', 'e', 'i', 'o', 'u']\n output = \"\"\n \n for letter in S:\n \n if letter not in vowel_list:\n \n output += letter\n \n else:\n continue\n \n return output", "def suffix(self):\n return self[\"suffix\"]", "def suffix(self):\n return self[\"suffix\"]", "def doCombineCurEnd(self, endofword, nrc='', nextvowel=''): # nrc = next root consonant\n if not self.end:\n return\n self.final = PhonStateCAT.getFinal(self.end)\n nasalPhon = ''\n postVowelPhon = ''\n preVowelPhon = ''\n # geminates\n geminates = False\n if self.end.startswith('w'):\n preVowelPhon = 'w'\n self.vowel = self.end[1:2]\n else:\n self.vowel = self.end[:1]\n vowelPhon = self.vowel\n if nrc == self.final and self.final != '':\n geminates = True\n if self.gemminatesStrategy == 'len' or self.gemminatesStrategy == 'lentone':\n postVowelPhon = 'ː'\n ## Suffix\n finalPhon = ''\n if self.final == 'ng':\n nasalPhon = self.nasalchar # ?\n if geminates:\n pass\n elif self.final in PhonStateCAT.simpleFinalMapping:\n finalPhon = PhonStateCAT.simpleFinalMapping[self.final]\n elif self.final == '':\n if self.latent != '' and self.prefixStrategy != 'never' and (self.prefixSyllable == 'afterEmptyCoda' or self.prefixSyllable == 'afterEmptyCoda+'):\n finalPhon = PhonStateCAT.simpleLatentMapping[self.latent]\n finalPhon = ''\n else:\n print(\"unrecognized final: \"+self.final)\n self.phon += preVowelPhon+vowelPhon+nasalPhon+postVowelPhon+finalPhon\n if not endofword:\n self.phon += self.syllablesepchar", "def new_end(self, end): \n self.last_words.append(end)", "def endswith(self, suffix, start=0, end=None):\n return endswith(self, suffix, start, end)", "def get_elb_name ( base_name, app_name ) :\n max_len = 32\n name = base_name + '-' + app_name.upper( ) + '-LB'\n if len( name ) > max_len :\n name = base_name + '-' + app_name.upper( )\n if len( name ) > max_len :\n raise NameError( 'ELB Name ' + name + ' exceeds limit of ' + str( max_len ) )\n\n return name", "def gibber(self): \n for x in self.consonants:\n if (x in self.sentence):\n \t self.sentence = self.sentence.replace(x, x+'o'+unicode(x).lower())", "def case_suffix(self, **case_kws):\n if callable(self.output_suffix):\n return self.output_suffix(**case_kws)\n else:\n return self.output_suffix.format(**case_kws)", "def disemvowel(string):\n to_return = ''\n for char in string:\n if char not in 'aeiouAEIOU':\n to_return += char\n return to_return", "def pluralize(word):\n\n assert word\n assert isinstance(word, basestring)\n assert len(word) > 0\n\n second_last = word[-2]\n last = word[-1]\n if last in ['s', 'z','x']:\n return word + \"es\"\n elif last == 'h':\n if second_last in ['s', 'c']:\n return word + \"es\"\n else:\n return word + 's'\n elif last == 'o':\n if not _is_vowel(second_last):\n return word + \"es\"\n else: \n return word + 's'\n elif last == 'y':\n if not _is_vowel(second_last):\n return word[:-1] + \"ies\"\n else:\n return word + 's'\n else:\n return word + 's'", "def needs_aou(word):\n return re.search(\"([aouAOU])[^yäöYÄÖ]*$\", word)", "def find_words_using_all_vowels():\n pass", "def stem(self, word):\n word = word.lower()\n\n step1_success = False\n\n # All acute accents are replaced by grave accents.\n word = (word.replace(u(\"\\xE1\"), u(\"\\xE0\"))\n .replace(u(\"\\xE9\"), 
u(\"\\xE8\"))\n .replace(u(\"\\xED\"), u(\"\\xEC\"))\n .replace(u(\"\\xF3\"), u(\"\\xF2\"))\n .replace(u(\"\\xFA\"), u(\"\\xF9\")))\n\n # Every occurrence of 'u' after 'q'\n # is put into upper case.\n for i in range(1, len(word)):\n if word[i - 1] == \"q\" and word[i] == \"u\":\n word = \"\".join((word[:i], \"U\", word[i + 1:]))\n\n # Every occurrence of 'u' and 'i'\n # between vowels is put into upper case.\n for i in range(1, len(word) - 1):\n if word[i - 1] in self.__vowels and word[i + 1] in self.__vowels:\n if word[i] == \"u\":\n word = \"\".join((word[:i], \"U\", word[i + 1:]))\n elif word[i] == \"i\":\n word = \"\".join((word[:i], \"I\", word[i + 1:]))\n\n r1, r2 = self._r1r2_standard(word, self.__vowels)\n rv = self._rv_standard(word, self.__vowels)\n\n # STEP 0: Attached pronoun\n for suffix in self.__step0_suffixes:\n if rv.endswith(suffix):\n if rv[-len(suffix) - 4:-len(suffix)] in (\"ando\", \"endo\"):\n word = word[:-len(suffix)]\n r1 = r1[:-len(suffix)]\n r2 = r2[:-len(suffix)]\n rv = rv[:-len(suffix)]\n\n elif (rv[-len(suffix) - 2:-len(suffix)] in\n (\"ar\", \"er\", \"ir\")):\n word = \"\".join((word[:-len(suffix)], \"e\"))\n r1 = \"\".join((r1[:-len(suffix)], \"e\"))\n r2 = \"\".join((r2[:-len(suffix)], \"e\"))\n rv = \"\".join((rv[:-len(suffix)], \"e\"))\n break\n\n # STEP 1: Standard suffix removal\n for suffix in self.__step1_suffixes:\n if word.endswith(suffix):\n if suffix == \"amente\" and r1.endswith(suffix):\n step1_success = True\n word = word[:-6]\n r2 = r2[:-6]\n rv = rv[:-6]\n\n if r2.endswith(\"iv\"):\n word = word[:-2]\n r2 = r2[:-2]\n rv = rv[:-2]\n\n if r2.endswith(\"at\"):\n word = word[:-2]\n rv = rv[:-2]\n\n elif r2.endswith((\"os\", \"ic\")):\n word = word[:-2]\n rv = rv[:-2]\n\n elif r2 .endswith(\"abil\"):\n word = word[:-4]\n rv = rv[:-4]\n\n elif (suffix in (\"amento\", \"amenti\",\n \"imento\", \"imenti\") and\n rv.endswith(suffix)):\n step1_success = True\n word = word[:-6]\n rv = rv[:-6]\n\n elif r2.endswith(suffix):\n step1_success = True\n if suffix in (\"azione\", \"azioni\", \"atore\", \"atori\"):\n word = word[:-len(suffix)]\n r2 = r2[:-len(suffix)]\n rv = rv[:-len(suffix)]\n\n if r2.endswith(\"ic\"):\n word = word[:-2]\n rv = rv[:-2]\n\n elif suffix in (\"logia\", \"logie\"):\n word = word[:-2]\n rv = word[:-2]\n\n elif suffix in (\"uzione\", \"uzioni\",\n \"usione\", \"usioni\"):\n word = word[:-5]\n rv = rv[:-5]\n\n elif suffix in (\"enza\", \"enze\"):\n word = \"\".join((word[:-2], \"te\"))\n rv = \"\".join((rv[:-2], \"te\"))\n\n elif suffix == u(\"it\\xE0\"):\n word = word[:-3]\n r2 = r2[:-3]\n rv = rv[:-3]\n\n if r2.endswith((\"ic\", \"iv\")):\n word = word[:-2]\n rv = rv[:-2]\n\n elif r2.endswith(\"abil\"):\n word = word[:-4]\n rv = rv[:-4]\n\n elif suffix in (\"ivo\", \"ivi\", \"iva\", \"ive\"):\n word = word[:-3]\n r2 = r2[:-3]\n rv = rv[:-3]\n\n if r2.endswith(\"at\"):\n word = word[:-2]\n r2 = r2[:-2]\n rv = rv[:-2]\n\n if r2.endswith(\"ic\"):\n word = word[:-2]\n rv = rv[:-2]\n else:\n word = word[:-len(suffix)]\n rv = rv[:-len(suffix)]\n break\n\n # STEP 2: Verb suffixes\n if not step1_success:\n for suffix in self.__step2_suffixes:\n if rv.endswith(suffix):\n word = word[:-len(suffix)]\n rv = rv[:-len(suffix)]\n break\n\n # STEP 3a\n if rv.endswith((\"a\", \"e\", \"i\", \"o\", u(\"\\xE0\"), u(\"\\xE8\"),\n u(\"\\xEC\"), u(\"\\xF2\"))):\n word = word[:-1]\n rv = rv[:-1]\n\n if rv.endswith(\"i\"):\n word = word[:-1]\n rv = rv[:-1]\n\n # STEP 3b\n if rv.endswith((\"ch\", \"gh\")):\n word = word[:-1]\n\n word = word.replace(\"I\", 
\"i\").replace(\"U\", \"u\")\n return word", "def normalize_suffix_0(string, logger_=_LOGGER):\n if re.search(\"_[A-Z]{1,}$\", string):\n return string\n numbers = re.search(\"[0-9]{1,}$\", string)\n if numbers:\n logger.log(\n level=\"warning\",\n message='Suffix of string \"'\n + string\n + '\" should not have a number. Numbers removed from the suffix',\n logger=logger_,\n )\n instance = numbers.group(0)\n string = string[0 : string.find(instance)]\n lower_case = re.search(\"_[a-z]{1,}$\", string)\n if lower_case:\n instance_ = lower_case.group(0)\n string = string[0 : string.find(instance_)] + instance_.upper()\n return string", "def add_word(self, word):\n word = self.map_word(word)\n super(InvariantLanguage, self).add_word(word)", "def inner(word):\n return word + '!!!'", "def pig_latinify(word):\n\n first_letter = word[0]\n\n if first_letter in VOWELS:\n output_word = word + \"yay\"\n else:\n #scan for vowel if word starts with a consonant\n for i in range(len(word)):\n individual_letter = word[i]\n if individual_letter in VOWELS:\n output_word = word[i:] + word[:i] + \"ay\"\n break\n else:\n continue\n\n return output_word", "def pig_latinify(word):\n result = \"\"\n if len(word) > 0 and word.isalpha():\n first = word[0]\n if is_vowel(first): # starts with a vowel\n result = str(word) + \"yay\"\n else: # starts with non-vowel\n cut = position_of_vowel(word) # where to cut the word\n if cut > 0: # \"street\"-->\"eet+str+ay\"\n result = word[cut:] + word[:cut] + \"ay\"\n else: # no vowel found\n result = word + \"ay\"\n else:\n result = 'Only letters allowed!'\n\n return result", "def fry(word):\n\n # looks for a Y or y which will be (captured) followed and ended by an 'ou'\n match_you = re.match('([Yy])ou$', word)\n\n # First group will be the (captured) group so either 'Y' or 'y'\n if match_you:\n return match_you.group(1) + \"'all\"\n\n # looks for anyword ending in 'ing'\n match_ing = re.search('(.+)ing$', word)\n\n # checks if vowel exists before the 'ing'\n if match_ing:\n vowel_check = re.search('[aeiouy]', match_ing.group(1))\n # First group will be the (captured) group so everything before the 'ing'\n if vowel_check:\n return match_ing.group(1) + \"in'\"\n\n return word", "def _rv_standard(self, word, vowels):\n rv = \"\"\n if len(word) >= 2:\n if word[1] not in vowels:\n for i in range(2, len(word)):\n if word[i] in vowels:\n rv = word[i + 1 :]\n break\n\n elif word[0] in vowels and word[1] in vowels:\n for i in range(2, len(word)):\n if word[i] not in vowels:\n rv = word[i + 1 :]\n break\n else:\n rv = word[3:]\n\n return rv", "def add_suffix(self, suffix, axis=1):\n return DataFrameDefault.register(pandas.DataFrame.add_suffix)(\n self, suffix=suffix, axis=axis\n )", "def generate_syllable():\n return generate_vowel() + generate_consonant()", "def indefinite(self):\n return \"an\" if self.short_desc[0] in 'aeiou' else \"a\"", "def _contains_vowel(self, stem):\n for i in range(len(stem)):\n if not self._is_consonant(stem, i):\n return True\n return False", "def replace_end(s, old, new):\n assert s.endswith(old)\n return s[:-len(old)] + new" ]
[ "0.5983021", "0.5954164", "0.59408945", "0.5778313", "0.5686185", "0.5563964", "0.5542913", "0.55272454", "0.54683185", "0.5462567", "0.54531056", "0.5446963", "0.54377186", "0.5427365", "0.53498006", "0.53402376", "0.53384125", "0.5302798", "0.52940315", "0.5291557", "0.52781844", "0.52564484", "0.52411693", "0.52356154", "0.5230086", "0.5162771", "0.51485455", "0.5136728", "0.5126367", "0.5126245", "0.5104269", "0.5102299", "0.5078848", "0.50750136", "0.50700855", "0.50556964", "0.5021987", "0.49928167", "0.4991532", "0.4985778", "0.4983493", "0.49590418", "0.49586567", "0.495339", "0.4941221", "0.4928155", "0.49269378", "0.49256873", "0.49254015", "0.4920181", "0.4920181", "0.4920181", "0.4920181", "0.49141145", "0.49087605", "0.4902713", "0.48902482", "0.48665494", "0.48587644", "0.4858456", "0.4858456", "0.48572665", "0.48561782", "0.48515445", "0.4845795", "0.48441982", "0.48420036", "0.4837805", "0.4836", "0.48341617", "0.48317844", "0.48309174", "0.48286653", "0.48268256", "0.48185936", "0.47931543", "0.47931543", "0.47825304", "0.47690168", "0.4765036", "0.47566208", "0.4739206", "0.47327724", "0.47315034", "0.47208524", "0.4717525", "0.47173312", "0.47143188", "0.47121692", "0.47055966", "0.4692895", "0.46915162", "0.4690172", "0.46891174", "0.46856782", "0.4678084", "0.46711275", "0.46709234", "0.4668408", "0.46626365" ]
0.7781676
0