query: string (length 9 to 9.05k)
document: string (length 10 to 222k)
metadata: dict
negatives: sequence (length 30)
negative_scores: sequence (length 30)
document_score: string (length 4 to 10)
document_rank: string (2 distinct values)
Pack a sequence of individual interventions into one batched intervention. All interventions must be in the same nodes and locations.
def pack_interventions(
        interventions: Sequence[Intervention],
        batch_dim: int = 0,
        non_batch_inputs: Optional[Sequence[str]] = None
) -> Tuple[Dict, Dict, Dict]:
    base_lists, ivn_lists = defaultdict(list), defaultdict(list)
    loc_dict = {}
    multi_loc_nodes = set()
    batch_size = len(interventions)
    ivn_is_empty = False

    for ivn in interventions:
        for leaf, val in ivn.base._values.items():
            base_lists[leaf].append(val)

        if ivn_is_empty and not ivn.is_empty():
            raise RuntimeError(f"Cannot pack empty interventions together with non-empty ones")
        if ivn.is_empty():
            ivn_is_empty = True

        for node, val in ivn.intervention._values.items():
            if not isinstance(val, list):
                assert node not in multi_loc_nodes
                ivn_lists[node].append(val)
            else:
                # multi-loc interventions
                if node not in ivn_lists:
                    multi_loc_nodes.add(node)
                    ivn_lists[node] = [[] for _ in range(len(val))]
                else:
                    assert node in multi_loc_nodes
                    assert len(val) == len(ivn_lists[node])
                for i in range(len(val)):
                    ivn_lists[node][i].append(val[i])

        for node, loc in ivn.location.items():
            if node not in loc_dict:
                loc_dict[node] = loc
            else:
                # if node not in multi_loc_nodes and location.expand_dim(loc, batch_dim) != loc_dict[node]:
                if node not in multi_loc_nodes and loc != loc_dict[node]:
                    raise RuntimeError(f"Locs are inconsistent in the list of interventions "
                                       f"(found both {loc} and {loc_dict[node]} for node {node})")
                # if node in multi_loc_nodes and not all(location.expand_dim(l, batch_dim) == ll for l, ll in zip(loc, loc_dict[node])):
                if node in multi_loc_nodes and not all(l == ll for l, ll in zip(loc, loc_dict[node])):
                    raise RuntimeError(f"Locs are inconsistent in the list of "
                                       f"multi_node interventions for node {node}!")

    # make sure base lists have equal length
    if not all(len(l) == batch_size for l in base_lists.values()):
        for leaf, vals in base_lists.items():
            if len(vals) != batch_size:
                raise RuntimeError(
                    f"List of values for leaf `{leaf}` has shorter length ({len(vals)}) than batch size ({batch_size})")

    # make sure intervention values have equal length
    if not ivn_is_empty:
        for node, vals in ivn_lists.items():
            if node not in multi_loc_nodes:
                if len(vals) != batch_size:
                    raise RuntimeError(
                        f"List of values for intervention at `{node}` has shorter length ({len(vals)}) than batch size ({batch_size})")
            else:
                if not all(len(vals[j]) == batch_size for j in range(len(vals))):
                    raise RuntimeError(f"Lists of values for multi-location intervention have inconsistent length")

    base_dict = batchify(base_lists, batch_dim, multi_loc_nodes, non_batch_inputs)
    ivn_dict = batchify(ivn_lists, batch_dim, multi_loc_nodes) if not ivn_is_empty else {}

    return base_dict, ivn_dict, loc_dict
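The batchify helper that pack_interventions calls is not included in this row; as a rough illustration only (the name, signature, and behaviour below are assumptions, not the library's actual code), stacking each leaf's list of per-example values along a batch dimension could look like this:

import torch

def batchify_sketch(value_lists, batch_dim=0, non_batch_inputs=None):
    # Hypothetical stand-in for the batchify() helper used above.
    # Stacks each leaf's list of per-example values along batch_dim;
    # leaves named in non_batch_inputs are passed through unstacked.
    non_batch_inputs = set(non_batch_inputs or ())
    batched = {}
    for leaf, vals in value_lists.items():
        if leaf in non_batch_inputs:
            batched[leaf] = vals[0]  # assumed identical across examples
        else:
            batched[leaf] = torch.stack([torch.as_tensor(v) for v in vals], dim=batch_dim)
    return batched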
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def collate_fn(self, batch: List[Dict]) -> List[Dict]:\n # package up a list of individual interventions into multiple batched interventions\n # batch may contain interventions on different locations\n high_node_to_minibatches = defaultdict(list)\n for d in batch:\n high_nodes = tuple(sorted(d[\"high_intervention\"].intervention._values.keys()))\n high_node_to_minibatches[high_nodes].append(d)\n\n minibatches = []\n for minibatch_dicts in high_node_to_minibatches.values():\n low_base_dict, low_ivn_dict, low_loc_dict = pack_interventions(\n [d[\"low_intervention\"] for d in minibatch_dicts],\n batch_dim=self.batch_dim,\n non_batch_inputs=self.low_non_batch_leaves\n )\n low_base_input = GraphInput(\n low_base_dict, batched=True, batch_dim=self.batch_dim,\n cache_results=self.cache_base_results,\n key_leaves=self.low_key_leaves,\n non_batch_leaves=self.low_non_batch_leaves\n )\n low_realizations = [d[\"low_intervention\"].realization for d in minibatch_dicts]\n if all(rzn is None for rzn in low_realizations):\n low_realizations = None\n low_ivn = Intervention.batched(\n low_base_input, low_ivn_dict, low_loc_dict,\n batch_dim=self.batch_dim, cache_base_results=self.cache_interv_results,\n realization=low_realizations\n )\n\n high_base_dict, high_ivn_dict, high_loc_dict = pack_interventions(\n [d[\"high_intervention\"] for d in minibatch_dicts],\n batch_dim=self.batch_dim,\n non_batch_inputs=self.high_non_batch_leaves\n )\n high_base_input = GraphInput(\n high_base_dict, batched=True, batch_dim=self.batch_dim,\n cache_results=self.cache_base_results,\n key_leaves=self.high_key_leaves,\n non_batch_leaves=self.high_non_batch_leaves\n )\n high_ivn = Intervention.batched(\n high_base_input, high_ivn_dict, high_loc_dict,\n batch_dim=self.batch_dim, cache_base_results=self.cache_interv_results)\n\n minibatches.append({\"low_intervention\": low_ivn,\n \"high_intervention\": high_ivn})\n\n return minibatches", "def _batchify(batch):\n im_name, im0, im1, im2, im3, im4, im5, im6, im7, im8, im9 = zip(*batch)\n im0 = nd.stack(*im0)\n im1 = nd.stack(*im1)\n im2 = nd.stack(*im2)\n im3 = nd.stack(*im3)\n im4 = nd.stack(*im4)\n im5 = nd.stack(*im5)\n im6 = nd.stack(*im6)\n im7 = nd.stack(*im7)\n im8 = nd.stack(*im8)\n im9 = nd.stack(*im9)\n return im_name, im0, im1, im2, im3, im4, im5, im6, im7, im8, im9", "def transform(self, inputs: list, stage: str) -> datapack.DataPack:", "def split_sequence(image_name, output_name):\n nim = nib.load(image_name)\n T = nim.header['dim'][4]\n affine = nim.affine\n image = nim.get_data()\n\n for t in range(T):\n image_fr = image[:, :, :, t]\n nim2 = nib.Nifti1Image(image_fr, affine)\n nib.save(nim2, '{0}{1:02d}.nii.gz'.format(output_name, t))", "def _insertAllSteps(self): \n self.uMics = self.inputCoordinatesTiltedPairs.get().getUntilted().getMicrographs()\n self.tMics = self.inputCoordinatesTiltedPairs.get().getTilted().getMicrographs()\n\n self.inputMics = self._createSetOfParticles('auxMics')\n self.inputMics.copyInfo(self.uMics)\n self.inputMics.setStore(False)\n \n for micU, micT in izip(self.uMics, self.tMics):\n micU.cleanObjId()\n micT.cleanObjId()\n self.inputMics.append(micU)\n self.inputMics.append(micT)\n\n self.samplingInput = self.uMics.getSamplingRate()\n \n\n if self.downsampleType.get() != OTHER:\n # If 'same as picking' or 'original' get sampling rate from input micrographs\n #TODO: Review this when downsampling before picking is possible\n self.samplingFinal = self.samplingInput\n else:\n # If 'other' multiply the input sampling rate by the factor 
provided\n self.samplingFinal = self.samplingInput*self.downFactor.get()\n \n # Write pos files for each micrograph\n firstStepId = self._insertFunctionStep('writePosFilesStep')\n \n # For each micrograph insert the steps\n #run in parallel\n \n deps = []\n for mic in self.inputMics:\n localDeps = [firstStepId]\n micrographToExtract = mic.getFileName()\n micName = removeBaseExt(mic.getFileName())\n micId = mic.getObjId()\n\n # If downsample type is 'other' perform a downsample\n if self.downsampleType == OTHER:\n fnDownsampled = self._getTmpPath(micName+\"_downsampled.xmp\")\n downFactor = self.downFactor.get()\n args = \"-i %(micrographToExtract)s -o %(fnDownsampled)s --step %(downFactor)f --method fourier\"\n localDeps=[self._insertRunJobStep(\"xmipp_transform_downsample\", args % locals(),prerequisites=localDeps)]\n micrographToExtract = fnDownsampled\n \n # If remove dust \n if self.doRemoveDust:\n fnNoDust = self._getTmpPath(micName+\"_noDust.xmp\")\n \n thresholdDust = self.thresholdDust.get() #TODO: remove this extra variable\n args=\" -i %(micrographToExtract)s -o %(fnNoDust)s --bad_pixels outliers %(thresholdDust)f\"\n localDeps=[self._insertRunJobStep(\"xmipp_transform_filter\", args % locals(),prerequisites=localDeps)]\n micrographToExtract = fnNoDust\n \n #self._insertFunctionStep('getCTF', micId, micName, micrographToExtract)\n micName = removeBaseExt(mic.getFileName())\n \n # Actually extract\n deps.append(self._insertFunctionStep('extractParticlesStep', micId, micName, \n None, micrographToExtract, prerequisites=localDeps))\n # TODO: Delete temporary files\n \n # Insert step to create output objects \n self._insertFunctionStep('createOutputStep', prerequisites=deps)", "def _insertAllSteps(self):\n \n # Get pointer to input micrographs \n self.particlePickingRun = self.xmippParticlePicking.get()\n \n copyId = self._insertFunctionStep('copyInputFilesStep')\n # Get micrographs to pick\n #self.inputMicrographs.set(self.getInputMicrographs())\n \n deps = []\n for mic in self.getInputMicrographs():\n stepId = self._insertFunctionStep('autopickMicrographStep', mic.getFileName(), prerequisites=[copyId])\n deps.append(stepId)\n \n self._insertFunctionStep('_createOutput',self._getExtraPath(), prerequisites=deps)", "def emit_pack_instruction(self, *, loop_indices=None):", "def auto_pack(self, iterable):\r\n pack_type = self.count_packer.pack\r\n\r\n rle_encoded = self.compressed_pack(iterable)\r\n normal_encoded = self.uncompressed_pack(iterable)\r\n\r\n if len(rle_encoded) < len(normal_encoded):\r\n compression_type = IterableCompressionType.compress\r\n data = pack_type(compression_type) + rle_encoded\r\n\r\n # If they are equal, non rle is faster to rebuild\r\n else:\r\n compression_type = IterableCompressionType.no_compress\r\n data = pack_type(compression_type) + normal_encoded\r\n\r\n return data", "def conRFMixAndMaskToBeagle(indfile_name, rephasedhaps_pref, em_iters, win_size, chroms):\n\t### First get individual information\n\twindow_id = 0\n\tem_iter = em_iters\n\tindfile = open(indfile_name, \"r\")\t\n\tinds = []\n\tfor line in indfile:\n\t\tsplits = line.strip(\"\\r\\n\").split()\n\t\tinds.append(splits[1] + \"_A\")\n\t\tinds.append(splits[1] + \"_B\")\n\n\tallloci = []\n\toutfilename = rephasedhaps_pref + \"_w\" + str(win_size) + \".beagle\"\n\toutfile = open(outfilename, \"w\")\n\toutfile.write(\"I\\tid\\t\" + \"\\t\".join(inds) + \"\\n\")\n\t## Write genotype data out to file\n\n\tvitout = open(rephasedhaps_pref + \".vit\", \"w\")\n\twinout = open(rephasedhaps_pref + 
\".windows\", \"w\")\n\tfbkout = rephasedhaps_pref + \".fbk\"\n\tif os.path.exists(fbkout):\n\t\tos.remove(fbkout)\n\tvitlist = []\n\tfor chrom in chroms:\n\t\tprint chrom\n\t\tshapeitfilename = rephasedhaps_pref + \"_chr\" + str(chrom) + \"_shapeout.allelesRephased\" + str(em_iters) + \".txt\"\n\t\tshapeitfile = open(shapeitfilename, \"rb\")\n\t\tfbkin_name = rephasedhaps_pref + \"_chr\" + str(chrom) + \"_shapeout.\" + str(em_iters) + \".ForwardBackward.txt\"\n\t\tos.system('cat ' + fbkin_name + \" >> \" + fbkout) # Concatenate files together\n\t\tmarkerin = rephasedhaps_pref + \"_chr\" + str(chrom) + \"_shapeout.amaps\"\n\t\tmarkerfile = open(markerin, \"r\")\n\t\tloci=[]\n\t\talleles = {}\n\t\tfor mline in markerfile:\n\t\t\tmsplit = mline.strip().split()\n\t\t\tloci.append(msplit[1])\n\t\t\talleles[msplit[1]] = [msplit[3], msplit[4] ]\n\n\t\tallloci.extend(loci)\n\t\tfor j,line in enumerate(shapeitfile):\n\t\t\tsline = line.strip(\"\\r\\n\")\n\t\t\tzero, ones = alleles[loci[j]]\n\t\t\tfixed = [ recodeAllele(k, zero, ones) for k in sline ]\n\t\t\toutfile.write(\"M\\t\" + loci[j] + \"\\t\" + \"\\t\".join(fixed) + \"\\n\")\n\t\tvitfile = open(rephasedhaps_pref + \"_chr\" + str(chrom) + \"_shapeout.\" + str(em_iters) + \".Viterbi.txt\", \"r\")\n\t\tvitlist.extend([x.strip().split() for x in vitfile])\n\t\tshapeitfile.close()\n\t\tvitfile.close()\n\t\t\n\t# This will transpose the whole Viterbi file\n\t# Yikes this may take a lot of memory\n\tfor i,x in enumerate(zip(*vitlist)):\n\t\tvitout.write(inds[i] + \"\\t\")\n\t\tfor y in x:\n\t\t\tvitout.write(y+\"\\t\")\n\t\tvitout.write(\"\\n\")\n\t\t### This doesn't quite work yet so make sure to fix it next time\n\tfor l in allloci:\n\t\twinout.write(\"window\" + str(window_id) + \"\\t\" + l + \"\\n\")\n\t\twindow_id += 1\n\treturn([outfile.name, vitout.name, winout.name, fbkout])", "def add_batch(batch_index, pCS, orphans, fasta_d, cpus, dun_use_partial):\n cur_file = \"batch{0}.fasta\".format(batch_index)\n seqids = set([r.id for r in SeqIO.parse(open(cur_file), 'fasta')])\n o = ar.run_minimap(cur_file, \"seed{0}.S.fasta\".format(batch_index), cpus=cpus)\n print >> sys.stderr, \"processing\", o\n pCS, remains = sp.process_align_to_pCS(o, seqids, pCS, MiniReader, dun_use_partial=dun_use_partial)\n print >> sys.stderr, \"pCS: {0}, tucked: {1}, orphans: {2}, remains: {3}\".format( \\\n len(pCS.S), sum(v == 'T' for v in pCS.seq_stat.itervalues()), len(orphans), len(remains))\n # write batch<i>.remains.fasta\n cur_file = \"batch{0}.remains.fasta\".format(batch_index)\n FileIO.write_seqids_to_fasta(remains, cur_file, fasta_d)\n o = ar.run_minimap(cur_file, \"seed{0}.orphans.fasta\".format(batch_index), cpus=cpus)\n print >> sys.stderr, \"processing\", o\n pCS, orphans, remains = sp.process_align_to_orphan(o, remains, orphans, pCS, MiniReader, dun_use_partial=dun_use_partial)\n print >> sys.stderr, \"pCS: {0}, tucked: {1}, orphans: {2}, remains: {3}\".format( \\\n len(pCS.S), sum(v == 'T' for v in pCS.seq_stat.itervalues()), len(orphans), len(remains))\n # write batch<i>.remains2.fasta and self align\n cur_file = \"batch{0}.remains2.fasta\".format(batch_index)\n FileIO.write_seqids_to_fasta(remains, cur_file, fasta_d)\n o = ar.run_minimap(cur_file, cur_file, cpus=cpus)\n print >> sys.stderr, \"processing\", o\n pCS, remains = sp.process_self_align_into_seed(o, remains, MiniReader, pCS, dun_use_partial=dun_use_partial)\n print >> sys.stderr, \"pCS: {0}, tucked: {1}, orphans: {2}, remains: {3}\".format( \\\n len(pCS.S), sum(v == 'T' for v in 
pCS.seq_stat.itervalues()), len(orphans), len(remains))\n # combine remains+orphans to new orphans\n orphans = orphans.union(remains)\n FileIO.write_preClusterSet_to_fasta(pCS, \"seed{0}.S.fasta\".format(batch_index+1), fasta_d)\n FileIO.write_seqids_to_fasta(orphans, \"seed{0}.orphans.fasta\".format(batch_index+1), fasta_d)\n\n return pCS, orphans", "def make_sequence(image_names, dt, output_name):\n nim = nib.load(image_names[0])\n affine = nim.affine\n X, Y, Z = nim.header['dim'][1:4]\n T = len(image_names)\n image = np.zeros((X, Y, Z, T))\n\n for t in range(T):\n image[:, :, :, t] = nib.load(image_names[t]).get_data()\n\n nim2 = nib.Nifti1Image(image, affine)\n nim2.header['pixdim'][4] = dt\n nib.save(nim2, output_name)", "def transmogrify_inputs(self, plates):\n ichips, aliquots = [], []\n # A single plate run must be converted to a list of plates\n if isinstance(plates, dict):\n plates = [plates]\n for plate in plates:\n for chip_nr in range(1, 5):\n for well_nr in range(1, 9):\n key = \"chip-{}_well-{}\".format(chip_nr, well_nr)\n if plate.get(key, False):\n brains = find(object_provides=IAliquot.__identifier__,\n Title=plate[key])\n plate[key] = brains[0].UID\n aliquots.append(brains[0].getObject())\n key = \"chip-id-{}\".format(chip_nr)\n if plate.get(key, False):\n brains = find(object_provides=IiChip.__identifier__,\n Title=plate[key])\n plate[key] = brains[0].UID\n ichips.append(brains[0].getObject())\n\n return plates, ichips, aliquots", "def collate(self, batch):\n \n images = []\n indices = []\n roi_size = 5 if self.Train else 4\n rois = torch.zeros((len(batch), 20, roi_size), dtype=torch.float32)\n rois = rois.to(batch[0][1].device)\n \n for _b in range(len(batch)):\n # Accumulate patches:\n images.append(batch[_b][0].to(torch.float32))\n indices.append(batch[_b][2])\n \n # Accumulate ROI:\n \"\"\"\n image_num = torch.Tensor([_b]).expand(batch[_b][1].size(0))\n image_num = image_num.type(batch[_b][1].dtype).view(-1,1)\n image_num = image_num.to(batch[_b][1].device)\n _roi = torch.cat([image_num, batch[_b][1]], dim=1)\n rois = torch.cat([rois, _roi], dim=0)\n \"\"\"\n num_boxes = batch[_b][1].size(0)\n rois[_b,:num_boxes,:] = batch[_b][1]\n \n \n # Stack outputs and return\n batch = [torch.stack(images, dim=0), rois, torch.Tensor(indices)]\n return batch", "def compress_netcfd(folder_path, start_date, out_folder, file_name, num_of_rivids):\n\n # Based on 15 day forecast\n forecast_day_indices = np.array([0, 8, 16, 24, 32, 40, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84], dtype=np.int8)\n\n # Based on 10 day forecast\n # Excluding the first day because we already have initialization from the normal forecasts\n high_res_forecast_day_indices = np.array([24, 48, 72, 92, 100, 108, 112, 116, 120, 124])\n\n start_datetime = to_datetime(start_date, infer_datetime_format=True)\n dates = date_range(start_datetime + DateOffset(1), periods=15)\n high_res_dates = date_range(start_datetime + DateOffset(1), periods=10)\n\n # Ensemble Dimensions\n # 1) Rivid\n # 2) Number of forecast days (i.e. 
15 in a 15 day forecast)\n # 3) Number of ensembles\n\n ensembles = np.zeros((num_of_rivids, 15, 51), dtype=np.float32)\n initialization = np.zeros((num_of_rivids,), dtype=np.float32)\n\n for forecast_number in range(1, 52):\n file = os.path.join(folder_path, \"{}_{}.nc\".format(file_name, forecast_number))\n\n tmp_dataset = xr.open_dataset(file)\n streamflow = tmp_dataset['Qout'].data\n streamflow = streamflow[:, forecast_day_indices]\n\n if forecast_number == 1:\n initialization[:] = streamflow[:, 0]\n rivids = tmp_dataset['rivid'].data\n lat = tmp_dataset['lat'].data\n lon = tmp_dataset['lon'].data\n z = tmp_dataset['z'].data\n\n ensembles[:, :, forecast_number - 1] = streamflow[:, 1:]\n\n tmp_dataset.close()\n\n # High Res Forecast\n file = os.path.join(folder_path, \"{}_52.nc\".format(file_name))\n\n tmp_dataset = xr.open_dataset(file)\n\n high_res_forecast_data = tmp_dataset[\"Qout\"].data\n high_res_forecast_data = high_res_forecast_data[:, high_res_forecast_day_indices]\n\n tmp_dataset.close()\n\n data_variables = {\n \"Qout\": (['rivid', 'date', 'ensemble_number'], ensembles),\n \"Qout_high_res\": (['rivid', 'date_high_res'], high_res_forecast_data)\n }\n\n coords = {\n 'rivid': rivids,\n 'date': dates,\n 'date_high_res': high_res_dates,\n 'ensemble_number': np.arange(1, 52, dtype=np.uint8),\n 'initialization_values': ('rivid', initialization),\n 'lat': ('rivid', lat),\n 'lon': ('rivid', lon),\n 'z': ('rivid', z),\n 'start_date': start_datetime\n }\n\n xarray_dataset = xr.Dataset(data_variables, coords)\n xarray_dataset.to_netcdf(path=os.path.join(out_folder, '{}.nc'.format(start_date)), format='NETCDF4')", "def intercalateCodons( seq ):\r\n\tseqLen = len(seq)\r\n\tsitesPerCod = seqLen/3\r\n\textraSites = seqLen%3\r\n\tif extraSites == 0:\r\n\t\tcodOne = seq[0:sitesPerCod]\r\n\t\tcodTwo = seq[sitesPerCod:2*sitesPerCod]\r\n\t\tcodThree = seq[2*sitesPerCod:3*sitesPerCod]\r\n\telif extraSites == 1:\r\n\t\tcodOne = seq[0:sitesPerCod+1]\r\n\t\tcodTwo = seq[sitesPerCod+1:(2*sitesPerCod)+1]\r\n\t\tcodThree = seq[(2*sitesPerCod)+1:(3*sitesPerCod)+1]\r\n\telse:\r\n\t\tcodOne = seq[0:sitesPerCod+1]\r\n\t\tcodTwo = seq[sitesPerCod+1:(2*sitesPerCod)+2]\r\n\t\tcodThree = seq[(2*sitesPerCod)+2:(3*sitesPerCod)+2]\r\n\tnewSeq = [x for t in itertools.izip_longest(codOne,codTwo,codThree) for x in t]\r\n\t\"\"\"\r\n\thttp://stackoverflow.com/questions/11125212/interleaving-lists-in-python\r\n\t\"\"\"\r\n\tnewSeq = filter(lambda a: a != None, newSeq)\r\n\t\"\"\"\r\n\thttp://stackoverflow.com/questions/1157106/remove-all-occurrences-of-a-value-from-a-python-list\r\n\t\"\"\"\r\n\treturn newSeq", "def fix_flatten_coders(\n stages, pipeline_context, identity_urn=bundle_processor.IDENTITY_DOFN_URN):\n # type: (Iterable[Stage], TransformContext, str) -> Iterator[Stage]\n pcollections = pipeline_context.components.pcollections\n for stage in stages:\n transform = only_element(stage.transforms)\n if transform.spec.urn == common_urns.primitives.FLATTEN.urn:\n output_pcoll_id = only_element(transform.outputs.values())\n output_coder_id = pcollections[output_pcoll_id].coder_id\n for local_in, pcoll_in in list(transform.inputs.items()):\n if pcollections[pcoll_in].coder_id != output_coder_id:\n # Flatten requires that all its inputs be materialized with the\n # same coder as its output. 
Add stages to transcode flatten\n # inputs that use different coders.\n transcoded_pcollection = unique_name(\n pcollections,\n transform.unique_name + '/Transcode/' + local_in + '/out')\n transcode_name = unique_name(\n pipeline_context.components.transforms,\n transform.unique_name + '/Transcode/' + local_in)\n yield Stage(\n transcode_name,\n [\n beam_runner_api_pb2.PTransform(\n unique_name=transcode_name,\n inputs={local_in: pcoll_in},\n outputs={'out': transcoded_pcollection},\n spec=beam_runner_api_pb2.FunctionSpec(urn=identity_urn),\n environment_id=transform.environment_id)\n ],\n downstream_side_inputs=frozenset(),\n must_follow=stage.must_follow)\n pcollections[transcoded_pcollection].CopyFrom(pcollections[pcoll_in])\n pcollections[transcoded_pcollection].unique_name = (\n transcoded_pcollection)\n pcollections[transcoded_pcollection].coder_id = output_coder_id\n transform.inputs[local_in] = transcoded_pcollection\n\n yield stage", "def _autoplace(self, nodes):\n for node in nodes:\n node.autoplace()", "def placement_automatic(args):\n clarity_epp.placement.plate.copy_layout(lims, args.process_id)", "def make_shaped_repertoire(RNs):\n # get objective distribution\n bin_edges, obj_dist, volume = objective_distribution()\n # get an antigenic epitope sequence, and in case of nkey=1,2 check whether\n # it can populate all required bins, thus avoiding infinite loop below\n AgEpitope = get_AgEpitope(RNs)\n if cf.nkey == 1 or cf.nkey == 2:\n while 1:\n # get list of all possible binding partners and their energies\n all_partners = get_all_partners()\n all_energies = [E_best(partner, AgEpitope)\n for partner in all_partners]\n # check whether all bins are occupiable with these energies,\n # if not, get new epitope sequence\n indices = np.digitize(all_energies, bin_edges, right=True)\n ind_set = set(indices)\n ind_set.discard(0)\n # if all bins can be occupied, move on\n if ind_set == set(range(1, len(bin_edges))):\n break\n # else get a new epitope and check its validity\n else:\n AgEpitope = get_AgEpitope(RNs)\n # initialise empty list for counting how many seqs have been found per bin\n ist_dist = np.zeros(len(obj_dist))\n # seq_list for collecting identified sequences\n seq_list = []\n E_list = []\n # while ist_dist and obj_dist are not equal, get new sequences and position\n # them if they are useful\n # introduce a tolerance of how far bins are allowed to deviate from the\n # goal, as otherwise runtime explodes due to very long waiting times for\n # high binding energy codes in large nkey cases - allow an absolute\n # deviation of volume*tolerance % for each bin.\n abs_tol = volume * 0.005\n while np.sum(np.abs((ist_dist-obj_dist)) > abs_tol) > 0:\n ab = Ab_seq(RNs)\n Emax = E_best(ab, AgEpitope)\n # find index bin of this energy\n indx = np.digitize(Emax, bin_edges, right=True)\n # if the index is in the useful range and the bin is not yet full,\n # count the sequence and store it\n if indx in range(1, len(bin_edges)):\n if obj_dist[indx-1] - ist_dist[indx-1] > 0:\n ist_dist[indx-1] += 1\n seq_list.append(ab)\n E_list.append(Emax)\n\n return seq_list, E_list, AgEpitope", "def office_pack_up_all_seismograms(parser, args, params):\n local_args = parser.parse_known_args(args)\n control.pack_up_all_seismograms(params)", "def construct_segments(self):\n for strand in self.strand_list:\n strand.construct_segment()", "def pack():\n PackCommandExecutor().pack()", "def merge_evio_skims(run, seqno, slices):\n inset = {\"BCAL-LED\": \"hd_rawdata_{0:06d}_{1:03d}+{2},{3}.BCAL-LED.evio\",\n 
\"DIRC-LED\": \"hd_rawdata_{0:06d}_{1:03d}+{2},{3}.DIRC-LED.evio\",\n \"FCAL-LED\": \"hd_rawdata_{0:06d}_{1:03d}+{2},{3}.FCAL-LED.evio\",\n \"CCAL-LED\": \"hd_rawdata_{0:06d}_{1:03d}+{2},{3}.CCAL-LED.evio\",\n \"random\": \"hd_rawdata_{0:06d}_{1:03d}+{2},{3}.random.evio\",\n \"omega\": \"hd_rawdata_{0:06d}_{1:03d}+{2},{3}.omega.evio\",\n \"sync\": \"hd_rawdata_{0:06d}_{1:03d}+{2},{3}.sync.evio\",\n \"ps\": \"hd_rawdata_{0:06d}_{1:03d}+{2},{3}.ps.evio\",\n }\n outset = {\"BCAL-LED\": \"BCAL-LED_{0:06d}_{1:03d}.evio\",\n \"DIRC-LED\": \"DIRC-LED_{0:06d}_{1:03d}.evio\",\n \"FCAL-LED\": \"FCAL-LED_{0:06d}_{1:03d}.evio\",\n \"CCAL-LED\": \"CCAL-LED_{0:06d}_{1:03d}.evio\",\n \"random\": \"random_{0:06d}_{1:03d}.evio\",\n \"omega\": \"omega_{0:06d}_{1:03d}.evio\",\n \"sync\": \"sync_{0:06d}_{1:03d}.evio\",\n \"ps\": \"ps_{0:06d}_{1:03d}.evio\",\n }\n badslices = []\n slicepatt = re.compile(r\"([1-9][0-9]*),([1-9][0-9]*)/\")\n for iset in inset:\n ofile = outset[iset].format(run, seqno)\n ifiles = []\n for sl in slices:\n ifile = \"{0},{1}/\".format(sl[0], sl[1]) +\\\n inset[iset].format(run, seqno, sl[0], sl[1])\n if iset == \"sync\" and not os.path.exists(ifile):\n print(\"Warning in merge_evio_skims - \",\n \"missing sync event skim \",\n \"in slice {0},{1}\".format(sl[0], sl[1])\n )\n continue\n elif iset == \"omega\" and not os.path.exists(ifile):\n print(\"Warning in merge_evio_skims - \",\n \"missing omega event skim \",\n \"in slice {0},{1}\".format(sl[0], sl[1])\n )\n continue\n ifiles.append(ifile)\n cmd = subprocess.Popen([\"eviocat\", \"-o\", ofile] + ifiles,\n stderr=subprocess.PIPE)\n elog = cmd.communicate()\n if cmd.returncode != 0:\n for eline in elog[1].decode(\"ascii\").split('\\n'):\n badslice = slicepatt.search(eline)\n if badslice:\n badslices.append(\"{0},{1}\".format(badslice.group(1),\n badslice.group(2)))\n sys.stderr.write(eline + '\\n')\n sys.stderr.write(\"Error on output file {0}\".format(ofile) +\n \" - evio file merging failed!\\n\")\n sys.stderr.flush()\n continue\n odir = output_area + \"/\" + iset + \"/{0:06d}\".format(run)\n upload(ofile, odir)\n return badslices", "def prepare(preprocessors: transforms, *imgs: np.ndarray):\n return torch.stack([preprocessors(img) for img in imgs], dim=0)", "def __augmented_images(self, info, start):\n count = start\n final_img_to_save = []\n for pair in info:\n processedImage = self.__processImage(os.path.join(WORKING_DIR, pair[0]))\n if processedImage == None:\n continue\n # translation is not that important since CNNs are resistant to image translations\n rotatedImages = self.__applyRotations(processedImage)\n\n rotCount = 1\n for img in rotatedImages:\n filename = str(count) + \"_\" + str(rotCount) + \".jpg\"\n # img.save(os.path.join(directory, filename))\n final_img_to_save.append((img, pair[1], filename))\n rotCount += 1\n\n print(\"Augmenting image: {:05}\".format(count))\n count += 1\n return final_img_to_save", "def batch_expand(self, batch:dict):\n values_keys_order=[\n 'target_feat',\n 'msa_feat',\n 'seq_mask',\n 'aatype',\n 'prev_pos',\n 'prev_msa_first_row',\n 'prev_pair',\n 'residue_index',\n 'template_mask',\n 'template_aatype',\n 'template_pseudo_beta_mask',\n 'template_pseudo_beta',\n 'template_all_atom_positions',\n 'template_all_atom_masks',\n 'extra_msa',\n 'extra_has_deletion',\n 'extra_deletion_value',\n 'extra_msa_mask',\n 'msa_mask'] \n ordered_values=[]\n for i in values_keys_order:\n ordered_values.append(batch[i])\n print(' [INFO] atom37 -> torsion angles')\n ret = all_atom.atom37_to_torsion_angles(\n 
aatype=np.array(batch['template_aatype']),\n all_atom_pos=np.array(batch['template_all_atom_positions']),\n all_atom_mask=np.array(batch['template_all_atom_masks']),\n # Ensure consistent behaviour during testing:\n placeholder_for_undefined=not self.gc['zero_init'])\n for i in ret.values():\n ordered_values.append(torch.FloatTensor(i))\n return ordered_values", "def ingest_all(self):\n\t\tfor place in self.district_codes():\n\t\t\tself.sequence_ingest(place)\n\t\tif self.edition:\n\t\t\tconfigs.userconfig.update('PHE','latest_cases',self.edition)", "def build(self, origin, adder, items):\n request = self.request\n existing = origin.objectIds()\n # Add items, one by one.\n for item in items:\n identifier = item.get('identifier', '')\n assert len(identifier), 'Invalid json structure'\n if identifier not in existing:\n adder(str(identifier), identifier, no_default_content=True)\n content = origin._getOb(identifier)\n importer = Importer(origin, request, {'update_content': True})\n importer.importStream(io.BytesIO(item['export'].encode('utf-8')))\n # Add index document if needed.\n if 'index' in item and item['index']:\n if content.get_default() is None:\n factory = content.manage_addProduct['silva.app.document']\n factory.manage_addDocument(\n 'index', identifier, no_default_version=True)\n importer = Importer(content, request, {'update_content': True})\n importer.importStream(io.BytesIO(item['index'].encode('utf-8')))\n yield content, item", "def siftRegionsOfInterest(options,mapped_data_per_size_per_register,phase,cycle):\n for chromosome in sorted(mapped_data_per_size_per_register):\n # Make separate files for each chromosome\n output_filename=options.output_directory_per_run+\"/\"+options.input_filename+\"_\"+str(phase)+\"_\"+str(cycle)+\"_\"+chromosome+\".regionsOfInterest\"\n fhw=open(output_filename,\"w\")\n for register in sorted(mapped_data_per_size_per_register[chromosome]):\n start,end=0,0\n for coordinate in sorted(mapped_data_per_size_per_register[chromosome][register]):\n if start == 0:\n start = coordinate\n elif end == 0:\n if coordinate-start < phase*cycle:\n end = coordinate\n else:\n start = coordinate\n else:\n if coordinate-end < phase*cycle:\n end = coordinate\n else:\n fhw.write(str(register)+\"\\t\"+str(start)+\"\\t\"+str(end+phase-1)+\"\\n\")\n end=0\n start=coordinate\n if end!=0:\n fhw.write(str(register)+\"\\t\"+str(start)+\"\\t\"+str(end+phase-1)+\"\\n\")\n fhw.close()", "def recursive_unpack(self):\n\n def _genflatten(lst):\n if not lst:\n return []\n ##\n if isinstance(lst[0], Assembly):\n lst = lst[0].unpack()\n ##\n for elem in lst:\n if isinstance(elem, Assembly):\n apos = elem.GetPosition()\n asum = np.sum(apos)\n for x in elem.unpack():\n if asum:\n yield x.clone().shift(apos)\n else:\n yield x\n else:\n yield elem\n\n return list(_genflatten([self]))" ]
[ "0.60141784", "0.5004213", "0.4980384", "0.4872931", "0.48631522", "0.47958422", "0.4778473", "0.47028425", "0.4686376", "0.46838748", "0.4683811", "0.4666936", "0.46344525", "0.46170777", "0.46088699", "0.45872328", "0.45521164", "0.45509255", "0.45372292", "0.45364162", "0.45226067", "0.4514729", "0.45135242", "0.45104918", "0.45102927", "0.4504855", "0.450054", "0.450001", "0.44935566", "0.44810787" ]
0.6625913
0
Find the largest difference between consecutive numbers in a list.
def find_largest_diff(list_of_nums):
    largest_diff = 0
    for i in range(len(list_of_nums) - 1):
        diff = abs(list_of_nums[i] - list_of_nums[i+1])
        if diff > largest_diff:
            largest_diff = diff
    return largest_diff
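A quick usage check for the function above; the sample lists are assumptions added for illustration, not part of the dataset row.

print(find_largest_diff([3, 9, 4, 15, 2]))   # 13, from the consecutive pair (15, 2)
print(find_largest_diff([7]))                # 0, a single element has no consecutive pair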
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def greatest_difference(num_list):", "def find_largest_adjacent_difference(nums):\n pass", "def array_maximal_adjacent_difference( arr ):\n length = len(arr) - 1\n diffs = [ abs( arr[i] - arr[i+1] ) for i in range( length ) ]\n return max(diffs)", "def max_in_list(list):\n x=list[0] #set x be the first number in the list\n for i in range(0,len(list)):#go over the number in the list\n if x<=list[i]: #if the second one is bigger than the first\n x=list[i] #assign x to the bigger one\n else:\n continue#repeat until find the max number\n return x", "def find_greatest_number(incoming_list):\n retval = max(incoming_list)\n return retval", "def _maxdiff(xlist, ylist):\n return(max(abs(x-y) for x, y in zip(xlist, ylist)))", "def brute_force(L):\n\n max_diff = -float(\"inf\")\n length = len(L)\n for i in range(length - 1):\n start = L[i]\n for j in range(i + 1, length):\n end = L[j]\n diff = end - start\n max_diff = max(max_diff, diff)\n return max_diff", "def find_greatest_number(incoming_list: list):\n return max(incoming_list)", "def find_largest_number_in_list(self, list_with_numbers):\n return 0", "def max(input: list[int]) -> int:\n i = 0\n if len(input) == 0:\n raise ValueError(\"max() arg is an empty List\")\n\n else:\n while i < len(input):\n j = i + 1\n while j < len(input):\n if input[i] > input[j]:\n if j == len(input) - 1:\n if input[i] >= input[len(input) - 1]:\n return input[i]\n j += 1\n else:\n j += len(input)\n i += 1\n return input[len(input) - 1]", "def find_greatest_number(incoming_list):\n #magiclownumber= none\n #retval= magiclownumber\n #for value in incoming_list:\n #if not retval:\n #retval = value\n # if value> retvale\n #retval= value\n #return retval\n greatest_number = max(incoming_list)\n return greatest_number", "def _lis(A, prev):\n if len(A) == 0:\n return 0\n\n m1 = _lis(A[1:], prev) # LIS not including A[0]\n if A[0] > prev:\n m2 = 1 + _lis(A[1:], A[0])\n return max(m1, m2)\n else:\n return m1", "def find_max_distance(l):\n\tcomb = list(combinations(list(range(1, len(l))), 2))\n\tx, y, max_distance = 0, 0, 0\n\n\tfor i,j in comb:\n\t\tif np.sum(np.abs(l[i]-l[j])) > max_distance:\n\t\t\tx, y, max_distance = i, j, np.sum(np.abs(l[i]-l[j]))\n\treturn x, y, max_distance", "def find_greatest_number(incoming_list):\n #return_value = max(incoming_list)\n #return return_value\n\n MAGIC_LOW_NUMBER = None\n retval = MAGIC_LOW_NUMBER\n\n # 1,2,3,4,5,1\n # MAGIC_LOW_NUMBER, 1 ->STORE 1\n #1 , 2 ->STORE 2\n #2, , 3 ->STORE 3\n #3, , 4 ->STORE 4 \n #4, , 5 ->STORE 5\n #5, , 1 ->??? 
nothing \n for value in incoming_list:\n if not retval:\n retval = value\n if value > retval:\n retval = value", "def bu(lengths: List[int], L: int) -> int:\n N = len(lengths)\n dp = [0] + [-1]*L\n for l in lengths:\n for j in range(l, L+1):\n dp[j] = max(dp[j], dp[j-l]+1 if dp[j-l] != -1 else -1)\n return dp[-1]", "def index_of_max_change(vals):\n i_vals = zip(range(len(vals)), vals)\n vals = [v for i, v in i_vals]\n vals_diff = [abs(v1 - v0) for v0, v1 in zip(vals[:-1], vals[1:])]\n return i_vals[vals_diff.index(max(vals_diff))][0]", "def question_27(list_num: int) -> int:\n return [abs(list_num[i+1] - list_num[i]) for i,v in enumerate(list_num) if\n i <= len(list_num) - 2]", "def my_max(in_list):\n biggest = in_list[0]\n for l in in_list:\n if l > biggest:\n biggest = l\n return biggest", "def max_(lst: Iterable[int]) -> int:\n return reduce(lambda x, y: x if x > y else y, lst)", "def find_max(ls):\n\n if len(ls) == 1:\n return ls[0]\n elif len(ls) == 2:\n return ls[0] if ls[0] > ls[1] else ls[1]\n else:\n mid = len(ls) // 2\n m1 = find_max(ls[0:mid])\n m2 = find_max(ls[mid:])\n return m1 if m1 > m2 else m2", "def longest(my_list):\r\n\treturn sorted(my_list, key=len)[-1]", "def previous_min(L):\n\n return itertoolsextra.max_diff(L)", "def max_num_in_list(a_list):\n max_number = max(a_list)\n return max_number", "def findMaxConsecutiveOnes(nums: List[int]) -> int:\n count = maxCount = 0\n for num in nums:\n if num == 1:\n count += 1\n else:\n maxCount = max([count, maxCount])\n count = 0\n return max([count, maxCount])", "def maxTurbulenceSize(self, arr: List[int]) -> int:\n if len(arr) == 1:\n return 1\n ret = 1\n tmp_ret = 0\n last_flag = None\n for i in range(1, len(arr)):\n if arr[i] == arr[i - 1]:\n current_flag = None\n else:\n current_flag = arr[i] > arr[i - 1]\n\n if current_flag is None:\n ret = max(ret, tmp_ret)\n tmp_ret = 1\n elif last_flag is None or last_flag == current_flag:\n ret = max(ret, tmp_ret)\n tmp_ret = 2\n else:\n tmp_ret += 1\n\n last_flag = current_flag\n return max(ret, tmp_ret)", "def range(l):\n if l:\n s_list = sorted(l)\n return s_list[-1] - s_list[0]\n else:\n raise ValueError(\"list empty\")", "def find_greater_numbers(nums):\n times = 0\n for loop in range(len(nums) - 1):\n for follow in range(loop + 1, len(nums)):\n if (nums[loop] < nums[follow]):\n times+= 1\n return times", "def sum_abs_biggest_3_value(list_data):\n data = copy.deepcopy(list_data)\n data.sort()\n\n return sum_abs_list(data[-3:])", "def first_unique_local_maximum_of_derivative(data):\n\tfirst_unique_local_maximum_index = 1\n\tlast_delta = 0\n\t\n\tfor i in range(1, len(data) - 1):\n\t\tcurrent_delta = data[i] - data[i - 1]\n\t\t\n\t\tif current_delta >= last_delta:\n\t\t\tfirst_unique_local_maximum_index += 1\n\t\t\tlast_delta = current_delta\n\t\telse:\n\t\t\tbreak\n\t\n\treturn first_unique_local_maximum_index", "def max(input: list[int]) -> int:\n if len(input) == 0:\n raise ValueError(\"max() arg is an empty List\")\n else:\n input.sort()\n return input[-1]" ]
[ "0.80127186", "0.71614146", "0.7041943", "0.7025927", "0.6988836", "0.69851476", "0.6963076", "0.68893635", "0.674169", "0.67026246", "0.66979045", "0.6614405", "0.6588544", "0.6538631", "0.65331626", "0.6529724", "0.6506182", "0.6465983", "0.6441132", "0.6433797", "0.6396522", "0.63753307", "0.63467085", "0.6340861", "0.6333718", "0.63239187", "0.6309082", "0.6304836", "0.6300685", "0.62937874" ]
0.84058464
0
Create a new Spirit object. Ensures that the default type value is applied if it's missing.
def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
    if not self.tags('type').filled:
        self.tags('type').append('spirit')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, type_=None, text=None, ssml=None):\n default_attr = dict(type=str(),\n text=str(),\n ssml=str())\n self.type = type_\n self.text = text\n self.ssml = ssml\n self._set_default_attr(default_attr)", "def __init__(\n self,\n *,\n type: str = \"string\",\n default: str = None,\n optional: bool = None,\n description: str = None,\n **kwargs,\n ):\n pass", "def __init__(self, *args, **kwargs):\n if not args:\n raise TypeError('Field definition incorrect, please provide type')\n elif not isinstance(args[0], type):\n raise TypeError('Field input not a type')\n self.data_type = args[0]\n if ((self.data_type not in self.allowed_types and\n not issubclass(self.data_type, self.allowed_types))):\n raise TypeError('Field input type %s is not allowed' % self.data_type)\n self.check_kwargs(kwargs, self.data_type)\n # attributes\n if 'auto_update' in kwargs and kwargs['auto_update']:\n self.auto_update = self.data_type.utcnow # datetime.datetime\n if 'document_class' in kwargs and kwargs['document_class']:\n self.document_class = kwargs['document_class']\n self.validator = self.generate_validator(self.data_type, **kwargs)\n self.required = kwargs['required'] if 'required' in kwargs else True\n if 'default' in kwargs:\n self.default_value = kwargs['default']\n if not callable(self.default_value):\n validation_failed = False\n try:\n self.validator(self.default_value)\n except ValidationError as e:\n new_err = ('default value \"%s\"' % kwargs['default']) + ''.join(e.args)\n validation_failed = True\n if validation_failed:\n raise TypeError(new_err)\n # check if dict/list type and wrap copy in callable\n if isinstance(self.default_value, (dict, list)):\n def default_value_wrapper():\n return copy.deepcopy(kwargs['default'])\n self.default_value = default_value_wrapper", "def __init__(self, raw_input: Dict):\n self.name = raw_input.get(\"name\")\n self.description = raw_input.get(\"description\")\n self.type: TypeDefer = TypeDefer(raw_input.get(\"type\")) if raw_input.get(\"type\") is not None else None\n self.default_value = raw_input.get(\"defaultValue\")", "def __init__(self, preprocess=None, required=False):\n\n super().__init__(preprocess, required)\n self._type = str\n self._value = []", "def get_default_value_type (ty, none = True) :\n if ty is None and none : return None\n elif ty == str : return \"\"\n elif ty == int : return 0\n elif ty == decimal.Decimal : return decimal.Decimal(0)\n elif ty == float : return 0.0\n else :\n raise PQHException (\"type expected in \" + str (guess_type_value_type ()))", "def __init__(\n self,\n *,\n type: str = \"boolean\",\n default: bool = None,\n optional: bool = None,\n description: str = None,\n **kwargs,\n ):\n pass", "def parse_defaults(self, stmt):\r\n spec_type = stmt['spec_type']\r\n if spec_type in self._defaults:\r\n raise ValueError('More than one default for {}'.format(stmt['spec_type']))\r\n self._defaults[spec_type] = Default(spec_type, stmt)\r\n return None", "def NoneOrType(type_):\n def coercer(value):\n if value is None:\n return value\n else:\n return type_(value)\n return coercer", "def _infer_default_value_type(default_value):\n if default_value is Missing:\n return DefaultValue.missing\n elif default_value is Self:\n return DefaultValue.object\n elif isinstance(default_value, TraitListObject):\n return DefaultValue.trait_list_object\n elif isinstance(default_value, TraitDictObject):\n return DefaultValue.trait_dict_object\n elif isinstance(default_value, TraitSetObject):\n return DefaultValue.trait_set_object\n elif 
isinstance(default_value, list):\n return DefaultValue.list_copy\n elif isinstance(default_value, dict):\n return DefaultValue.dict_copy\n else:\n return DefaultValue.constant", "def __init__(self, tokenType, value = None):\r\n\t\tif not tokenType in VALID_TOKEN_TYPES:\r\n\t\t\traise ValueError(\"Invalid token type '{}' given\".format(tokenType))\r\n\r\n\t\tself.type = tokenType\r\n\t\tif value:\r\n\t\t\tself.value = value\r\n\t\telse:\r\n\t\t\tself.value = self.type", "def __init__(self, *args, **kwargs):\r\n Grammar.__init__(self)\r\n dict.__init__(self, *args, **kwargs)", "def __init__(\n self,\n *,\n type: str = \"number\",\n default: float = None,\n min: float = None,\n max: float = None,\n optional: bool = None,\n description: str = None,\n **kwargs,\n ):\n pass", "def __init__(self, name, exclusive=False, default=None):\n self.name = name\n self.type = etau.get_class_name(self)[: -len(\"Schema\")]\n self.exclusive = exclusive\n self.default = default\n self._attr_cls = etau.get_class(self.type)", "def _from_clips_value(cls, x):\n return cls.DEFAULT if x is None else cls.PYTHON_TYPE(x)", "def test_alright_when_required_field_is_missing_but_default_is_given():\n\n model_definition = {'language': {'type': 'fixed',\n 'required': True,\n 'persisted': True,\n 'default': 'portuguese'},\n 'source': {'type': 'list',\n 'required': False,\n 'persisted': True}}\n product1 = {'source': ['Whatever']}\n factory = ProductModelFactory(model_definition)\n factory.build('product1', product1)\n # Ok. No exceptions were raised.", "def __init__(\n self,\n default=['ref', 'sentence', 'text_full'],\n stylize=['sentence', 'text_full']\n ):\n self.default = default\n self.stylize = stylize", "def test_ruler_with_defaults_as_not_dict_raises_error(nlp: Language) -> None:\n with pytest.raises(TypeError):\n SpaczzRuler(nlp, spaczz_fuzzy_defaults=\"ignore_case\")", "def __init__(self, value):\n if value is True or value is None:\n self.weight = new openfst.TropicalWeight(openfst.TropicalWeightOne())\n elif value is False:\n self.weight = new openfst.TropicalWeight(openfst.TropicalWeightZero())\n else:\n self.weight = new openfst.TropicalWeight(float(value))", "def validate_default_element(self, value):\n if isinstance(value, (six.string_types, six.integer_types)):\n # Validation of the value does not happen for delayed resolution\n # enumerated types. 
Ignore if type is not yet resolved.\n if self.__type:\n self.__type(value)\n return value\n\n return super(EnumField, self).validate_default_element(value)", "def __init__(__self__, *,\n type_name: Optional[pulumi.Input[str]] = None,\n type_version_arn: Optional[pulumi.Input[str]] = None,\n version_id: Optional[pulumi.Input[str]] = None):\n if type_name is not None:\n pulumi.set(__self__, \"type_name\", type_name)\n if type_version_arn is not None:\n pulumi.set(__self__, \"type_version_arn\", type_version_arn)\n if version_id is not None:\n pulumi.set(__self__, \"version_id\", version_id)", "def default():", "def _maybe_use_hardcoded_type(self, value, name):\n if value is not UNRESOLVED_VALUE and not isinstance(value, MultiValuedValue):\n return value\n\n try:\n typ = self.config.NAMES_OF_KNOWN_TYPE[name]\n except KeyError:\n return value\n else:\n return TypedValue(typ)", "def __init__(self, str=None, type=None, dna=None, r=None, b=None, g=None):\n # have they passed in a stringified DNA object?\n if (str != None):\n self.makeFromNetString(str)\n # have they specified what type of DNA?\n elif (type != None):\n if (type == 's'): # Suit\n self.newSuit()\n else:\n # Invalid type\n assert 0\n else:\n # mark DNA as undefined\n self.type = 'u'", "def __init__(self, type_: Union[ConstraintTypes, str], value: Any):\n self.type = ConstraintTypes(type_)\n self.value = value\n enforce(self.check_validity(), \"ConstraintType initialization inconsistent.\")", "def __init__(__self__, *,\n type: Optional[pulumi.Input[Union[str, 'VNetSolutionType']]] = None):\n if type is not None:\n pulumi.set(__self__, \"type\", type)", "def __init__(self, raw_arg: Dict):\n self.name = raw_arg.get(\"name\")\n self.description = raw_arg.get(\"description\")\n self.type = TypeDefer(raw_arg.get(\"type\")) if raw_arg.get(\"type\") is not None else None\n self.default_value = raw_arg.get(\"defaultValue\")", "def __init__(self, line, context):\n match = Ftype_type_decl.type_match(line)\n if match is None:\n raise ParseSyntaxError(\"type declaration\", token=line, context=context)\n else:\n self._match_len = len(match.group(0))\n self._class = match.group(1)\n self._typestr = match.group(2)\n self._kind = self.typestr\n # End if", "def __init__(self, default = None, mode = READWRITE, type = None, **keywords):\n self.type = type if self.type is None else self.type\n Property.__init__(self, default, mode, **keywords)", "def __init__(self, name, typ, required, key, default, extra):\n self.name = name\n self.type = typ\n self.required = required\n self.key = key\n self.default = default\n self.extra = extra" ]
[ "0.5977207", "0.5649694", "0.55729514", "0.55137086", "0.5467393", "0.5431013", "0.5268109", "0.52392423", "0.52261865", "0.5222359", "0.520309", "0.5161894", "0.51046956", "0.50672466", "0.50542367", "0.50511277", "0.5050167", "0.50404984", "0.5032374", "0.5022429", "0.5020466", "0.5016619", "0.50108415", "0.49855042", "0.49758932", "0.4972831", "0.49613905", "0.49526945", "0.49449715", "0.4941296" ]
0.6503574
0
Binary Search arr from index l to r for x
def bs(arr, l, r, x):
    while l <= r:
        mid = l + (r - l)//2
        if arr[mid] == x:
            return mid
        elif arr[mid] < x:
            l = mid + 1
        else:
            r = mid - 1
    return r
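A short usage sketch for the function above; the sample array is an assumption. Note that on a miss the function falls through to return r, i.e. the index of the largest element not exceeding x (or -1 if none exists).

arr = [1, 3, 5, 7, 9, 11]
print(bs(arr, 0, len(arr) - 1, 7))   # 3  (exact match at index 3)
print(bs(arr, 0, len(arr) - 1, 8))   # 3  (no match: index of the largest element <= 8)
print(bs(arr, 0, len(arr) - 1, 0))   # -1 (x is smaller than every element)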
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bin_search(arr, x):\n \n low = 0\n hi = len(arr) - 1\n \n while(low <= hi): \n \n mid = int((low + hi) / 2) # find middle idx\n\n if( x >= arr[mid]): # if x on the right, change low idx and search right side\n low = mid + 1; \n else: # else search left side\n hi = mid - 1\n\n return hi", "def BinarySearch(array, l, r, x):\r\n if r >= l:\r\n mid = l + (r - l) // 2\r\n if array[mid][0] == x:\r\n return array[mid][1]\r\n elif array[mid][0] > x:\r\n return BinarySearch(array, l, mid - 1, x)\r\n else:\r\n return BinarySearch(array, mid + 1, r, x)\r\n else:\r\n return \"Not found\"", "def binary_search_iterative(arr, x):\n\n if len(arr) > 1:\n mid = len(arr) // 2\n \n first_half = arr[: mid]\n second_half = arr[mid :]\n \n if x == arr[mid]:\n return True\n \n elif x < arr[mid]:\n i = 0\n while i <= len(first_half):\n if first_half[i] == x:\n return True\n else:\n i += 1\n \n elif x > arr[mid]:\n j = 0 \n while j < len(second_half):\n if second_half[j] == x:\n return True\n else:\n j += 1\n \n else:\n return f\"X: {x} no in array!\"\n \n else:\n return -1", "def bin_search2(A,x, low=0, hi=None):\n hi = hi if hi is not None else len(A)\n pos = bisect.bisect_left(A,x,low,hi)\n return pos", "def binary_search(arr, x):\n if x >= arr[len(arr) - 1]:\n return len(arr)\n\n left = 0\n right = len(arr) - 1\n mid = int((left + right) / 2)\n while(left < right):\n print(str(left) + \" : \" + str(right) + \"\\n\")\n mid = int((left + right) / 2)\n if x >= arr[mid]:\n mid = mid + 1\n left = mid\n else:\n right = mid\n\n return mid", "def binary_search(a,x):\n start = 0 \n end = len(a)-1\n\n while start<= end:\n mid = (start+end)//2 \n if x == a[mid]:\n return mid\n elif x < a[mid]:\n end = mid-1\n else:\n start = mid+1\n return -1", "def fn(arr, x):\n lo, hi = 0, len(arr)\n while lo < hi: \n mid = lo + hi >> 1\n if arr[mid] < x: lo = mid+1\n else: hi = mid\n return lo", "def binary_search(array, x):\n if len(array) < 1:\n return False\n elif len(array) == 1:\n if array[0] == x:\n return True\n else:\n return False\n else:\n _mid = int(len(array) / 2)\n _mid_element = array[_mid]\n if _mid_element == x:\n return True\n else:\n if _mid_element < x:\n return binary_search(array[_mid+1:], x)\n else:\n return binary_search(array[:_mid], x)\n \n return _mid", "def binary_search_whole_array(arr, target):\n return binary_search(arr, target, 0, len(arr))", "def rotated_array_search(input_list, number):\n\n start_idx = 0\n end_idx = len(input_list) - 1\n\n result = mod_binary_search(input_list, number, start_idx, end_idx) \n\n return result", "def rotated_array_search(input_list , number ):\r\n\r\n return binarysearch(input_list, number, 0, len(input_list) - 1)", "def binarysearch(a, i):\n l = 0\n r = len(a) - 1\n \n while l < r:\n m = l + (r-l)/2\n \n if a[m] == i:\n return m\n elif a[m] < i:\n l = m + 1\n else:\n r = m\n \n if l > 0 and abs(i - a[l-1]) < abs(i- a[l]):\n return l-1\n \n return l", "def binary_search(data, x):\n last = len(data) - 1 # Index of last element in list currently be searched\n cursor = 0 # Index of the current value being treated as the element we are looking for\n \n while cursor <= last: \n \n mid = cursor + (last - cursor)/2; \n \n # Check if x is present at mid \n if data[mid] == x: \n return mid \n \n # If x is greater, ignore left half \n elif data[mid] < x: \n cursor = mid + 1\n \n # If x is smaller, ignore right half \n else: \n last = mid - 1\n \n # If we can't halve the list anymore, then the element was not present \n return -1", "def binary_search(array: list[int], target: 
int) -> int:\n left = 0\n right = len(array) - 1\n\n while left <= right:\n mid = (left + right) // 2\n\n if array[mid] == target:\n return mid\n elif array[mid] < target:\n left = mid + 1\n elif array[mid] > target:\n right = mid - 1\n\n return -1", "def binary_search(arr, number, left=None, right=None):\n\n if not left:\n left = 0\n if not right:\n right = len(arr)\n\n look_index = floor((left + right) / 2)\n val_at_index = arr[look_index]\n\n if number > val_at_index:\n left = look_index\n right = right\n return binary_search(arr, number, left=left, right=right)\n elif number < val_at_index:\n right = look_index\n left = left\n return binary_search(arr, number, left=left, right=right)\n elif number == val_at_index:\n return look_index\n\n return None", "def bi_search(l: int, r: int) -> (int, bool):\n while l <= r:\n m = (l + r) // 2\n if nums[m] > target:\n r = m - 1\n elif nums[m] == target:\n return m, True\n else:\n l = m + 1\n \n return -1, False", "def bin_search(A,x, low=0, hi=None):\n hi = hi if hi is not None else len(A)\n pos = bisect.bisect_left(A,x,low,hi)\n return (pos if pos != hi and A[pos]==x else -1)", "def bin_search(array, key):\n return bin_search_util(array, key, 0, len(array) - 1)", "def binarySearch(lst, x, lo = 0, hi = None):\r\r\n comparisons = 0\r\r\n if hi is None:\r\r\n hi = len(lst)\r\r\n while lo < hi:\r\r\n comparisons += 1\r\r\n mid = (lo + hi)//2\r\r\n midval = lst[mid]\r\r\n if midval < x:\r\r\n lo = mid+1\r\r\n elif midval > x: \r\r\n hi = mid\r\r\n else:\r\r\n return (mid, comparisons)\r\r\n return (-1, comparisons)", "def binary_search(input_array, value):\n first = 0\n last = len(input_array)-1\n\n while(first <= last):\n mid_index = int((first + last) / 2)\n\n if input_array[mid_index] == value:\n return mid_index\n elif input_array[mid_index] < value:\n first = mid_index + 1\n else:\n last = mid_index - 1\n\n return -1", "def binary_search(alist, target):\n index = binary_search_iterative(alist, target)\n return index", "def binary_search(i, l):\n first = 0\n last = len(l) - 1\n found = False\n\n while first <= last and found is False:\n midpoint = (first + last) // 2\n if l[midpoint] == i:\n return True\n else:\n if i < l[midpoint]:\n last = midpoint-1\n else:\n first = midpoint+1", "def linear_search(arr, x):\n for i in range(len(arr)):\n if arr[i] == x:\n return i\n \n return -1", "def binary_search(arr, value, start=None, end=None):\n if start is None:\n start = 0\n if end is None:\n end = len(arr) -1\n \n index = int((end - start)/2 + start)\n mid_value = arr[index]\n if mid_value == value:\n return index\n elif mid_value > value:\n return binary_search(arr, value, start, index)\n elif mid_value < value:\n return binary_search(arr, value, index, end)", "def binary_search(elements, to_find, lo=0, hi=None):\n if hi is None:\n hi = len(elements)\n while lo < hi:\n mid = (lo+hi)//2\n midval = elements[mid]\n if midval < to_find:\n lo = mid+1\n elif midval > to_find: \n hi = mid\n else:\n return hi\n return hi", "def binarySearch(target: int, arr: list, lowIndex: int, highIndex: int):\n if lowIndex > highIndex:\n return False\n else:\n mid = (lowIndex + highIndex) // 2\n if target < arr[mid]:\n return binarySearch(target, arr, lowIndex, mid - 1)\n elif target > arr[mid]:\n return binarySearch(target, arr, mid + 1, highIndex)\n else:\n return True", "def binary_search(arr: List[int], value: int):\n start = 0\n end = len(arr) - 1\n\n while start <= end:\n # to avoid start+end overflow and bit operate is faster, use current start+((end-start)>>1)\n # 
which is start+(end-start)/2\n mid = start + ((end - start) >> 1)\n mid_value = arr[mid]\n if value == mid_value:\n return mid\n elif value < mid_value:\n end = mid - 1\n else:\n start = mid + 1\n\n return -1", "def binary_search(input_array, value):\n \n array_length = len(input_array)\n \n #(\"array length:\", array_length)\n \n left = 0\n right = array_length-1\n \n while left <= right:\n \n mid = ( left + right ) // 2\n #print(\"mid, mid value: \", mid, input_array[mid])\n \n if input_array[ mid ] == value:\n return mid\n \n elif input_array[ mid ] < value:\n # midpoint value is smaller than target, then search right half\n left = mid + 1\n \n else:\n # midpoint value is larger than target, then search left half\n right = mid - 1\n \n \n \n return -1", "def binarySearch(arr=[], min=None, max=None, find=0):\n\n if min == None:\n min = 0\n if max == None:\n max = len(arr) - 1\n\n while min <= max:\n\n print \"min:{} max:{}\".format(min, max)\n mid = (min + max) / 2\n if arr[mid] == find:\n return mid\n elif arr[mid] < find:\n min = mid + 1\n else:\n max = mid - 1\n\n return None", "def test_bin_search(self):\n list_val =[0,1,2,3,4,7,8,9,10]\n low = 0\n high = len(list_val)-1\n self.assertEqual(bin_search(4, 0, len(list_val)-1, list_val), 4 )" ]
[ "0.8095521", "0.76254034", "0.74995446", "0.73398685", "0.73267186", "0.7304711", "0.725838", "0.7249893", "0.72426206", "0.72374916", "0.71893203", "0.71111673", "0.71048427", "0.7081518", "0.7067945", "0.7062905", "0.70593274", "0.6954989", "0.69393975", "0.69384575", "0.6921447", "0.6917471", "0.690807", "0.69070226", "0.6897834", "0.686309", "0.68512", "0.6838862", "0.68034035", "0.67973715" ]
0.77745306
1
Loads a buffer with the render of the darkening around the halo.
def pre_render(self) -> None:
    self.buffer = Surface((self.render_width, self.render_height), SRCALPHA)
    self.buffer.fill(list(self.halo_texture.surfaces.values())[0].get_at((0, 0)))
    self.buffer.fill((0, 0, 0, 0), Rect(
        (self.render_width - self.halo_texture.get_width()) // 2,
        (self.render_height - self.halo_texture.get_height()) // 2,
        self.halo_texture.get_width(),
        self.halo_texture.get_height()
    ))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _make_buffer(self, width, height):\n fb_prop = p3d.FrameBufferProperties(p3d.FrameBufferProperties.get_default())\n fb_prop.set_multisamples(self._multisamples)\n fb_prop.set_srgb_color(self._srgb_color)\n\n self._buffer = self._engine.make_output(\n self._pipe, name=\"offscreen\", sort=0,\n fb_prop=p3d.FrameBufferProperties.get_default(),\n win_prop=p3d.WindowProperties(size=(width, height)),\n flags=p3d.GraphicsPipe.BFRefuseWindow)\n\n self._region = self._buffer.make_display_region()\n\n self._depth_tex = p3d.Texture()\n self._depth_tex.setFormat(p3d.Texture.FDepthComponent)\n self._buffer.addRenderTexture(\n self._depth_tex, p3d.GraphicsOutput.RTMCopyRam, p3d.GraphicsOutput.RTPDepth)\n\n self._color_tex = p3d.Texture()\n self._color_tex.setFormat(p3d.Texture.FRgba8)\n self._buffer.addRenderTexture(\n self._color_tex, p3d.GraphicsOutput.RTMCopyRam, p3d.GraphicsOutput.RTPColor)", "def set_light(r, g, b):\r\n for x in range(4):\r\n set_pixel(x, r, g, b)\r\n\r\n \"\"\"Output the buffer \"\"\"\r\n _sof()\r\n\r\n for pixel in pixels:\r\n r, g, b, brightness = pixel\r\n _write_byte(0b11100000 | brightness)\r\n _write_byte(b)\r\n _write_byte(g)\r\n _write_byte(r)\r\n\r\n _eof()", "def glblshow(X, border=0.0):\n from numpy import take, resize, shape\n from numpy.random import rand\n\n mmin = X.min()\n mmax = X.max()\n ncolors = mmax - mmin + 1\n R = to_int32(rand(ncolors)*255)\n G = to_int32(rand(ncolors)*255)\n B = to_int32(rand(ncolors)*255)\n if mmin == 0:\n R[0],G[0],B[0] = 0,0,0\n r=resize(take(R, X.ravel() - mmin),X.shape)\n g=resize(take(G, X.ravel() - mmin),X.shape)\n b=resize(take(B, X.ravel() - mmin),X.shape)\n Y=concat('d',r,g,b)\n return Y", "def set_hslColorBuffer(self, ledIndex, buff):\n return self._upload(\"hsl:0:\" + str(int(ledIndex)), buff)", "def _save_buffer(self):\n img_data = renderer.fbuffer.read(mode='color', alpha=False)\n img = Image.fromarray(img_data)\n img.save(self._save_fname)\n self._save_flag = False", "def init_buffer(self):\n \n self.shape.buf = [pi3d.Buffer(self.shape, self.verts, self.texcoords, self.inds, self.norms)]\n self.shape.set_draw_details(self.shader, [self.spritesheet.img])", "def CopyFromBufferRGBA(self, buffer):\n self.CopyFromBuffer(buffer, wx.BitmapBufferFormat_RGBA)", "def place_headlamp_light():\n\n lx = 1.0\n ly = light_height\n lz = 2.0\n #light_position = [lx, ly, lz, 1.0]\n light_position = [0.0, 0.0, 0.0, 1]\n light_ambient = [ 1*brightness, 1*brightness, 1*brightness, 1.0 ]\n light_diffuse = [ 1*brightness, 1*brightness, 1*brightness, 1.0 ]\n light_specular = [ 1*brightness, 1*brightness, 1*brightness, 1.0 ]\n light_direction = [1.0, -1.0, 1.0, 0.0] # Light points down\n # glViewport(0, 0, win_width, win_height)\n # glMatrixMode(GL_PROJECTION)\n # glLoadIdentity()\n # gluPerspective(40.0, float(win_width) / float(win_height), 0.01, 100.0)\n #\n # glMatrixMode(GL_MODELVIEW)\n # glLoadIdentity()\n # glPushMatrix()\n glLightfv(GL_LIGHT4, GL_POSITION, light_position)\n\n\n\n #glLightfv(GL_LIGHT4, GL_POSITION, (GLfloat * 4)(0.0, 0.0, 0.0, 1))\n glLightfv(GL_LIGHT4, GL_AMBIENT, light_ambient)\n glLightfv(GL_LIGHT4, GL_DIFFUSE, light_diffuse)\n glLightfv(GL_LIGHT4, GL_SPECULAR, light_specular)\n\n # Constant attenuation (for distance, etc.)\n # Only works for fixed light locations! 
Otherwise disabled\n # glLightf(GL_LIGHT1, GL_CONSTANT_ATTENUATION, 2.0)\n # glLightf(GL_LIGHT1, GL_LINEAR_ATTENUATION, 0.0)\n # glLightf(GL_LIGHT1, GL_QUADRATIC_ATTENUATION, 0.0)\n\n glLightf(GL_LIGHT4, GL_CONSTANT_ATTENUATION, 3.0)\n glLightf(GL_LIGHT4, GL_LINEAR_ATTENUATION, 0.0)\n glLightf(GL_LIGHT4, GL_QUADRATIC_ATTENUATION, 0.0)\n\n # Create a spotlight effect (none at the moment)\n if headlamp_is_on:\n glLightf(GL_LIGHT4, GL_SPOT_CUTOFF, 30.0)\n glLightf(GL_LIGHT4, GL_SPOT_EXPONENT, 0.0)\n glLightfv(GL_LIGHT4, GL_SPOT_DIRECTION, light_direction)\n else:\n glLightf(GL_LIGHT4, GL_SPOT_CUTOFF, 180.0)\n glLightf(GL_LIGHT4, GL_SPOT_EXPONENT, 0.0)\n\n glLightModeli(GL_LIGHT_MODEL_LOCAL_VIEWER, use_lv)\n glLightModeli(GL_LIGHT_MODEL_TWO_SIDE, GL_TRUE)\n # Try GL_TRUE - but then watch what happens when light is low\n\n glEnable(GL_LIGHT4)\n\n # This part draws a SELF-COLORED sphere (in spot where light is!)\n glPushMatrix()\n glTranslatef(lx, ly, lz)\n glDisable(GL_LIGHTING)\n glColor3f(brightness, brightness, brightness)\n glutSolidSphere(0.5, 20, 20)\n glEnable(GL_LIGHTING)\n glPopMatrix()", "def serialize_buffer(cls, gl_buffer, w, h):\n\n data = gl_buffer.read()\n data = np.frombuffer(data, dtype=np.float32)\n data = data.reshape((h, w, 4))\n data = np.multiply(data, 255.0)\n data = data.astype(np.uint8)\n return data", "def __enter__(self):\n gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, self.fbo)", "def __init__(self, scene = base.render, ambient = 0.2, hardness = 16, fov = 40, near = 10, far = 100):\n \n # Read and store the function parameters\n self.scene = scene\n self.__ambient = ambient\n self.__hardness = hardness\n \n # By default, mark every object as textured.\n self.flagTexturedObject(self.scene)\n \n # Create the buffer plus a texture to store the output in\n buffer = createOffscreenBuffer(-3)\n depthmap = Texture()\n buffer.addRenderTexture(depthmap, GraphicsOutput.RTMBindOrCopy, GraphicsOutput.RTPColor)\n \n # Set the shadow filter if it is supported\n if(base.win.getGsg().getSupportsShadowFilter()):\n depthmap.setMinfilter(Texture.FTShadow)\n depthmap.setMagfilter(Texture.FTShadow) \n \n # Make the camera\n self.light = base.makeCamera(buffer)\n self.light.node().setScene(self.scene)\n self.light.node().getLens().setFov(fov)\n self.light.node().getLens().setNearFar(near, far)\n\n # Put a shader on the Light camera.\n lci = NodePath(PandaNode(\"lightCameraInitializer\"))\n lci.setShader(loader.loadShader(\"caster.sha\"))\n self.light.node().setInitialState(lci.getState())\n\n # Put a shader on the Main camera.\n mci = NodePath(PandaNode(\"mainCameraInitializer\"))\n mci.setShader(loader.loadShader(\"softshadow.sha\"))\n base.cam.node().setInitialState(mci.getState())\n\n # Set up the blurring buffers, one that blurs horizontally, the other vertically\n #blurXBuffer = makeFilterBuffer(buffer, \"Blur X\", -2, loader.loadShader(\"blurx.sha\"))\n #blurYBuffer = makeFilterBuffer(blurXBuffer, \"Blur Y\", -1, loader.loadShader(\"blury.sha\"))\n\n # Set the shader inputs\n self.scene.setShaderInput(\"light\", self.light)\n #self.scene.setShaderInput(\"depthmap\", blurYBuffer.getTexture())\n self.scene.setShaderInput(\"depthmap\", buffer.getTexture())\n self.scene.setShaderInput(\"props\", ambient, hardness, 0, 1)", "def __init__(self, width, height):\n self.width = width\n self.height = height\n self.pixels = []\n self.r = 255\n self.g = 0\n self.b = 0\n self.pointSize = 30\n self.vr = 255\n self.vg = 200\n self.vb = 200\n self.glclear()", "def get_dark():\n\n # -- utilities\n nwav = 
872\n nrow = 1600\n ncol = 20\n dpath = \"../../data/middleton/night time vnir full frame\"\n dname = \"full frame 20ms dark_VNIR.raw\"\n fname = os.path.join(dpath,dname)\n\n # -- read the file\n raw = 1.0*np.fromfile(open(fname,'rb'),np.uint16 \\\n ).reshape(ncol,nwav,nrow \\\n )[:,:,::-1].transpose(1,2,0)\n\n # -- take the mean spectrum of the upper and lower half and smooth\n upper = raw[:,:800,:].mean(-1).mean(-1)\n lower = raw[:,800:,:].mean(-1).mean(-1)\n\n smoff = [sm.nonparametric.lowess(upper,\n np.arange(len(upper)),frac=0.2)[:,1], \n sm.nonparametric.lowess(lower,\n np.arange(len(lower)),frac=0.2)[:,1]]\n\n return smoff, raw", "def __init__(self, name):\r\n super(OffScreenTexture, self).__init__(name)\r\n from pi3d.Display import Display\r\n self.ix, self.iy = Display.INSTANCE.width, Display.INSTANCE.height\r\n self.im = Image.new(\"RGBA\",(self.ix, self.iy))\r\n self.image = self.im.convert(\"RGBA\").tostring('raw', \"RGBA\")\r\n self.alpha = True\r\n self.blend = False\r\n\r\n self._tex = ctypes.c_int()\r\n self.framebuffer = (ctypes.c_int * 1)()\r\n opengles.glGenFramebuffers(1, self.framebuffer)\r\n self.depthbuffer = (ctypes.c_int * 1)()\r\n opengles.glGenRenderbuffers(1, self.depthbuffer)", "def filterToLight( bmp, savefile = '' ):\n for h in range(bmp.height):\n for w in range(bmp.width):\n HSL = RGBtoHSL( bmp.pixels[h][w] )\n lit = int(255*HSL[2]) # convert to 0-255 range\n bmp.pixels[h][w] = (lit,lit,lit)\n if( savefile != '' ):\n bmp.save(savefile)\n return bmp", "def _start(self):\r\n opengles.glBindFramebuffer(GL_FRAMEBUFFER, self.framebuffer[0])\r\n opengles.glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,\r\n GL_TEXTURE_2D, self._tex.value, 0)\r\n #thanks to PeterO c.o. RPi forum for pointing out missing depth attchmnt\r\n opengles.glBindRenderbuffer(GL_RENDERBUFFER, self.depthbuffer[0])\r\n opengles.glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT16,\r\n self.ix, self.iy)\r\n opengles.glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT,\r\n GL_RENDERBUFFER, self.depthbuffer[0])\r\n opengles.glClear(GL_DEPTH_BUFFER_BIT | GL_COLOR_BUFFER_BIT)\r\n\r\n #assert opengles.glCheckFramebufferStatus(GL_FRAMEBUFFER) == GL_FRAMEBUFFER_COMPLETE\r", "def update(): # (12)\n with canvas(device) as draw:\n for led_pos in range(0, len(color_buffer)):\n color = color_buffer[led_pos]\n\n ## If your LED strip's colors are are not in the expected\n ## order, uncomment the following lines and adjust the indexes\n ## in the line color = (rgb[0], rgb[1], rgb[2])\n # rgb = getrgb(color)\n # color = (rgb[0], rgb[1], rgb[2])\n # if len(rgb) == 4:\n # color += (rgb[3],) # Add in Alpha\n\n draw.point((led_pos, 0), fill=color)", "def show(self):\n\t\tself.processQueue()\n\t\tself.flattenLayers()\n\t\tcount = 0\n\t\tfor v in self.ledsColorBuffer:\n\t\t\tself.strip.setPixelColor(count, v)\n\t\t\tcount += 1\n\t\tself.strip.show()", "def convert_lf(self, lf):\n lf = np.float32(lf)\n if np.max(lf) > 1:\n lf = lf/256\n lf = np.uint8(lf*256)\n \n if lf.shape[-1] > 3:\n lf[lf[:,:,:,:,3] == 0] = (255,255,255,0) #convert alpha to white. 
\n lf = lf[:,:,:,:,:3]\n # while lf.ndim < 6:\n # lf = np.expand_dims(lf, 0)\n \n lf = resize_lightfield(lf, (self.width, self.height)) \n return lf", "def dark(s='dark'):\n s = s.strip()[:80] #truncate to 80 char to fit in FITS header\n print camera.SetShutter(2)\n camera.status.imgtype = 'DARK'\n camera.status.object = s\n camera.status.update()", "def render(self, surface: Surface) -> None:\n surface.blit(self.buffer, (self.render_position.x, self.render_position.y))\n self.halo_texture.render(surface, Position(\n (self.render_width - self.halo_texture.get_width()) // 2,\n (self.render_height - self.halo_texture.get_height()) // 2\n ))", "def place_blue_light():\n glMatrixMode(GL_MODELVIEW)\n lx = 3.0\n ly = light_height\n lz = 1.0\n light_position = [ lx, ly, lz, 1.0 ]\n\n lightb_ambient = [0.0, 0, 1, 1] #blue\n lightb_diffuse = [0.4, 0.4, 0.6, 1] #blue\n lightb_specular = [0.0, 0, 0.8, 1] #blue\n light_direction = [ 1.0, -1.0, 1.0, 0.0 ] # Light points down\n\n\n # For Light 0 (blue), set position, ambient, diffuse, and specular values\n glLightfv(GL_LIGHT0, GL_POSITION, light_position)\n glLightfv(GL_LIGHT0, GL_AMBIENT, lightb_ambient)\n glLightfv(GL_LIGHT0, GL_DIFFUSE, lightb_diffuse)\n glLightfv(GL_LIGHT0, GL_SPECULAR, lightb_specular)\n\n\n\n # Constant attenuation (for distance, etc.)\n # Only works for fixed light locations! Otherwise disabled\n glLightf(GL_LIGHT0, GL_CONSTANT_ATTENUATION, 1.0)\n glLightf(GL_LIGHT0, GL_LINEAR_ATTENUATION, 0.0)\n glLightf(GL_LIGHT0, GL_QUADRATIC_ATTENUATION, 0.0)\n\n # Create a spotlight effect (none at the moment)\n if blue_light:\n glLightf(GL_LIGHT0, GL_SPOT_CUTOFF, 45.0)\n glLightf(GL_LIGHT0, GL_SPOT_EXPONENT, 0.0)\n glLightfv(GL_LIGHT0, GL_SPOT_DIRECTION, light_direction)\n else:\n glLightf(GL_LIGHT0, GL_SPOT_CUTOFF,180.0)\n glLightf(GL_LIGHT0, GL_SPOT_EXPONENT, 0.0)\n \n glLightModeli(GL_LIGHT_MODEL_LOCAL_VIEWER, use_lv)\n glLightModeli(GL_LIGHT_MODEL_TWO_SIDE, GL_TRUE)\n # Try GL_TRUE - but then watch what happens when light is low\n \n glEnable(GL_LIGHT0)\n\n\n # This part draws a SELF-COLORED sphere (in spot where light is!)\n glPushMatrix()\n glTranslatef(lx,ly,lz)\n glDisable(GL_LIGHTING)\n glColor3f(0, 0, brightness)\n glutSolidSphere(0.5, 20, 20)\n glEnable(GL_LIGHTING)\n glPopMatrix()", "def init(filename):\n global trackball, flashlight, vertex_buffer, normal_buffer, color_buffer, colors, vertices, normals\n\n # initialize quaternions for the light and trackball\n flashlight = quat.for_rotation(0.0,vector(1.0,0.0,0.0))\n trackball = quat.for_rotation(0.0,vector(1.0,0.0,0.0))\n\n # read the .OBJ file into VBOs\n scene.read(filename)\n vertices,normals,colors = scene.compile()\n \n vertex_buffer = glGenBuffers(1)\n glBindBuffer (GL_ARRAY_BUFFER, vertex_buffer)\n glBufferData (GL_ARRAY_BUFFER, len(vertices)*4, \n (c_float*len(vertices))(*vertices), GL_STATIC_DRAW)\n\n normal_buffer = glGenBuffers(1)\n glBindBuffer (GL_ARRAY_BUFFER, normal_buffer)\n glBufferData (GL_ARRAY_BUFFER, len(normals)*4, \n (c_float*len(normals))(*normals), GL_STATIC_DRAW)\n\n color_buffer = glGenBuffers(1)\n glBindBuffer (GL_ARRAY_BUFFER, color_buffer)\n glBufferData (GL_ARRAY_BUFFER, len(colors)*4, \n (c_float*len(colors))(*colors), GL_STATIC_DRAW)\n\n\n # set up the object shaders\n init_shaders()\n\n glEnable (GL_DEPTH_TEST)", "def init_gl(self):\n size = self.GetClientSize()\n self.SetCurrent(self.context)\n\n GL.glViewport(0, 0, size.width, size.height)\n\n GL.glMatrixMode(GL.GL_PROJECTION)\n GL.glLoadIdentity()\n GLU.gluPerspective(45, size.width / 
size.height, 10, 10000)\n\n GL.glMatrixMode(GL.GL_MODELVIEW)\n GL.glLoadIdentity() # lights positioned relative to the viewer\n GL.glLightfv(GL.GL_LIGHT0, GL.GL_AMBIENT, self.no_ambient)\n GL.glLightfv(GL.GL_LIGHT0, GL.GL_DIFFUSE, self.med_diffuse)\n GL.glLightfv(GL.GL_LIGHT0, GL.GL_SPECULAR, self.no_specular)\n GL.glLightfv(GL.GL_LIGHT0, GL.GL_POSITION, self.top_right)\n GL.glLightfv(GL.GL_LIGHT1, GL.GL_AMBIENT, self.no_ambient)\n GL.glLightfv(GL.GL_LIGHT1, GL.GL_DIFFUSE, self.dim_diffuse)\n GL.glLightfv(GL.GL_LIGHT1, GL.GL_SPECULAR, self.no_specular)\n GL.glLightfv(GL.GL_LIGHT1, GL.GL_POSITION, self.straight_on)\n\n GL.glMaterialfv(GL.GL_FRONT, GL.GL_SPECULAR, self.mat_specular)\n GL.glMaterialfv(GL.GL_FRONT, GL.GL_SHININESS, self.mat_shininess)\n GL.glMaterialfv(GL.GL_FRONT, GL.GL_AMBIENT_AND_DIFFUSE,\n self.mat_diffuse)\n GL.glColorMaterial(GL.GL_FRONT, GL.GL_AMBIENT_AND_DIFFUSE)\n\n GL.glClearColor(1.0, 1.0, 1.0, 0.0)\n GL.glDepthFunc(GL.GL_LEQUAL)\n GL.glShadeModel(GL.GL_SMOOTH)\n GL.glDrawBuffer(GL.GL_BACK)\n GL.glCullFace(GL.GL_BACK)\n GL.glEnable(GL.GL_COLOR_MATERIAL)\n GL.glEnable(GL.GL_CULL_FACE)\n GL.glEnable(GL.GL_DEPTH_TEST)\n GL.glEnable(GL.GL_LIGHTING)\n GL.glEnable(GL.GL_LIGHT0)\n GL.glEnable(GL.GL_LIGHT1)\n GL.glEnable(GL.GL_NORMALIZE)\n\n # Viewing transformation - set the viewpoint back from the scene\n GL.glTranslatef(0.0, 0.0, -self.depth_offset)\n\n # Modelling transformation - pan, zoom and rotate\n GL.glTranslatef(self.pan_x, self.pan_y, 0.0)\n GL.glMultMatrixf(self.scene_rotate)\n GL.glScalef(self.zoom, self.zoom, self.zoom)", "def light_blur_skeleton_hand(save_path=None):\n im = auto_hand_img() # reload the edge map\n blurred = gaussian(np.copy(im)) \n #blurred = blurred * blurred # strengthen the image by multiplying\n im2 = to_rgba(np.copy(im)) # take an RGBA copy to add the skeleton onto\n skel = skeletonize(blurred) # given as a Boolean array\n skel_blur = gaussian(np.copy(skel), sigma=2)\n skel_blur *= (255/np.max(skel_blur))\n # manually examine the distribution to set a threshold for binarisation\n # for i in np.arange(0,101,1): print(np.percentile(skel_blur, i))\n skel_blur[skel_blur >= 30] = 255\n skel_blur[skel_blur < 30] = 0\n skel2 = (skel_blur/255).astype(bool)\n # also expand the edge map using the blurred version for visibility\n im2[blurred <= 0.75] = [0,0,0,255]\n # set the skeleton pixels to red in the edge map copy\n im2[skel2] = [255, 0, 0, 255]\n if save_path is None:\n return im2\n else:\n save_image(im2, (8,8), save_path)\n return im2", "def _load_opengl(self):\r\n pass", "def glclear(self):\n self.pixels = [\n [color(self.r, self.g, self.b) for x in range(self.width)]\n for y in range(self.height)\n ]", "def init_gl(self):\n\n # default background color is white-ish\n background = [.99, .99, .99, 1.0]\n # if user passed a background color use it\n if 'background' in self.kwargs:\n try:\n # convert to (4,) uint8 RGBA\n background = to_rgba(self.kwargs['background'])\n # convert to 0.0 - 1.0 float\n background = background.astype(np.float64) / 255.0\n except BaseException:\n log.error('background color wrong!',\n exc_info=True)\n # apply the background color\n gl.glClearColor(*background)\n\n max_depth = (np.abs(self.scene.bounds).max(axis=1) ** 2).sum() ** .5\n max_depth = np.clip(max_depth, 500.00, np.inf)\n gl.glDepthRange(0.0, max_depth)\n\n gl.glClearDepth(1.0)\n gl.glEnable(gl.GL_DEPTH_TEST)\n gl.glDepthFunc(gl.GL_LEQUAL)\n\n gl.glEnable(gl.GL_DEPTH_TEST)\n gl.glEnable(gl.GL_CULL_FACE)\n gl.glEnable(gl.GL_LIGHTING)\n 
gl.glEnable(gl.GL_LIGHT0)\n gl.glEnable(gl.GL_LIGHT1)\n\n # put the light at one corner of the scenes AABB\n gl.glLightfv(gl.GL_LIGHT0,\n gl.GL_POSITION,\n rendering.vector_to_gl(np.append(self.scene.bounds[1], 0)))\n gl.glLightfv(gl.GL_LIGHT0, gl.GL_SPECULAR,\n rendering.vector_to_gl(.5, .5, 1, 1))\n gl.glLightfv(gl.GL_LIGHT0, gl.GL_DIFFUSE,\n rendering.vector_to_gl(1, 1, 1, .75))\n gl.glLightfv(gl.GL_LIGHT0, gl.GL_AMBIENT,\n rendering.vector_to_gl(.1, .1, .1, .2))\n\n gl.glColorMaterial(gl.GL_FRONT_AND_BACK, gl.GL_AMBIENT_AND_DIFFUSE)\n gl.glEnable(gl.GL_COLOR_MATERIAL)\n gl.glShadeModel(gl.GL_SMOOTH)\n\n gl.glMaterialfv(gl.GL_FRONT,\n gl.GL_AMBIENT,\n rendering.vector_to_gl(0.192250, 0.192250, 0.192250))\n gl.glMaterialfv(gl.GL_FRONT,\n gl.GL_DIFFUSE,\n rendering.vector_to_gl(0.507540, 0.507540, 0.507540))\n gl.glMaterialfv(gl.GL_FRONT,\n gl.GL_SPECULAR,\n rendering.vector_to_gl(.5082730, .5082730, .5082730))\n\n gl.glMaterialf(gl.GL_FRONT,\n gl.GL_SHININESS,\n .4 * 128.0)\n\n gl.glEnable(gl.GL_BLEND)\n gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)\n\n gl.glEnable(gl.GL_LINE_SMOOTH)\n gl.glHint(gl.GL_LINE_SMOOTH_HINT, gl.GL_NICEST)\n\n gl.glLineWidth(1.5)\n gl.glPointSize(4)", "def light(brightness, filter):\n brightness = clamp(MIN_BRIGHTNESS, round(brightness), MAX_BRIGHTNESS)\n for col in range(DISPLAY_WIDTH):\n for row in range(DISPLAY_HEIGHT):\n if filter(col, row):\n microbit.display.set_pixel(col, row, brightness)", "def load(self):\n\t\tglTexImage3D(GL_TEXTURE_3D, 0, GL_LUMINANCE16_ALPHA16, \n\t\t\tself.width, self.width, self.width, 0, GL_LUMINANCE_ALPHA, \n\t\t\tGL_UNSIGNED_SHORT, ctypes.byref(self.data))" ]
[ "0.56681794", "0.54977167", "0.5324041", "0.5287743", "0.5260473", "0.5205611", "0.51873535", "0.5163916", "0.5071536", "0.5049212", "0.5048237", "0.5035555", "0.50335544", "0.5024333", "0.50097406", "0.49990943", "0.49946952", "0.49940115", "0.49929282", "0.49905205", "0.49888054", "0.49843892", "0.49644062", "0.49564114", "0.4932736", "0.49284846", "0.49283198", "0.4925118", "0.49157286", "0.49156234" ]
0.6222023
0
Renders the halo on the desired surface.
def render(self, surface: Surface) -> None:
    surface.blit(self.buffer, (self.render_position.x, self.render_position.y))
    self.halo_texture.render(surface, Position(
        (self.render_width - self.halo_texture.get_width()) // 2,
        (self.render_height - self.halo_texture.get_height()) // 2
    ))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw(self):\n\n surf = self.get_oxygen_surface()\n surf.set_alpha(255)\n self.screen.blit(surf, self.pos)", "def pre_render(self) -> None:\n self.buffer = Surface((self.render_width, self.render_height), SRCALPHA)\n self.buffer.fill(list(self.halo_texture.surfaces.values())[0].get_at((0, 0)))\n\n self.buffer.fill((0, 0, 0, 0), Rect(\n (self.render_width - self.halo_texture.get_width()) // 2,\n (self.render_height - self.halo_texture.get_height()) // 2,\n self.halo_texture.get_width(),\n self.halo_texture.get_height()\n ))", "def basic_render(self, surface) -> None:\n if not self.visible:\n return\n l, t = self.pos\n r, b = self.get_anchor_pos(Anchor.bottom_right)\n tpos = self.get_anchor_pos(Anchor.middle)\n backcolor = (128, 128, 128)\n forecolor = {False: (255, 255, 192), True: (255, 0, 0)}\n pts = ((l, t), (r, t), (r, b), (l, b))\n pygame.draw.polygon(surface, backcolor, pts, 0)\n pygame.draw.polygon(surface, forecolor[self.hover], pts, 1)\n BitmapFont.set_colors(BitmapFont.medium, backcolor, forecolor[self.hover])\n BitmapFont.render(surface, str(self.label), BitmapFont.medium, tpos, Anchor.middle)", "def drawSimple(self, screen):\r\n self.worlds[0].renderer.render(screen)", "def render(self):\n self.axial.Render()\n self.coronal.Render()\n self.sagittal.Render()\n #self.isosurface.Render()\n #self.rwi_pcp.Render()", "def hello(self):\n surface_hi = pygame.font.SysFont('Helvetic', 50).render('Do not approach my car', False, BLACK)\n screen.blit(surface_hi, (700, 50))", "def display(self):\n\n self.screen.fill(self.background)\n\n if self.displayEdges:\n for n1, n2 in self.tf_wireframe.edges:\n start_pos = self.tf_wireframe.nodes[n1][:2] + [self.ws, self.hs]\n end_pos = self.tf_wireframe.nodes[n2][:2] + [self.ws, self.hs]\n pygame.draw.aaline(self.screen, self.edgeColor,\n start_pos,\n end_pos, 2)\n if self.displayNodes:\n for i, node in enumerate(self.tf_wireframe.nodes):\n c = self.wireframe_col[i, :]\n if np.all(c != self.background):\n pygame.draw.circle(self.screen, c, (int(self.ws + node[0]), int(self.hs + node[1])), self.nodeRadius, 0)", "def render_capsule(self, l, r):\n # draw cylinder\n glPushMatrix()\n glScalef(l, r, r)\n glCallList(self.cylinder)\n glPopMatrix()\n # draw +x hemisphere\n glPushMatrix()\n glTranslatef(l/2.0, 0, 0)\n glScalef(r, r, r)\n glCallList(self.hemisphere)\n glPopMatrix()\n # draw -x hemisphere\n glPushMatrix()\n glRotatef(180.0, 0, 0, 1)\n glTranslatef(l/2.0, 0, 0)\n glScalef(r, r, r)\n glCallList(self.hemisphere)\n glPopMatrix()", "def renderizar(self):\n\t\t# Limpiar la pantalla\n\t\tglClear(GL_COLOR_BUFFER_BIT)\n\t\t# Renderizar la escena\n\t\tself.escena.renderizar()\n\t\t# Renderizar los buffers a la pantalla\n\t\tpygame.display.flip()", "def draw_h(self):\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.down()\r\n pen.forward(40)\r\n pen.up()\r\n pen.back(20)\r\n pen.left(90)\r\n pen.down()\r\n pen.forward(40)\r\n pen.right(90)\r\n pen.up()\r\n pen.forward(20)\r\n pen.down()\r\n pen.back(40)\r\n pen.right(90)\r\n pen.up()\r\n pen.forward(50)", "def draw(self, win,cameraPos):\n super().draw(win,cameraPos)\n pygame.draw.rect(win, (0, 0, 255), self.hp_rect)\n pygame.draw.rect(win, (255, 255, 255), self.hp_rect_frame, 2)\n win.blit(self.fontSurf,self.fontRect)", "def display(self):\n\n self.screen.fill(self.background)\n\n for wireframe in self.wireframes.values():\n if self.displayEdges:\n for n1, n2 in wireframe.edges:\n pygame.draw.aaline(self.screen, self.edgeColour, wireframe.nodes[n1][:2], wireframe.nodes[n2][:2], 1)\n\n if 
self.displayNodes:\n for node in wireframe.nodes:\n pygame.draw.circle(self.screen, self.nodeColour, (int(node[0]), int(node[1])), self.nodeRadius, 0)", "def __display(self):\n self.__rotate_model()\n self.__generate_shadows()\n self.__generate_model()\n\n glutSwapBuffers()\n if self.__callback is not None:\n self.__callback()", "def display(swap=1, clear=1):\r\n if clear:\r\n glClearColor(0.5, 0.5, 0.5, 0)\r\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\r\n\r\n # establish the projection matrix (perspective)\r\n glMatrixMode(GL_PROJECTION)\r\n glLoadIdentity()\r\n x, y, width, height = glGetDoublev(GL_VIEWPORT)\r\n gluPerspective(\r\n 45, # field of view in degrees\r\n width / float(height or 1), # aspect ratio\r\n .25, # near clipping plane\r\n 200, # far clipping plane\r\n )\r\n\r\n # and then the model view matrix\r\n glMatrixMode(GL_MODELVIEW)\r\n glLoadIdentity()\r\n gluLookAt(\r\n 0, 1, 20, # eyepoint\r\n 0, 0, 0, # center-of-view\r\n 0, 1, 0, # up-vector\r\n )\r\n glLightfv(GL_LIGHT0, GL_DIFFUSE, GLfloat_3(.8, .8, .3))\r\n glLightfv(GL_LIGHT0, GL_POSITION, GLfloat_4(1, 1, 3, 0))\r\n glEnable(GL_LIGHT0)\r\n\r\n rotation()\r\n drawCheckerBoard()\r\n drawSphere()\r\n if swap:\r\n glutSwapBuffers()", "def draw(self, surface):\n temp = pygame.Surface(self.renderer.pixel_size)\n self.renderer.render_map(temp)\n pygame.transform.smoothscale(temp, surface.get_size(), surface)", "def draw_level(self):\r\n self.level_surface.blit(self.map_image, self.viewport, self.viewport)\r\n self.level_surface.blit(self.title_box, self.title_rect)", "def draw():", "def render(self, tela: pg.Surface):\n if (not self._run) or (not self.IA.is_loaded()):\n return\n self.IA.BG_MAP.render(tela=tela)\n self.IA.O_ATUAL.render(tela=tela)\n self._desintegrator.render(tela=tela)\n\n # pg.draw.line(tela, (255, 255, 255),\n # self.player.get_sensor().get_center_pos_player(),\n # self.player.get_sensor().get_first_obstacle().to_rect().center)\n #\n # pg.draw.line(tela, (255, 255, 255),\n # self.player.get_sensor().get_center_pos_shadow(),\n # self.player.get_sensor().get_first_obstacle().to_rect().center)", "def draw(self, surface):\n surface.blit(self.base, self.base_rect)\n surface.blit(self.barrel, self.rect)", "def draw(self):\n\n self.squares.draw(self.screen)\n if not self.hide_grid:\n self.draw_grid()\n self.fleas.draw(self.screen)\n pygame.display.flip()", "def drawScene(self):\n glBegin(GL_LINES)\n # draw axes\n glColor3f(1, 0, 0)\n glVertex3f(0, 0, 0)\n glVertex3f(self.worldSize / 2, 0, 0)\n glColor3f(0, 1, 0)\n glVertex3f(0, 0, 0)\n glVertex3f(0, self.worldSize / 2, 0)\n glColor3f(0, 0, 1)\n glVertex3f(0, 0, 0)\n glVertex3f(0, 0, self.worldSize / 2)\n # draw bounding box\n glColor3f(1, 1, 1)\n scalar = (self.worldSize - 1) / 2\n for x in [-1, 1]:\n for y in [-1, 1]:\n for z in [-1, 1]:\n glVertex3f(scalar * x, scalar * y, scalar * z)\n for z in [-1, 1]:\n for x in [-1, 1]:\n for y in [-1, 1]:\n glVertex3f(scalar * x, scalar * y, scalar * z)\n for y in [-1, 1]:\n for z in [-1, 1]:\n for x in [-1, 1]:\n glVertex3f(scalar * x, scalar * y, scalar * z)\n glEnd()\n # draw spheres if in POINTS mode\n if self.displayMode is self.DISPLAYMODE_POINTS:\n prev = (0, 0, 0)\n offset = int(self.worldSize / 2)\n for x in range(self.worldSize):\n for y in range(self.worldSize):\n for z in range(self.worldSize):\n glTranslatef(x - offset - prev[0], y - offset - prev[1], z - offset - prev[2])\n # use threshold for black/white coloring\n if self.world[x][y][z] > self.worldThreshold:\n glColor3f(1, 1, 1)\n else:\n 
glColor3f(0, 0, 0)\n gluSphere(self.sphere, 0.1, 8, 4)\n prev = (x - offset, y - offset, z - offset)\n # draw mesh if in MESH mode\n elif self.displayMode is self.DISPLAYMODE_MESH:\n offset = int(self.worldSize / 2)\n for x in range(self.worldSize - 1):\n for y in range(self.worldSize - 1):\n for z in range(self.worldSize - 1):\n if self.polygons[x][y][z]:\n glBegin(GL_POLYGON)\n glColor3f(x / self.worldSize, y / self.worldSize, z / self.worldSize)\n for vertex in self.polygons[x][y][z]:\n glVertex3f(x + vertex[0] - offset, y + vertex[1] - offset, z + vertex[2] - offset)\n glEnd()\n # draw wireframe in in WIRE mode\n elif self.displayMode is self.DISPLAYMODE_WIREFRAME:\n offset = int(self.worldSize / 2)\n for x in range(self.worldSize - 1):\n for y in range(self.worldSize - 1):\n for z in range(self.worldSize - 1):\n glBegin(GL_LINES)\n glColor3f(x / self.worldSize, y / self.worldSize, z / self.worldSize)\n for vertex in self.polygons[x][y][z]:\n glVertex3f(x + vertex[0] - offset, y + vertex[1] - offset, z + vertex[2] - offset)\n glEnd()\n # draw background in the distance\n glLoadIdentity()\n glBegin(GL_QUADS)\n glColor3f(59 / 256, 102 / 256, 212 / 256)\n glVertex3f(-30, -23, -49.5)\n glVertex3f(30, -23, -49.5)\n glColor3f(184 / 256, 201 / 256, 242 / 256)\n glVertex3f(30, 23, -49.5)\n glVertex3f(-30, 23, -49.5)\n glEnd()\n # HUD in white\n glColor3f(1, 1, 1)\n # lower left\n glWindowPos2f(10, 10)\n for ch in 'WASD: Rotate':\n glutBitmapCharacter(GLUT_BITMAP_9_BY_15, ord(ch))\n glWindowPos2f(10, 25)\n for ch in 'Wheel: Thresh':\n glutBitmapCharacter(GLUT_BITMAP_9_BY_15, ord(ch))\n glWindowPos2f(10, 40)\n for ch in 'R: Randomize':\n glutBitmapCharacter(GLUT_BITMAP_9_BY_15, ord(ch))\n glWindowPos2f(10, 55)\n for ch in 'O: Object':\n glutBitmapCharacter(GLUT_BITMAP_9_BY_15, ord(ch))\n glWindowPos2f(10, 70)\n for ch in 'I: Wireframe':\n glutBitmapCharacter(GLUT_BITMAP_9_BY_15, ord(ch))\n glWindowPos2f(10, 85)\n for ch in 'P: Points':\n glutBitmapCharacter(GLUT_BITMAP_9_BY_15, ord(ch))\n # upper right\n glWindowPos2f(self.displaySize[0] - 118, self.displaySize[1] - 25)\n for ch in 'Thresh: %0.2f' % self.worldThreshold:\n glutBitmapCharacter(GLUT_BITMAP_9_BY_15, ord(ch))", "def render(self):\n self.screen.fill(prepare.BACKGROUND_COLOR)\n self.health_bar()\n # self.enemy_health()\n self.energy_bar()\n self.level.draw(self.screen)\n pg.display.update()", "def visualise(self, obj):\n self.clear()\n self.draw(obj)\n self.show()", "def draw(self, surface):\n color = pygame.Color(255, 255, 255)\n pygame.draw.circle(surface, color, self.position, Molecule.radius, 2)", "def render(self,tela):\r\n self.fundo.render(tela,(0,0))\r\n self.player1.render(tela, (-(self.player1.frames[0].get_width()*0.75), (self.height * 0.41)), (self.deslocax,0), False)\r\n self.player2.render(tela, ((self.player2.frames[0].get_width()*1.75),(self.height * 0.41)), (-self.deslocax,0), True)", "def draw_scene():\n # Place the camera\n camera.placeCamera()\n \n \n # Set up the global ambient light. (Try commenting out.)\n amb = [ 0*brightness, 0*brightness, 0*brightness, 1.0 ]\n glLightModelfv(GL_LIGHT_MODEL_AMBIENT, amb)\n\n # Set up the main light (LIGHT0)... 
or not.\n if is_light_on:\n place_blue_light()\n place_red_light()\n place_green_light()\n place_lamp_light()\n else:\n glDisable(GL_LIGHT0)\n glDisable(GL_LIGHT1)\n glDisable(GL_LIGHT2)\n glDisable(GL_LIGHT3)\n\n if lamp_light:\n place_lamp_light()\n else:\n glDisable(GL_LIGHT3)\n\n if headlamp_is_on:\n place_headlamp_light()\n else:\n glDisable(GL_LIGHT4)\n\n # Now spin the world around the y-axis (for effect).\n glRotated(angle_movement, 0, 1, 0)\n draw_objects()", "def draw_level(self, surface):\n surface.blit(self.background, (0, 0))\n surface.blit(self.player.image, self.player.rect)\n surface.blit(self.message_box.image, self.message_box.rect)\n surface.blit(self.arrow.image, self.arrow.rect)\n surface.blit(self.transition_surface, (0, 0))", "def draw(self, surface):\n ent = self.controller.entity_selection\n\n # If we have not selected an entity.\n if not ent:\n self.surface.blit(self.background, (0, 0))\n self.controller.entity_selection_track = False\n return\n \n # And provide details about the unit.\n unit_text = self.font.render(\"%s (id: %s)\" % (ent.name, ent.id), True, (255, 255, 255))\n w, _ = unit_text.get_size()\n self.surface.blit(unit_text, ((self.width / 2) - w / 2, 15))\n \n output = [\"Location: (%d, %d)\" % tuple(ent.location)]\n\n if ent.name == \"ant\":\n output.append(\"Energy: %s\" % ent.c[\"attrs\"][\"energy\"])\n output.append(\"Health: %s\" % ent.c[\"attrs\"][\"health\"])\n output.append(\"Brain state: %s\" % ent.brain.active_state.name)\n output.append(\"Speed: %d\" % ent.c[\"velocity\"].speed)\n if ent.c[\"destination\"].location:\n output.append(\"Destination: (%s, %s)\" % tuple(ent.c[\"destination\"].location))\n if ent.c[\"destination\"].isentity:\n output.append(\"Target: (%s)\" % ent.c[\"destination\"].val.name)\n \n for i, line in enumerate(output):\n text = self.font.render(line, True, (255, 255, 255))\n self.surface.blit(text, (10, 30 + i*15))\n \n # Blit to the main surface.\n surface.blit(self.surface, ((self.x, self.y)))", "def draw_housing_2():\r\n tom.pensize(3)\r\n tom.color(\"black\", \"darkgrey\")\r\n tom.begin_fill()\r\n tom.forward(80)\r\n tom.left(90)\r\n tom.forward(200)\r\n tom.circle(40, 180)\r\n tom.forward(200)\r\n tom.left(90)\r\n tom.end_fill()\r\n tom.hideturtle()", "def render(self, mode='human'):\n screen_width = 800\n screen_height = 550\n\n # Width is one column for each variable\n n_sect = 7\n world_width = n_sect*2 # X axis is just pixels\n \n buff_axis = cfg['buff_axis']\n #bottom of the screen scales to the input/output range of values\n world_height_bottom = np.max(self.maxes)+buff_axis\n \n # Top is for counting steps\n world_height_top = 100\n\n #Split the screen:\n world_top = .3\n world_bottom = 1-world_top\n screen_height_bottom = world_bottom*screen_height\n\n #Set where to draw the steps axis\n axes_line1 = screen_height*(world_bottom + .2)\n\n # Scale the pixels in the screen:\n scalex = screen_width/world_width\n scaley_bottom= screen_height_bottom/world_height_bottom\n\n # Some adjustments to move some objects up/ right\n move_oval = -scalex*.2\n move_up= scaley_bottom * buff_axis*.5\n\n #set sizes of shapes:\n self.oval_length = 25.0\n self.oval_width = 50.0\n self.rect_width = 70.0\n self.rect_height = 5.0 \n\n #Step plot:\n scalestep = screen_width/cfg['scalestep']\n\n #color shades:\n light_col = .7\n dark_col = 1\n c11 = .6\n c22 = .8\n c33 = 1\n\n if self.viewer is None:\n #TO DO: find an alternative to copy-paste to generate multiple similar shapes\n self.viewer = rendering.Viewer(screen_width, 
screen_height)\n \n #Input states:\n\n #the temp action\n self.temptrans1 = self.make_oval(0,0,light_col)\n self.temptrans2 = self.make_oval(0,0,dark_col)\n #flow action:\n self.flowtrans1 = self.make_oval(light_col,0,light_col)\n self.flowtrans2 = self.make_oval(dark_col,0,dark_col)\n\n #output states:\n #out1:\n #the gauge is a rectangle \n self.outgauge1 = self.make_rect(0,c33,0)\n #goal is red rectangle\n self.outgoal1= self.make_rect(c33,0,0)\n \n #out2:\n #the gauge is a rectangle \n self.outgauge2 = self.make_rect(0,c22,0)\n #goal is red rectangle\n self.outgoal2= self.make_rect(c22,0,0)\n\n #out3:\n #the gauge is a rectangle \n self.outgauge3 = self.make_rect(0,c11,0)\n #goal is red rectangle\n self.outgoal3 = self.make_rect(c11,0,0)\n\n #lines on which \"controls\" sit\n for l in range(n_sect): \n self.make_line(scalex*((l*2)+1),0, scalex*((l*2)+1),screen_height*world_bottom)\n\n # Line separating the top and bottom of the screen. \n self.make_line(0,world_bottom*screen_height,screen_width,world_bottom*screen_height)\n # Step # axis.\n self.make_line(scalex*1.5,axes_line1,screen_width-scalex*1,axes_line1)\n\n # The dot tracking the step #\n dot = rendering.make_circle(self.oval_length)\n self.dottrans = rendering.Transform()\n dot.add_attr(self.dottrans)\n dot.set_color(0,0,0)\n self.viewer.add_geom(dot)\n\n #labels: \n num = 0\n label_buff_y = 1.07\n label_buff_x = .2\n img_scale = .5\n img_wid = 179 *img_scale\n img_height = 124 * img_scale\n\n for label in self.labels:\n pth = (self.label_dir+label+'.png')\n self.txt = rendering.Image(pth,img_wid,img_height)\n locx = (num*2)+1\n self.txtis = rendering.Transform(translation=(scalex*locx +locx* label_buff_x,world_bottom*screen_height*label_buff_y))\n self.txt.add_attr(self.txtis)\n self.viewer.add_geom(self.txt)\n num = num+1\n\n #step label\n pth = (self.label_dir+'Step.png')\n self.txt = rendering.Image(pth,img_wid,img_height)\n self.txtis = rendering.Transform(translation=(scalex*.5,axes_line1))\n self.txt.add_attr(self.txtis)\n self.viewer.add_geom(self.txt)\n\n if self.state is None: return None\n\n x = self.state\n\n # 4 ins:\n self.flowtrans1.set_translation(move_oval+scalex*1,move_up+scaley_bottom*x[0])\n self.temptrans1.set_translation(move_oval+scalex*3,move_up+scaley_bottom*x[1])\n self.flowtrans2.set_translation(move_oval+scalex*5,move_up+scaley_bottom*x[2])\n self.temptrans2.set_translation(move_oval+scalex*7,move_up+scaley_bottom*x[3])\n\n # 3 outs: current & goal:\n self.outgauge1.set_translation(scalex*9,move_up+scaley_bottom*x[4])\n self.outgoal1.set_translation(scalex*9,move_up+scaley_bottom*x[7])\n self.outgauge2.set_translation(scalex*11,move_up+scaley_bottom*x[5])\n self.outgoal2.set_translation(scalex*11,move_up+scaley_bottom*x[8])\n self.outgauge3.set_translation(scalex*13,move_up+scaley_bottom*x[6])\n self.outgoal3.set_translation(scalex*13,move_up+scaley_bottom*x[9])\n\n #step info:\n self.dottrans.set_translation(scalex*1.5 + self.steps*scalestep, axes_line1)\n done_grow = .5*self.done\n self.dottrans.set_scale(1+done_grow,1+done_grow) #expand size when done\n\n return self.viewer.render(return_rgb_array = mode=='rgb_array')" ]
[ "0.6726035", "0.6504167", "0.6232434", "0.6078131", "0.6019421", "0.5973637", "0.59126973", "0.5894387", "0.58872277", "0.58129996", "0.5808655", "0.5794351", "0.57840055", "0.5774184", "0.5767208", "0.57486993", "0.57462645", "0.5710917", "0.57049483", "0.5687367", "0.56863385", "0.5663984", "0.564514", "0.5642672", "0.56395656", "0.5618785", "0.56142354", "0.56091523", "0.5589932", "0.5582501" ]
0.7035735
0
Expand the given first position (2-length tuple or 2-length list) to have numrep points by specifying a horizontal or vertical interval.
def expand_by_interval(firstPos,numrep,horizontal=None,vertical=None):
    x0,y0 = firstPos
    poslist = []
    if horizontal is not None:
        for num in range(numrep):
            xt,yt = (x0+horizontal*num,y0)
            poslist.append((xt,yt))
        return poslist
    elif vertical is not None:
        for num in range(numrep):
            xt,yt = (x0,y0+vertical*num)
            poslist.append((xt,yt))
        return poslist
    else:
        raise ValueError("both horizontal and vertical are None!")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def grids_augmented_with_number(part_grid, val, curr_row=0):\n if curr_row == len(part_grid):\n return [part_grid]\n else:\n res = []\n for option in grids_augmented_in_row(part_grid, val, curr_row):\n res += grids_augmented_with_number(option, val, curr_row + 1)\n return res", "def expanding(self,pos_0,pos_1,n):\r\n cnvt_front=self.string(pos_0,pos_1,n)\r\n if int(cnvt_front) in self.expanded:\r\n\r\n a=1\r\n else:\r\n self.expanded.append(int(cnvt_front))", "def sliceshape(slicetuple, totshape):\n res = []\n for i,s in enumerate(slicetuple):\n if isinstance(s,int):\n #n = 1\n pass\n else:\n i0,i1,istep = s.indices(totshape[i])\n n = (i1-i0)//istep\n res.append(n)\n return res", "def nprepeat_row(v: np.ndarray, m: int) -> np.ndarray:\n return np.repeat(v[np.newaxis, :], m, axis=0)", "def _rep(self, num_repeats):\n return int(np.ceil(self.depth_mul * num_repeats))", "def repeat_nd(x, reps):\n return RepeatND(reps)(x)", "def resize_points(x, n: int, mode: str = 'lin'):\n n_old = len(x)\n xn = np.copy(x)\n if n == n_old: # nothing to do, return the copy\n return xn\n\n if n > n_old:\n f = lambda xxx: add_point(xxx, mode=mode)\n else:\n f = lambda xxx: remove_point(xxx)\n\n for i in range(abs(n - n_old)):\n xn = f(xn)\n return xn", "def number2multisize_patten(number, min_length, max_length):\n lengths = np.arange(min_length, max_length + 2) # +2 Include last interval\n offsets = np.cumsum(4**lengths)\n\n try:\n index = np.where((offsets - number) > 0)[0][0]\n org_length = lengths[index]\n number -= np.concatenate(([0], offsets))[index]\n return number2patten(number, org_length)\n except IndexError:\n raise ValueError('Provided number (%d) do not match ' % number +\n 'list of provided lengths %s nt.' % lengths)", "def MakeDifficulties(center, width, n):\n low, high = center-width, center+width\n return numpy.linspace(low, high, n)", "def create_coords_medium(ph):\n # Min: 8, max 12\n for start_row in xrange(ph.pizza.shape[0]):\n for start_col in xrange(ph.pizza.shape[1]-2*ph.min_ing_per_slice+1):\n # First scenario\n for i in xrange(ph.min_ing_per_slice*2, ph.max_cells_per_slice+1):\n end_row = start_row + 1\n end_col = start_col + i\n yield (start_row, start_col, end_row, end_col)\n yield (start_row, start_col, end_col, end_row)\n\n for start_row in xrange(ph.pizza.shape[0]-1):\n for start_col in xrange(ph.pizza.shape[1]-3):\n # Second scenario\n for i in xrange(ph.min_ing_per_slice, ph.min_ing_per_slice+3):\n end_row = start_row + 2\n end_col = start_col + i\n yield (start_row, start_col, end_row, end_col)\n yield (start_row, start_col, end_col, end_row)\n\n for start_row in xrange(ph.pizza.shape[0] - 2):\n for start_col in xrange(ph.pizza.shape[1] - 2):\n # Third scenario\n for i in xrange(3, 5):\n end_row = start_row + 3\n end_col = start_col + i\n yield (start_row, start_col, end_row, end_col)\n yield (start_row, start_col, end_col, end_row)", "def plot_insertsize():", "def repeat_elements(x, rep, axis):\n x_shape = x.get_shape().as_list()\n if x_shape[axis] is None:\n raise ValueError('Axis ' + str(axis) + ' of input tensor '\n 'should have a defined dimension, but is None. '\n 'Full tensor shape: ' + str(tuple(x_shape)) + '. 
'\n 'Typically you need to pass a fully-defined '\n '`input_shape` argument to your first layer.')\n # slices along the repeat axis\n splits = array_ops.split(value=x, num_or_size_splits=x_shape[axis], axis=axis)\n # repeat each slice the given number of reps\n x_rep = [s for s in splits for _ in range(rep)]\n return concatenate(x_rep, axis)", "def slices(utt, rep, index=lambda ms: ms//10, aggregate=lambda x: x.mean(axis=0)):\n for phoneme in phones(utt):\n phone, start, end = phoneme\n assert index(start)<index(end)+1, \"Something funny: {} {} {} {}\".format(start, end, index(start), index(end))\n yield (phone, aggregate(rep[index(start):index(end)+1]))", "def tile(A, reps):\n from ..merge import concatenate\n\n try:\n tup = tuple(reps)\n except TypeError:\n tup = (reps,)\n\n d = len(tup)\n if A.ndim < d:\n A = A[tuple(np.newaxis for _ in range(d - A.ndim))]\n elif A.ndim > d:\n tup = (1,) * (A.ndim - d) + tup\n\n a = A\n for axis, rep in enumerate(tup):\n if rep == 0:\n slc = (slice(None),) * axis + (slice(0),)\n a = a[slc]\n elif rep < 0:\n raise ValueError(\"negative dimensions are not allowed\")\n elif rep > 1:\n a = concatenate([a] * rep, axis=axis)\n\n return a", "def grid_numbering(n, x_0, y_0, x_1, y_1):\n \n if n == 0:\n return \"\"\n\n arg = complex_number(x_0 + 0.5 - x_1, y_0 + 0.5 - y_1).argument()\n\n if arg >= 0 and arg < np.pi / 2: \n x = \"1\"\n x_1 += 2 ** (n - 2)\n y_1 += 2 ** (n - 2)\n elif arg >= np.pi / 2 and arg <= np.pi:\n x = \"2\"\n x_1 -= 2 ** (n - 2)\n y_1 += 2 ** (n - 2)\n elif arg < 0 and arg >= -np.pi / 2:\n x = \"4\"\n x_1 += 2 ** (n - 2)\n y_1 -= 2 ** (n - 2)\n else:\n x = \"3\"\n x_1 -= 2 ** (n - 2)\n y_1 -= 2 ** (n - 2)\n\n return str(x) + grid_numbering(n - 1, x_0, y_0, x_1, y_1)", "def ipset_x_repeating():\n x = np.linspace(0, 10, 11)\n x[5] = x[4]\n return IPSet(x=x, y=np.linspace(-1, 1, 11), x_new=np.linspace(2, 5, 7))", "def vec_repeat_at_start(x, p):\n n = x.shape[0]\n indices = (jnp.arange(p) + n - p) % n\n padding = x[indices]\n return jnp.concatenate((padding, x))", "def ipset_num_x_y_different():\n return IPSet(x=np.linspace(0, 10, 11), y=np.linspace(-1, 1, 3), x_new=np.linspace(2, 5, 4))", "def reshape(self, nz: int, name=None, start: int = 0, end: int = None, axis: str = sM, xmode: str = 'resize', kind='np'):\n from scipy.interpolate import interp1d\n # from scipy.interpolate import splev, splrep\n from scipy.interpolate import UnivariateSpline\n from scipy.ndimage import gaussian_filter1d\n\n def rlogspace(s, e, n):\n r = np.exp(np.linspace(np.log(s), np.log(e), n))\n r = (e - r + s)\n return r[::-1]\n\n def add_point(x, mode='lin'): # 'lin' 'log'\n \"\"\"\n Find max interval in x and insert the new point in the middle (lin or geom) of it\n :param x: array\n :param mode:\n :return:\n \"\"\"\n dif = np.diff(x) / x[1:]\n idx = np.argmax(dif)\n if mode == 'lin':\n p = (x[idx] + x[idx + 1]) / 2.\n elif mode == 'geom':\n p = np.sqrt(x[idx] * x[idx + 1])\n else:\n raise ValueError('Mode should be \"lin\" lor \"geom\"')\n logger.info(' To interval {}[{:.6e} - {:.6e}] added {} '.format(idx, x[idx], x[idx + 1], p))\n xn = np.insert(x, idx + 1, p)\n return xn\n\n def remove_point(x): # 'lin' 'log'\n \"\"\"\n Find min delta and remove the right point\n \"\"\"\n dif = np.diff(x) / x[1:]\n idx = np.argmin(dif)\n xn = np.delete(x, idx + 1)\n if idx+2 == len(x):\n logger.debug(' Remove right point {} {:.8e} [dx= {:.8e}] '.format(idx+1, x[idx + 1], dif[idx]))\n else:\n logger.debug(' Remove point {} {:.8e} [dx= {:.8e}] next: {:.8e}'.format(idx+1, x[idx + 1], 
dif[idx], x[idx + 2])) \n return xn\n\n def resize_points(x, n: int, mode: str = 'lin'):\n \"\"\"\n Add or remove points in the array x\n :param x: the array is not changed\n :param n: number points to add or remove\n :param mode: should be \"lin\" or \"geom\". Default: lin\n :return: the resized array\n \"\"\"\n n_old = len(x)\n xn = np.copy(x)\n if n == n_old: # nothing to do, return the copy\n return xn\n\n if n > n_old:\n f = lambda xxx: add_point(xxx, mode=mode)\n else:\n f = lambda xxx: remove_point(xxx)\n\n for i in range(abs(n - n_old)):\n xn = f(xn)\n return xn\n\n def x_reshaped(x, n):\n if xmode == 'lin':\n res = np.linspace(x[0], x[-1], n)\n elif xmode == 'rlog':\n res = rlogspace(x[0], x[-1], n)\n elif xmode == 'resize':\n res = resize_points(x, n)\n else:\n raise ValueError('Such xmode \"{}\" is not supported.'.format(xmode))\n return res\n\n def interp(xn, x, v, s: int, e: int, kind: str, is_log: bool = False):\n res = []\n if s > 0:\n res = v[:s] # save points before start\n xi = x[s:e]\n yi = v[s:e]\n if is_log:\n yi = np.log10(yi)\n\n if kind == 'np':\n yy = np.interp(xn, xi, yi)\n elif kind == 'spline':\n spl = UnivariateSpline(xi, yi)\n yy = spl(xn)\n elif kind == 'gauss':\n yii = gaussian_filter1d(yi, 3)\n yy = np.interp(xn, xi, yii)\n else:\n interp_linear = interp1d(xi, yi, kind=kind)\n yy = interp_linear(xn)\n\n if is_log:\n yy = 10. ** yy\n res = np.append(res, yy)\n return res\n\n if nz <= 0:\n nz = self.nzon\n\n nznew = start + nz\n if name is None:\n name = self.Name\n\n newPreSN = PreSN(name, nznew, elements=self.Elements)\n\n if end is None:\n end = self.nzon\n\n logger.info(f'axis= {axis} nz= {nz} nznew= {nznew} start= {start} end= {end}')\n\n # hyd reshape\n if axis == PreSN.sM:\n xx = self.m\n elif axis == PreSN.sR:\n xx = self.r\n elif axis == PreSN.sV:\n xx = self.V\n else:\n raise ValueError('Such axis \"{}\" is not supported.'.format(axis))\n\n xx = xx / max(abs(xx)) # norm\n xxx = x_reshaped(xx, nz)\n if np.any(np.diff(xxx) < 0.):\n for i, dx in enumerate(np.diff(xxx)):\n if dx <= 0:\n logger.error(\"ERROR reshaped: {} xxx= {} dx= {} \".format(i, xxx[i], dx))\n raise ValueError('The interval beetween some of {} elements is < 0.'.format(len(xxx)))\n\n # from pprint import pprint\n\n for vv in PreSN.presn_hydro:\n old = self.hyd(vv)\n new = interp(xxx, xx, old, s=start, e=end, kind=kind, is_log=False)\n # if vv == PreSN.sRho:\n # rho_new = interp(xxx, xx, old, s=start, e=end, kind='next') #, is_log=True)\n # else:\n # new = interp(xxx, xx, old, s=start, e=end, kind=kind)\n newPreSN.set_hyd(vv, new)\n # print(f'{vv} before: old[{len(xx)}-1]= {old[len(xx)-2]:12.7e} new[{len(xxx)}-1]= {new[len(xxx)-2]:12.7e}')\n logger.info(f'{vv} before: old[0]= {old[0]:12.7e} new[0]= {new[0]:12.7e}')\n logger.info(f'{vv} before: old[{len(xx)}]= {old[len(xx) - 1]:12.7e} new[{len(xxx)}]= {new[len(xxx) - 1]:12.7e}')\n # print(f'\\n{vv} before: {len(xx)}')\n # pprint(list(zip(range(1, len(xx)+1), xx, old)))\n # print(f'{vv} after: {len(xxx)}')\n # pprint(list(zip(range(1, len(xxx)+1), xxx, new)))\n\n # Density Normalization: m_tot(NEW) should be equal m_tot(OLD)\n m_rho = newPreSN.mass_tot_rho() + newPreSN.m_core\n rho = newPreSN.rho * newPreSN.m_tot / m_rho\n newPreSN.set_hyd(PreSN.sRho, rho)\n\n # abn reshape\n for el in self.Elements:\n old = self.el(el)\n new = interp(xxx, xx, old, s=start, e=end, kind='np')\n # new = interp(xxx, xx, old, s=start, e=end, kind=kind, is_log=True)\n newPreSN.set_chem(el, new)\n\n # copy parameters\n newPreSN.copy_par(self) # 
keys=['time_start', 'm_tot', 'm_core', 'r_cen'])\n\n return newPreSN", "def reduce_mini(minigrid):\n row = []\n for i in range(3):\n for j in range(3):\n row.append(minigrid[i][j])\n for i in range(9):\n if len(row[i]) == 1:\n for j in range(9):\n if i != j:\n if row[i] in row[j]:\n chunks = row[j].split(row[i])\n row[j] = chunks[0] + chunks[1]\n\n count_dict = {}\n for i in range(9):\n for char in row[i]:\n if char in count_dict:\n count_dict[char] = \"X\"\n else:\n count_dict[char] = i\n\n for key in count_dict:\n if count_dict[key] != \"X\":\n row[count_dict[key]] = key\n\n for i in range(3):\n for j in range(3):\n minigrid[i][j] = row[(i*3)+j]\n\n return minigrid", "def downsampleShape(self, numDesiredPoints):\n\n if len(self.x) > 2:\n t_current_x = np.linspace(0, 1, len(self.x))\n t_current_y = np.linspace(0, 1, len(self.y))\n t_desired_x = np.linspace(0, 1, numDesiredPoints)\n t_desired_y = np.linspace(0, 1, numDesiredPoints)\n f = interpolate.interp1d(t_current_x, self.x, kind='linear')\n self.x = f(t_desired_x).tolist()\n f = interpolate.interp1d(t_current_y, self.y, kind='linear')\n self.y = f(t_desired_y).tolist()\n\n self.len = numDesiredPoints", "def prepare_input(seating_plan):\n height = len(seating_plan)\n width = len(seating_plan[0]) \n concat='.'.join([ '.' * width ] + seating_plan )\n return width, list(map( DECODE.get, concat ))", "def rec_transform(image, pts):\n ord_pts = order_points(pts)\n\n # find the dimension of the rectangular created by the given points", "def _repack(linear, n=3):\n return list(zip(*[iter(linear)] * n))", "def extend(inarray, extension, d):\n n = inarray.shape[d]\n reps = concatenate(([extension+1], ones(n-2), [extension+1])).astype(int64)\n return inarray.repeat(reps, axis=d)", "def add_numprocs(value):\n return_list = []\n for l in value:\n _tmp_dict = l\n _tmp_dict[\"NP\"] = l[\"totsize\"] / l[\"subdimsize\"]\n return_list.append(_tmp_dict)\n return sorted(return_list, key=lambda i: i[\"NP\"])", "def reconstruct_image(patch_list, patch_nb=2):\n line_list = []\n for i in range(0, patch_nb ** 2 - 1, patch_nb):\n line_list.append(cv2.hconcat(patch_list[i : i + patch_nb]))\n final_img = cv2.vconcat(line_list)\n return final_img", "def discretized_line(x_start, y_start, x_end, y_end, n_elements):\n n_pts = n_elements + 1\n x = np.linspace(x_start, x_end, n_pts)\n y = np.linspace(y_start, y_end, n_pts)\n x1 = x[:-1]\n y1 = y[:-1]\n x2 = x[1:]\n y2 = y[1:]\n return x1, y1, x2, y2", "def tower_of_hanoi_stack(n, beg, aux, end):", "def _new_rep(self, rep):\n return self._new(rep, self.shape, self.domain)" ]
[ "0.52478415", "0.51951337", "0.5072514", "0.5000615", "0.49894366", "0.4934885", "0.4900051", "0.48402202", "0.47977155", "0.4778517", "0.47763914", "0.47651535", "0.47565943", "0.4728484", "0.47228515", "0.47023296", "0.46736914", "0.46479553", "0.4616724", "0.45915994", "0.45875713", "0.4584844", "0.4581399", "0.45808384", "0.4574141", "0.45544618", "0.45501894", "0.45459387", "0.45450562", "0.4525531" ]
0.6975873
0
Propagate the arg input to an (ordered) dict. The behaviour varies according to the input of arg and itearg.
def _propagate(tags,arg,itearg=False):
    tagnum = len(tags)
    if isinstance(arg,list):
        if itearg:
            if isinstance(arg[0],(tuple,list,np.ndarray)):
                if len(arg) != tagnum:
                    raise ValueError("""list length expected to be {0}"""
                                     .format(tagnum))
                else:
                    return OrderedDict(zip(tags,arg))
            else:
                return dict(zip(tags,[arg]*tagnum))
        else:
            if len(arg) != tagnum:
                raise ValueError("""list length expected to be {0}"""
                                 .format(tagnum))
            else:
                return OrderedDict(zip(tags,arg))
    elif isinstance(arg,dict):
        return arg
    else:
        return dict(zip(tags,[arg]*tagnum))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _map_args_kwargs_to_input(self, *args, **kwargs) -> Dict[str, Any]:\n input_dict = {k: v for k, v in zip(self.inputs, args)}\n input_dict.update(kwargs)\n\n return input_dict", "def argdict(self):\n return dict((arg.name, val) for arg, val in zip(self.sig, self))", "def Dict(**args):\n return args", "def build_dict(arg):\n # helper function to the Evaluator.to_property_di_graph() method that\n # packages the dictionaries returned by the \"associate_\" family of\n # functions and then supplies the master dict (one_dict) to the Vertex\n # obj as **kwargs\n one_dict = {}\n for ar in arg:\n one_dict.update(ar)\n return one_dict", "def stubbornDict(*arg, **kwarg):\n result = {}\n for a in arg:\n result.update(StubbornDict.to_dict(a))\n result.update(kwarg)\n return StubbornDict(result)", "def _build_rebind_dict(req_args, rebind_args):\n if rebind_args is None:\n return collections.OrderedDict()\n elif isinstance(rebind_args, (list, tuple)):\n # Attempt to map the rebound argument names position by position to\n # the required argument names (if they are the same length then\n # this determines how to remap the required argument names to the\n # rebound ones).\n rebind = collections.OrderedDict(zip(req_args, rebind_args))\n if len(req_args) < len(rebind_args):\n # Extra things were rebound, that may be because of *args\n # or **kwargs (or some other reason); so just keep all of them\n # using 1:1 rebinding...\n rebind.update((a, a) for a in rebind_args[len(req_args):])\n return rebind\n elif isinstance(rebind_args, dict):\n return rebind_args\n else:\n raise TypeError(\"Invalid rebind value '%s' (%s)\"\n % (rebind_args, type(rebind_args)))", "def _deep_copy_arg_dict(input_arg_dict):\n output_arg_dict = {}\n for name, param in input_arg_dict.items():\n output_arg_dict[name] = param.copy()\n return output_arg_dict", "def arglist2dict(args):\n arg_dict = {}\n\n if len(args) == 0:\n return arg_dict\n\n if not args[0].startswith('--'):\n raise ValueError(f\"Positional keywords are not supported: {args[0]}\")\n\n i = 0\n while i < len(args):\n arg = args[i]\n i = i + 1\n if arg.startswith('--'):\n dest = arg[2:]\n j, arglist = Parser.get_args(args[i:])\n i = i + j\n Parser.update_arg_dict(arg_dict, dest, arglist)\n return arg_dict", "def epc_arg_transformer(arg):\n if type(arg) != list:\n return arg\n\n # NOTE: Empty list elisp can be treated as both empty python dict/list\n # Convert empty elisp list to empty python dict due to compatibility.\n\n # check if we can tranform arg to python dict instance\n type_dict_p = len(arg) % 2 == 0\n if type_dict_p:\n for v in arg[::2]:\n if type(v) != sexpdata.Symbol or not v.value().startswith(\":\"):\n type_dict_p = False\n break\n\n if type_dict_p:\n # transform [Symbol(\":a\"), 1, Symbol(\":b\"), 2] to dict(a=1, b=2)\n ret = dict()\n for i in range(0, len(arg), 2):\n ret[arg[i].value()[1:]] = epc_arg_transformer(arg[i + 1])\n return ret\n else:\n return list(map(epc_arg_transformer, arg))", "def get_argdict(cls, toolchain, args):\n return {} # Empty must be overloaded (if required)", "def args_to_dictionaty(args):\n\tres_args = {}\n\tfor i, arg in enumerate(args[1:]):\n\t\tif i % 2 == 0:\n\t\t\tkey = arg\n\t\telse:\n\t\t\tres_args[key] = arg\n\treturn res_args", "def _dict_to_args(self, arg_dict):\n if arg_dict:\n yield \"--{}=data:application/json;charset=utf-8,{}\".format(\n self._CONFIG_FLAG.name,\n urllib.parse.quote(json_encode(arg_dict, pretty=False), encoding=\"utf-8\")\n )", "def _create_param_dict(self, func_args):\n for i, a in 
enumerate(func_args):\n self.fn.args[i].name = str(a)\n self.param_dict[a] = self.fn.args[i]", "def process_arg(arg):\n return get_object(arg, all_dicts, existing_objects, ignore_names, depth + 1)", "def _arg2kw(self, mixed_args):\n def insert(dict_, k, v):\n if k in dict_:\n print \"duplicated args : %s \" % kv[0]\n raise ArgParseError\n dict_[k] = v\n \n opts = []\n args = {}\n\n n = len(mixed_args)\n i = 0\n while i < n:\n a = mixed_args[i]\n if a == '-' or a == '--' :\n opts.append(a)\n elif a.startswith(\"---\"):\n print \"invalid args: %s\" % mixed_args\n print \"only the following formats are supported:\"\n print \" arg1\"\n print \" --input=name1\"\n print \" --output name3\"\n print \" -oname2\"\n print \" -o name4\"\n raise ArgParseError\n elif a.startswith(\"--\"):\n kv = a[2:].split(\"=\", 1)\n if len(kv) == 2:\n insert(args, kv[0], kv[1])\n else:\n i += 1\n insert(args, kv[0], mixed_args[i])\n elif a.startswith(\"-\"):\n if len(a) > 2:\n insert(args, a[1], a[2:])\n else:\n i += 1\n insert(args, a[1], mixed_args[i])\n else:\n opts.append(a)\n i += 1\n \n return opts, args", "def create_dict(*args):\n output = {}\n idx = 0\n while idx < len(args):\n output[args[idx + 1]] = args[idx]\n idx += 2\n\n return output", "def __init__(self, *args):\n\t\tfrom collections import OrderedDict\n\t\tnew_dict = {}\n\t\tfor x, y in enumerate(args):\n\t\t\tnew_dict.update({x: y})\n\t\tnew_dict = OrderedDict(sorted(new_dict.items()))\n\t\tself.__dict__ = new_dict", "def override__args(self,arg,value):\n self.__args[arg] = value", "def args2dict(args, dict_args={}):\n \n for arg in args:\n #this_entry = re.findall(r'[^\"\\s]\\S*|\".+?\"', arg)\n p_arg = arg.split('=')\n if len(p_arg) > 1:\n dict_args[p_arg[0]] = False if p_arg[1].lower() == 'false' else \\\n True if p_arg[1].lower() == 'true' else \\\n None if p_arg[1].lower() == 'none' else \\\n '='.join(p_arg[1:]) if len(p_arg) > 2 else \\\n p_arg[1]\n \n return(dict_args)", "def filter_args_dict(self, args):\n return dict((k,v) for (k,v) in viewitems(args) if self.has_arg(k))", "def parse_arguments(args: List[Dict]) -> 'Dict[str, Argument]':\n if not args:\n return {}\n result = {}\n for a in args:\n if not a:\n continue\n arg = Argument(a)\n result[arg.name] = arg\n return result", "def process_args(self, args, cache=None):\n\n new_args = dict()\n\n for k, v in args.items():\n if k == 'trial':\n if isinstance(v, str):\n hashid, rev = v.split('_')\n rev = int(rev)\n\n v = self.backend.get_trial(Trial(_hash=hashid, revision=rev))\n for i in v:\n if i.revision == rev:\n v = i\n break\n else:\n warning('Was not able to find the correct trial revision')\n\n v = from_json(v)\n\n elif k == 'project':\n if isinstance(v, str):\n v = self.backend.get_project(Project(name=v))\n\n v = from_json(v)\n\n elif k == 'group':\n if isinstance(v, str):\n v = self.backend.get_trial_group(TrialGroup(_uid=v))\n\n v = from_json(v)\n\n new_args[k] = v\n\n return new_args", "def test_kw_args_with_dict():\n arg_dict = {'visited_color': 'blue',\n 'link_color': 'red',\n 'back_color': 'yellow',\n 'fore_color': 'orange'}\n assert arguments.fun_opt_kw_params(**arg_dict) == ('orange', 'yellow',\n 'red', 'blue')", "def selectArgsFromDict(func, argdict):\n return dict([(i, argdict[i]) for i in getArgs(func) if i in argdict])", "def _args_to_params(self, args, tree):\n with tree.treeChangeBlocker():\n for key, val in args.items():\n if 'range' in key:\n _range = tree.child(key)\n if val is None:\n _range.child(\"Auto\").setValue(True)\n else:\n 
_range.child(\"Low\").setValue(val[0])\n _range.child(\"High\").setValue(val[1])\n _range.child(\"Auto\").setValue(False)\n elif key == 'polarization_factor':\n if val is None:\n tree.child('Apply polarization factor').setValue(True)\n else:\n tree.child('Apply polarization factor').setValue(True)\n tree.child(key).setValue(val)\n else:\n try:\n child = tree.child(key)\n except:\n # No specific error thrown for missing child\n child = None\n if child is not None:\n if val is None:\n child.setValue('None')\n else:\n child.setValue(val)", "def _JsonDictToArgs(cls, path_context, data_location, dct, memo=None):\n if(cls is InputGenerator):\n tag = dct['tag']\n data = dct['data']\n return cls._registered[tag]._JsonDictToArgs(path_context, data_location, data, memo=memo)\n else:\n _, args, kwargs = super()._JsonDictToArgs(path_context, data_location, dct, memo=memo)\n args.append(StageMeta.Load(path_context, dct['meta'], memo=memo))\n return cls, args, kwargs", "def dictVarArgs(arg1, arg2='default', **theDict ):\n len = 0\n print \"************ Presently dictionary elaborated variable args **************\"\n print \"Function\\'s first is \", arg1\n print \"Funx second is \", arg2\n for var in theDict:\n print \"additional args %d for key \" % (len), str(var), \"=\", str(theDict[var])\n len = len + 1", "def flexdictargs(func: Callable[[dict], RT]) -> Callable[[Iterable, Any], RT]:\n\n @wraps(func)\n def f(self, *args, **kwargs):\n if args and isinstance(args[0], MutableMapping):\n d = args[0]\n elif kwargs:\n d = kwargs\n else:\n raise TypeError(\"invalid input arguments\")\n return func(self, normalize(d))\n\n return f", "def parse_request_arg_dict(arg, exception_class=Exception):\n arg_dict = {}\n arg_pairs = arg.split(';')\n for arg_pair in arg_pairs:\n try:\n arg_name, arg_value = arg_pair.split('=', 1)\n except Exception as error:\n logging.exception(error)\n raise exception_class(\n 'there is no `=` in %s' % arg_pair\n )\n arg_dict[arg_name] = arg_value\n return arg_dict", "def get_order_args():\n orders = {}\n for arg in request.args:\n re_match = re.findall(\"_oc_(.*)\", arg)\n if re_match:\n order_direction = request.args.get(\"_od_\" + re_match[0])\n if order_direction in (\"asc\", \"desc\"):\n orders[re_match[0]] = (request.args.get(arg), order_direction)\n return orders" ]
[ "0.66467035", "0.6391914", "0.63108075", "0.62305945", "0.61446375", "0.612977", "0.6094551", "0.60677683", "0.6064272", "0.5990363", "0.5974368", "0.5922521", "0.5919071", "0.5812439", "0.5783572", "0.57812166", "0.5690085", "0.5683032", "0.56811297", "0.56756127", "0.5619738", "0.56161535", "0.5601811", "0.5509863", "0.54720145", "0.5468395", "0.54674405", "0.5452806", "0.5435135", "0.53998953" ]
0.7073806
0
Generator for exported functions.
def _exported_functions(self): mod_base = self.obj_parent.DllBase exp_dir = self.obj_parent.export_dir() # PE files with a large number of functions will have arrays # that spans multiple pages. Thus the first entries may be valid, # last entries may be valid, but middle entries may be invalid # (paged). In the various checks below, we test for None (paged) # and zero (non-paged but invalid RVA). # Array of RVAs to function code address_of_functions = obj.Object('Array', offset = mod_base + self.AddressOfFunctions, targetType = 'unsigned int', count = self.NumberOfFunctions, vm = self.obj_native_vm) # Array of RVAs to function names address_of_names = obj.Object('Array', offset = mod_base + self.AddressOfNames, targetType = 'unsigned int', count = self.NumberOfNames, vm = self.obj_native_vm) # Array of RVAs to function ordinals address_of_name_ordinals = obj.Object('Array', offset = mod_base + self.AddressOfNameOrdinals, targetType = 'unsigned short', count = self.NumberOfNames, vm = self.obj_native_vm) # When functions are exported by Name, it will increase # NumberOfNames by 1 and NumberOfFunctions by 1. When # functions are exported by Ordinal, only the NumberOfFunctions # will increase. First we enum functions exported by Name # and track their corresponding Ordinals, so that when we enum # functions exported by Ordinal only, we don't duplicate. seen_ordinals = [] # Handle functions exported by name *and* ordinal for i in range(self.NumberOfNames): name_rva = address_of_names[i] ordinal = address_of_name_ordinals[i] if name_rva in (0, None): continue # Check the sanity of ordinal values before using it as an index if ordinal == None or ordinal >= self.NumberOfFunctions: continue func_rva = address_of_functions[ordinal] if func_rva in (0, None): continue # Handle forwarded exports. If the function's RVA is inside the exports # section (as given by the VirtualAddress and Size fields in the # DataDirectory), the symbol is forwarded. Return the name of the # forwarded function and None as the function address. if (func_rva >= exp_dir.VirtualAddress and func_rva < exp_dir.VirtualAddress + exp_dir.Size): n = self._name(func_rva) f = obj.NoneObject("Ordinal function {0} in module {1} forwards to {2}".format( ordinal, str(self.obj_parent.BaseDllName or ''), n)) else: n = self._name(name_rva) f = func_rva # Add the ordinal base and save it ordinal += self.Base seen_ordinals.append(ordinal) yield ordinal, f, n # Handle functions exported by ordinal only for i in range(self.NumberOfFunctions): ordinal = self.Base + i # Skip functions already enumberated above if ordinal not in seen_ordinals: func_rva = address_of_functions[i] if func_rva in (0, None): continue seen_ordinals.append(ordinal) # There is no name RVA yield ordinal, func_rva, obj.NoneObject("Name RVA not accessible")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def exports():", "def get_functions():\n\treturn [f for f in globals() if f.startswith('make_')]", "def exports(self):\n\n try:\n data_dir = self.export_dir()\n except ValueError, why:\n raise StopIteration(why)\n\n expdir = obj.Object('_IMAGE_EXPORT_DIRECTORY',\n offset = self.DllBase + data_dir.VirtualAddress,\n vm = self.obj_native_vm,\n parent = self)\n\n if expdir.valid(self._nt_header()):\n # Ordinal, Function RVA, and Name Object \n for o, f, n in expdir._exported_functions():\n yield o, f, n", "def __getattr__(self, exported_function_name: str) -> ExportedFunction:\n pass", "def instructions(self):\n for inst in self.global_insts[:]:\n yield inst\n for function in self.functions[:]:\n for inst in function.instructions():\n yield inst", "def all_decorated_module_functions( this, module, exclude_methods = False, exclude_functions = False) :\n\t\tmodule_names = []\n\t\tfor el in dir( module) :\n\t\t\tfn = module.__dict__.get( el)\n\n\t\t\t# lookup for functions\n\t\t\tif not exclude_functions and type( fn) in [types.FunctionType, staticmethod, classmethod] :\n\t\t\t\tfn = this._getfn( fn)\n\t\t\t\tif len( this.get_decorators( fn)) > 0 :\n\t\t\t\t\tfname = fn.__annotations__[this.NATIVE_FUNCTION].__name__\n\t\t\t\t\tif fname not in module_names :\n\t\t\t\t\t\tyield { fname : module.__dict__.get( fname) }\n\t\t\t\t\t\tmodule_names += [fname]\n\t\t\t\n\t\t\t# lookup for class methods\n\t\t\tif not exclude_methods and type( fn) is type :\n\t\t\t\tfor cls_el in dir( fn) :\n\t\t\t\t\tmethod = fn.__dict__.get( cls_el)\n\t\t\t\t\tif type( method) in [types.FunctionType, staticmethod, classmethod] :\n\t\t\t\t\t\tmethod = this._getfn( method)\n\t\t\t\t\t\tif len( this.get_decorators( method)) > 0:\n\t\t\t\t\t\t\tfname = method.__annotations__[this.NATIVE_FUNCTION].__name__\n\t\t\t\t\t\t\tif fname not in module_names :\n\t\t\t\t\t\t\t\tyield { \"%s.%s\" %(fn.__name__, fname) : fn.__dict__.get( fname) }\n\t\t\t\t\t\t\t\tmodule_names += [fname]", "def main():\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('-i', '--input', help='Input .py file', nargs='+')\n args = parser.parse_args()\n\n mod_func = []\n\n for pyfile in args.input:\n tree = ast.parse(open(pyfile).read())\n\n methods = sorted({node.name for node in ast.walk(tree)\n if isinstance(node, ast.FunctionDef)})\n mod_func.extend([[pyfile, methods[i]] for i in range(len(methods))])\n\n write_csv(\"meth_func.csv\", mod_func)", "def get_user_functions(table):\n for f in [(f) for f in table.values() if type(f) == types.FunctionType]:\n yield f", "def export():\n def wrap(func, *args, **kwargs):\n func._rpcserver_export = True\n doc = func.__doc__\n func.__doc__ = \"**RPC Exported Function**\"\n if doc:\n func.__doc__ += doc\n\n return func\n return wrap", "def descriptors(mol, functions):\n for name, function in functions:\n yield (name, function(mol))", "def bunch__functions(idfobject): \n funcdct = idfobject.__functions\n funcsresults = [(key, funcdct[key](idfobject)) for key in funcdct.keys()]\n return funcsresults", "def create_included_function_list():\n import iteration_utilities\n from iteration_utilities import Iterable\n from itertools import chain\n from operator import itemgetter\n from astropy.table import Table\n from astropy.io.ascii import RST\n\n it = Iterable(chain(iteration_utilities._cfuncs.__dict__.items(),\n iteration_utilities._helpers._performance.__dict__.items(),\n iteration_utilities._recipes._core.__dict__.items(),\n iteration_utilities._recipes._additional.__dict__.items())\n # 
Exclude PY2 variable and private functions\n ).filterfalse(lambda x: x[0].startswith(('PY2', '_'))\n # Exclude everything that has no __module__\n ).filter(lambda x: hasattr(x[1], '__module__')\n # Only include functions that come from the package\n ).filter(lambda x: x[1].__module__.startswith('iteration_utilities')\n # Remove duplicate names\n ).unique_everseen(itemgetter(0)\n # Sort lexically\n ).get_sorted(key=lambda x: x[0].lower())\n\n it = Iterable(it\n # Create a Sphinx link from function name and module\n ).map(lambda i: ':py:func:`~{}.{}`'.format(i[1].__module__, i[0])\n # Group into 4s so we get a 4 column Table\n ).grouper(4, fillvalue=''\n # Convert to list because Table expects it.\n ).as_list()\n\n return '\\n'.join(RST().write(Table(rows=it)))", "def module_functions_decorated_with( this, module, decorator, exclude_methods = False, exclude_functions = False) :\n\t\tfor mfn in this.all_decorated_module_functions( module, exclude_methods, exclude_functions) :\n\t\t\tfor fname, fn in mfn.items() :\n\t\t\t\tif decorator in this.get_decorators( fn) :\n\t\t\t\t\tyield { fname : fn }", "def gen_functions_decl(self, functions):\n for node in functions:\n node.declgen = node.ast.gen_decl()", "def __def_function__():\n pass", "def get_functions():\n\n filenames = set()\n private_path = os.path.join(os.path.expanduser('~'), '.nexpy', 'functions')\n if os.path.isdir(private_path):\n sys.path.append(private_path)\n for file_ in os.listdir(private_path):\n name, ext = os.path.splitext(file_)\n if name != '__init__' and ext.startswith('.py'):\n filenames.add(name)\n\n functions_path = pkg_resources.resource_filename('nexpy.api.frills',\n 'functions')\n sys.path.append(functions_path)\n for file_ in os.listdir(functions_path):\n name, ext = os.path.splitext(file_)\n if name != '__init__' and ext.startswith('.py'):\n filenames.add(name)\n\n functions = {}\n for name in sorted(filenames):\n try:\n module = importlib.import_module(name)\n if hasattr(module, 'function_name'):\n functions[module.function_name] = module\n except ImportError:\n pass\n\n return functions", "def get_package_exports(package): \n fns = []\n for cls in package.classes:\n fn = get_class_decoder_function_name(cls)\n if fn not in fns:\n fns.append(fn)\n\n exports = ''\n for fn in fns:\n if len(exports) > 0:\n exports += ', ' + emit_line_return() + emit_indent()\n exports += '\\\"{0}\\\"'.format(fn)\n\n return exports", "def LookupAndFixupExports(self, names):\n for o, param in self.LookupExports(names):\n yield o, self._FixExportName(o.obj, param)", "def functions(self):\n return [v for v in self.globals.values()\n if isinstance(v, values.Function)]", "def __call__(fun_name):", "def visit_Module(self, node):\n self.generic_visit(node)\n return self.functions", "def _init_builtins(self):\n for k, rexp in self.expressions.items():\n func = getattr(self, \"%s_processor\"%k)()\n yield (rexp, [func] + self._extra_rules.get(k, []))", "def ls():\n for f in get_user_functions(globals()):\n print (str(f).split()[1])", "def getGlobalFunctions(self, name: unicode) -> List[ghidra.program.model.listing.Function]:\n ...", "def collect_functions(self):\n if not self.functions:\n for item in dir(self.file_import):\n new_function = getattr(self.file_import, item)\n # if it is a YMLMetadataCollector wrapper, add it to the list.\n if (\n callable(new_function)\n and isinstance(new_function, FunctionType)\n and \"YMLMetadataCollector\" in repr(new_function)\n ):\n self.functions.append(new_function)", "def funcs(self) -> {str:TFunc}:\n 
raise NotImplementedError()", "def find_functions(module):\n for attrname in dir(module):\n attr = getattr(module, attrname)\n # iteratively get __module__ or __class__ (where __module__ fails for clas\n if callable(attr) and getattr(attr, '__module__', getattr(attr, '__class__', '')) == module.__name__:\n yield attr", "def export_for_pydoc(self, module_globals):\n module_all = module_globals.setdefault(\"__all__\", [])\n for k, v in sorted(self.constants.items()):\n module_globals[k] = v\n module_all.append(k)\n for k, v in sorted(self.enums.items()):\n module_globals[k] = v\n module_all.append(k)\n for fname, (argtypes, argtuple, restype) in sorted(\n self.fundecls.items()):\n prototype = \"def {}{}: pass\".format(\n fname, inspect.formatargspec(argtuple._fields))\n d = {}\n exec(prototype, globals(), d)\n func = d[fname]\n for arg, argtype in zip(argtuple._fields, argtypes):\n func.__annotations__[arg] = argtype\n func.__annotations__[\"return\"] = restype\n module_globals[fname] = func\n module_all.append(fname)", "def functions(self):\n return functions(self.startEA, self.endEA)", "def get_fns(self):\n return self.key_to_fn" ]
[ "0.682521", "0.6562752", "0.64066714", "0.6176337", "0.61679244", "0.60143894", "0.5998909", "0.59507465", "0.5946031", "0.5929983", "0.5886861", "0.58813053", "0.5822374", "0.5819807", "0.5795701", "0.57572097", "0.5720245", "0.5719738", "0.5695228", "0.56891406", "0.5686481", "0.5657168", "0.5647249", "0.5629858", "0.56257164", "0.5609698", "0.5604936", "0.55967456", "0.55830854", "0.55801713" ]
0.66660017
1
Return a String object for the name at the given RVA
def _name(self, name_rva): return obj.Object("String", offset = self.obj_parent.DllBase + name_rva, vm = self.obj_native_vm, length = 128)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _name(self, name_rva):\n return obj.Object(\"String\",\n offset = self.obj_parent.DllBase + name_rva,\n vm = self.obj_native_vm, length = 128)", "def getAName(self, v):\n return self.avars[v]", "def get_name() -> str:", "def get_name():", "def getString(self, name: unicode) -> unicode:\n ...", "def get_name(self):\n\t\treturn call_sdk_function('PrlFoundVmInfo_GetName', self.handle)", "def name(self):\n return 'PM_VAR_RAS_Geoquimica'", "def get_name():\n return \"SVMd+ - simplified approach\"", "def getName(self, index) -> Str:\n ...", "def generate_vpt_title(radar, field):\n time_str = generate_radar_time_begin(radar).isoformat() + \"Z\"\n l1 = f\"{generate_radar_name(radar)} {time_str} \"\n field_name = generate_field_name(radar, field)\n return l1 + \"\\n\" + field_name", "def _get_vrf_label(self):\n return self.__vrf_label", "def get_volume_name(self, vid):\n return \"cv-{0}\".format(vid)", "def get_name():\n return \"SVMd+\"", "def getNameFromNimRti(rti):\n try:\n # sometimes there isn't a name field -- example enums\n return rti['name'].string(encoding=\"utf-8\", errors=\"ignore\")\n except:\n return None", "def printname(bruce):", "def roo_name ( prefix = 'roo_' , suffix = '' ) :\n regname = ROOT.RooNameReg.instance()\n name = prefix + suffix\n MakeVar.__numnames += 1 \n while name in MakeVar.__pdf_names or name in MakeVar.__var_names or regname.known ( name ) or not name :\n name = prefix + ''.join ( ( random.choice ( ascii_letters ) for i in range ( 6 ) ) ) + suffix \n MakeVar.__numnames += 1 \n return name", "def get_rt_name ( base_name, sub_name) :\n return base_name + '-' + sub_name +'-RT'", "def get_vip_resource_name(vip_name):\n return \"\".join([\"vip__\", vip_name])", "def name(self) -> str:\n\t\treturn self._raw_result['name']", "def get_name(self):\n\t\treturn call_sdk_function('PrlUsrInfo_GetName', self.handle)", "def get_rep_name(self, name):\n return \"r{0}\".format(name)", "def get_name(view, tid):\n\n kdv = get_kdv(view)\n name = kdv[tid].Name\n\n return name", "def name(self) -> str:", "def name(self) -> str:", "def name(self) -> str:", "def name(self) -> str:", "def name(self) -> str:", "def get_vehicle_name(self, vehid):\n vehid_cond = SQLBinaryExpr(COL_NAME_VEHICLES_VEHICLEID, OP_EQ, SQLLiteral(vehid))\n entries = self.select_generic_data(select_list=[COL_NAME_VEHICLES_NAME],\n table_list=[TABLE_NAME_VEHICLES],\n where=vehid_cond)\n if len(entries) == 1:\n return entries[0][COL_NAME_VEHICLES_NAME]\n elif len(entries) > 1:\n raise AdasDBError(\"Vehicle ID '%s' cannot be resolved because it is ambiguous. (%s)\" % (vehid, entries))\n\n raise AdasDBError(\"No resolution of '%s'. (%s)\" % (vehid, entries))", "def get_name(self):", "def get_name(self):" ]
[ "0.7861681", "0.68811107", "0.60367334", "0.6008466", "0.5906894", "0.59004587", "0.58106697", "0.5782724", "0.5740362", "0.5736517", "0.57215333", "0.5647274", "0.56344426", "0.5604475", "0.56030816", "0.56015515", "0.55886394", "0.55614364", "0.55610186", "0.55606437", "0.55517954", "0.5516681", "0.5487255", "0.5487255", "0.5487255", "0.5487255", "0.5487255", "0.5474647", "0.546624", "0.546624" ]
0.7883464
0
Returns the name of the DLL for this IID
def dll_name(self): return self._name(self.Name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def interface_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"interface_name\")", "def module_name(self):\n return self.lib.get_module_name()", "def libraryName(self):\n ret=\"\"\n if self.kind == \"lib\":\n ret = self.name + \"Engine\"\n elif self.kind == \"exe\":\n ret = self.name + \"Exelib\"\n else:\n raise Invalid(\"Invalid kind of component: %s. Supported kinds are 'lib' and 'exe'\" % self.name)\n return ret", "def interface_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"interface_name\")", "def _get_interface_name(self):\n return self.__interface_name", "def get_library_name(self, linker):\n return linker.get_library_name(self.__library.get_name())", "def get_name_with_lib(datablock):\r\n text = datablock.name\r\n if datablock.library:\r\n # text += ' (Lib: \"%s\")' % datablock.library.name\r\n text = \"L \" + text\r\n return text", "def module_name(self):\n return \"py{0:s}\".format(self.library_name[3:])", "def __GetLibFileName(cls, src, name):\n bin_path = FileUtils.GetBinPathForFile(src)\n return os.path.join(os.path.dirname(bin_path), '_%s.so' % name)", "def name(cls):\n return MODULE_NAME", "def filename(self):\n return self.hfile.GetName()", "def module_name(self):\n return self.name()", "def name(self):\n return self._modname", "def product_name(self):\n buf = (ctypes.c_char * self.MAX_BUF_SIZE)()\n self._dll.JLINKARM_EMU_GetProductName(buf, self.MAX_BUF_SIZE)\n return ctypes.string_at(buf).decode()", "def name(self):\n module_filepath = inspect.getfile(type(self))\n module_filename = os.path.basename(module_filepath)\n command_name, _ = os.path.splitext(module_filename)\n return command_name", "def name_from_iid(self, iid):\n retVal = iid.replace(\"_IID\", \"\")\n retVal = retVal.replace(\"_\", \" \")\n return retVal", "def call_name(self):\n return str(self.executable.name)", "def plugin_name(self):\n return os.path.splitext(os.path.basename(os.path.abspath(inspect.getfile(self.__class__))))[0]", "def interface_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"interface_name\")", "def module_name(self):\n return \"Infoservices\"", "def module_name(self):\n return self.name", "def get_library_name(name):\n suffix = get_sharedlib_suffix()\n if not is_windows() and name.startswith(\"lib\"):\n name = name[len(\"lib\"):]\n if suffix and name.endswith(suffix):\n name = name[:-len(suffix)]\n return name", "def modulename():\n from inspect import getmodulename,getfile\n return getmodulename(getfile(lambda x:x))", "def getName(self):\n return _libsbml.Port_getName(self)", "def get_name():\n return __name__", "def libraryName(self):\n return _osgAnimation.LinkVisitor_libraryName(self)", "def AssemblyName(self) -> str:", "def gethandlername(URL):\n match = re.search(\"/([a-zA-Z0-9_-]+)\\.prog($|/|\\?)\", URL)\n if not match:\n # Couldn't find the requested module\n raise404(\"Couldn't find a module name in URL \" + URL)\n return match.group(1)", "def get_name(self):\n bcname = _pychidg.f90wrap_get_name(self=self._handle)\n return bcname", "def libraryName(self):\n return _osgAnimation.VertexInfluenceMap_libraryName(self)" ]
[ "0.658748", "0.6539247", "0.6489708", "0.636311", "0.62957597", "0.62046534", "0.61847323", "0.61674047", "0.61405915", "0.6040135", "0.60379577", "0.60340816", "0.6029928", "0.5979551", "0.59447545", "0.5925962", "0.5914885", "0.5880614", "0.5879981", "0.5864137", "0.5864069", "0.58621705", "0.5817298", "0.57763773", "0.5774507", "0.5769711", "0.5698577", "0.5679867", "0.56427217", "0.5636061" ]
0.82039464
0
Generator for imported functions.
def _imported_functions(self): i = 0 while 1: thunk = obj.Object('_IMAGE_THUNK_DATA', offset = self.obj_parent.DllBase + self.OriginalFirstThunk + i * self.obj_vm.profile.get_obj_size('_IMAGE_THUNK_DATA'), vm = self.obj_native_vm) # We've reached the end when the element is zero if thunk == None or thunk.AddressOfData == 0: break o = obj.NoneObject("Ordinal not accessible?") n = obj.NoneObject("Imported by ordinal?") f = obj.NoneObject("FirstThunk not accessible") # If the highest bit (32 for x86 and 64 for x64) is set, the function is # imported by ordinal and the lowest 16-bits contain the ordinal value. # Otherwise, the lowest bits (0-31 for x86 and 0-63 for x64) contain an # RVA to an _IMAGE_IMPORT_BY_NAME struct. if thunk.OrdinalBit == 1: o = thunk.Ordinal & 0xFFFF else: iibn = obj.Object("_IMAGE_IMPORT_BY_NAME", offset = self.obj_parent.DllBase + thunk.AddressOfData, vm = self.obj_native_vm) o = iibn.Hint n = iibn.Name # See if the import is bound (i.e. resolved) first_thunk = obj.Object('_IMAGE_THUNK_DATA', offset = self.obj_parent.DllBase + self.FirstThunk + i * self.obj_vm.profile.get_obj_size('_IMAGE_THUNK_DATA'), vm = self.obj_native_vm) if first_thunk: f = first_thunk.Function.v() yield o, f, str(n or '') i += 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_functions():\n\treturn [f for f in globals() if f.startswith('make_')]", "def collect_functions(self):\n if not self.functions:\n for item in dir(self.file_import):\n new_function = getattr(self.file_import, item)\n # if it is a YMLMetadataCollector wrapper, add it to the list.\n if (\n callable(new_function)\n and isinstance(new_function, FunctionType)\n and \"YMLMetadataCollector\" in repr(new_function)\n ):\n self.functions.append(new_function)", "def imports():\n for name, val in globals().items():\n if isinstance(val, getattr(types, \"ModuleType\")):\n yield val.__name__", "def instructions(self):\n for inst in self.global_insts[:]:\n yield inst\n for function in self.functions[:]:\n for inst in function.instructions():\n yield inst", "def _init_builtins(self):\n for k, rexp in self.expressions.items():\n func = getattr(self, \"%s_processor\"%k)()\n yield (rexp, [func] + self._extra_rules.get(k, []))", "def create_included_function_list():\n import iteration_utilities\n from iteration_utilities import Iterable\n from itertools import chain\n from operator import itemgetter\n from astropy.table import Table\n from astropy.io.ascii import RST\n\n it = Iterable(chain(iteration_utilities._cfuncs.__dict__.items(),\n iteration_utilities._helpers._performance.__dict__.items(),\n iteration_utilities._recipes._core.__dict__.items(),\n iteration_utilities._recipes._additional.__dict__.items())\n # Exclude PY2 variable and private functions\n ).filterfalse(lambda x: x[0].startswith(('PY2', '_'))\n # Exclude everything that has no __module__\n ).filter(lambda x: hasattr(x[1], '__module__')\n # Only include functions that come from the package\n ).filter(lambda x: x[1].__module__.startswith('iteration_utilities')\n # Remove duplicate names\n ).unique_everseen(itemgetter(0)\n # Sort lexically\n ).get_sorted(key=lambda x: x[0].lower())\n\n it = Iterable(it\n # Create a Sphinx link from function name and module\n ).map(lambda i: ':py:func:`~{}.{}`'.format(i[1].__module__, i[0])\n # Group into 4s so we get a 4 column Table\n ).grouper(4, fillvalue=''\n # Convert to list because Table expects it.\n ).as_list()\n\n return '\\n'.join(RST().write(Table(rows=it)))", "def enaml_importer():\n print(imports, dir(imports))\n old = imports.get_importers()\n\n yield imports\n\n imports._imports__importers = old", "def imports(self):\n\n try:\n data_dir = self.import_dir()\n except ValueError, why:\n raise StopIteration(why)\n\n i = 0\n\n desc_size = self.obj_vm.profile.get_obj_size('_IMAGE_IMPORT_DESCRIPTOR')\n\n while 1:\n desc = obj.Object('_IMAGE_IMPORT_DESCRIPTOR',\n vm = self.obj_native_vm,\n offset = self.DllBase + data_dir.VirtualAddress + (i * desc_size),\n parent = self)\n\n # Stop if the IID is paged or all zeros\n if desc == None or desc.is_list_end():\n break\n\n # Stop if the IID contains invalid fields \n if not desc.valid(self._nt_header()):\n break\n\n dll_name = desc.dll_name()\n\n for o, f, n in desc._imported_functions():\n yield dll_name, o, f, n\n\n i += 1", "def _isolateImports(mf, f, *a, **kw):\n\n\n oldMetaPath = sys.meta_path\n oldPathHooks = sys.path_hooks\n _PEP302Mapper._oldSysModules = sys.modules.copy()\n oldImport = __builtin__.__import__\n #where is your god now?\n sys.path_hooks = []\n sys.modules.clear()\n sys.meta_path = [mf]\n __builtins__['__import__'] = mf.xocImport\n\n\n\n #stupid special case for the stdlib\n if mf.mapper.contains('warnings'):\n sys.modules['warnings'] = mf.mapper.lookup('warnings')\n\n try:\n return f(*a, **kw)\n finally:\n sys.meta_path = 
oldMetaPath\n sys.path_hooks = oldPathHooks\n sys.modules.clear()\n sys.modules.update(_PEP302Mapper._oldSysModules)\n __builtins__['__import__'] = oldImport", "def importer():\n pass", "def getAllImportFiles():\n\tdef get_path(base):\n\t\tb, t = os.path.split(base)\n\t\tif __name__ == t:\n\t\t\treturn [\"animation_nodes\"]\n\t\telse:\n\t\t\treturn get_path(b) + [t]\n\n\tfor root, dirs, files in os.walk(currentPath):\n\t\tpath = \".\".join(get_path(root))\n\t\tfor f in filter(lambda f:f.endswith(\".py\"), files):\n\t\t\tname = f[:-3]\n\t\t\tif not name == \"__init__\":\n\t\t\t\tyield path + \".\" + name", "def get_reader_funcs():\n return READERS", "def find_functions(module):\n for attrname in dir(module):\n attr = getattr(module, attrname)\n # iteratively get __module__ or __class__ (where __module__ fails for clas\n if callable(attr) and getattr(attr, '__module__', getattr(attr, '__class__', '')) == module.__name__:\n yield attr", "def caller():\n\n for func in funcs:\n func()", "def get_user_functions(table):\n for f in [(f) for f in table.values() if type(f) == types.FunctionType]:\n yield f", "def _get_import_addresses(runtime: 'Runtime',\n imports: Tuple[Import, ...]) -> Iterable[TAddress]:\n for import_ in imports:\n if not runtime.has_module(import_.module_name):\n raise Unlinkable(f\"Runtime has no known module named '{import_.module_name}'\")\n module = runtime.get_module(import_.module_name)\n for export in module.exports:\n if export.name == import_.as_name:\n yield export.value\n break\n else:\n raise Unlinkable(\n f\"No export found with name '{import_.as_name}'\"\n )", "def setUp(self):\n\n def import_hook(name, *args, **kwargs):\n if name == 'actstream':\n raise ImportError('test case module import failure')\n else:\n return self.original_imports(name, *args, **kwargs)\n\n self.original_imports = builtins.__import__\n builtins.__import__ = import_hook", "def _import_custom(self, custom_modules):\n for filter_module in custom_modules:\n info('Loading {}'.format(filter_module))\n funs = module_utils.get_all_functions(filter_module)\n for fun_name, fun in funs.items():\n if fun_name.startswith('function'):\n import_name = '_'.join(fun_name.split('_')[1:])\n debug('Adding function {}'.format(import_name))\n self._functions[import_name] = fun\n elif fun_name.startswith('filter'):\n import_name = '_'.join(fun_name.split('_')[1:])\n debug('Adding filter {}'.format(import_name))\n self._filters[import_name] = fun", "def FileIter(func_name):\n \n if func_name == 'convert_pmids_to_pmcs':\n sdir = partial(os.path.join,'Data', 'SearchResults')\n pmc_file = os.path.join('Data', 'PMC-ids.csv')\n files = [x for x in os.listdir(sdir('')) if x.endswith('.res')]\n for f in files:\n yield (sdir(f), pmc_file), sdir(f+'.conv')\n\n elif func_name == 'search_pubmed':\n sdir = partial(os.path.join,'Data', 'SearchResults')\n queryfile = os.path.join('Data', 'QueryList.txt')\n with open(queryfile) as handle:\n for row in csv.DictReader(handle):\n fname = '%s--%s.res' % (GeneralUtils.slugify(row['org']), \n GeneralUtils.slugify(row['search']))\n ofile = sdir(fname)\n yield queryfile, ofile, row['search']\n\n elif func_name == 'download_pmids':\n \n sdir = partial(os.path.join,'Data', 'SearchResults')\n odir = os.path.join('Data', 'RawXML')\n files = [x for x in os.listdir(sdir('')) if x.endswith('.conv')]\n \n for f in files:\n yield sdir(f), sdir(f+'.dl'), odir\n\n elif func_name == 'extract_text':\n \n sdir = partial(os.path.join, 'Data', 'RawXML')\n odir = partial(os.path.join, 'Data', 
'SentenceFiles')\n\n files = sorted([x for x in os.listdir(sdir('')) if x.endswith('.xml')])\n for f in files:\n name = f.split('.')[0]\n if f.startswith('PMC'):\n typ = 'pmc'\n else:\n typ = 'pubmed'\n\n yield sdir(f), odir(name+'.sent'), typ\n\n elif func_name == 'get_mutations':\n \n sdir = partial(os.path.join, 'Data', 'SentenceFiles')\n odir = partial(os.path.join, 'Data', 'MutFiles')\n finder = None#mutfinder_gen('regex.txt')\n\n files = sorted([x for x in os.listdir(sdir('')) if x.endswith('.sent')])\n\n for f in files:\n name = f.split('.')[0]\n yield sdir(f), odir(name + '.mut')\n \n elif func_name == 'process_mut_file':\n \n sdir = partial(os.path.join, 'Data', 'MutFiles')\n odir = partial(os.path.join, 'Data', 'ProteinFiles')\n\n files = sorted([x for x in os.listdir(sdir('')) if x.endswith('.mut')])\n\n for f in files:\n name = f.split('.')[0]\n yield sdir(f), (odir(name + '.prot'), odir(name + '.sen'))\n elif func_name == 'mapping_files':\n path = 'Data/Mapping/'\n items = (('ftp://ftp.uniprot.org/pub/databases/uniprot/current_release/knowledgebase/idmapping/idmapping.dat.gz', 'idmapping.dat.sort'),\n ('ftp://ftp.ncbi.nlm.nih.gov/gene/DATA/gene_info.gz', 'gene_info'),\n ('ftp://ftp.ncbi.nlm.nih.gov/pub/pmc/PMC-ids.csv.gz', 'PMC-ids.csv'),\n ('ftp://nlmpubs.nlm.nih.gov/online/mesh/.asciimesh/d2011.bin', 'd2011.bin'))\n for url, ofile in items:\n yield None, os.path.join(path, ofile), url, path", "def main():\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('-i', '--input', help='Input .py file', nargs='+')\n args = parser.parse_args()\n\n mod_func = []\n\n for pyfile in args.input:\n tree = ast.parse(open(pyfile).read())\n\n methods = sorted({node.name for node in ast.walk(tree)\n if isinstance(node, ast.FunctionDef)})\n mod_func.extend([[pyfile, methods[i]] for i in range(len(methods))])\n\n write_csv(\"meth_func.csv\", mod_func)", "def all_decorated_module_functions( this, module, exclude_methods = False, exclude_functions = False) :\n\t\tmodule_names = []\n\t\tfor el in dir( module) :\n\t\t\tfn = module.__dict__.get( el)\n\n\t\t\t# lookup for functions\n\t\t\tif not exclude_functions and type( fn) in [types.FunctionType, staticmethod, classmethod] :\n\t\t\t\tfn = this._getfn( fn)\n\t\t\t\tif len( this.get_decorators( fn)) > 0 :\n\t\t\t\t\tfname = fn.__annotations__[this.NATIVE_FUNCTION].__name__\n\t\t\t\t\tif fname not in module_names :\n\t\t\t\t\t\tyield { fname : module.__dict__.get( fname) }\n\t\t\t\t\t\tmodule_names += [fname]\n\t\t\t\n\t\t\t# lookup for class methods\n\t\t\tif not exclude_methods and type( fn) is type :\n\t\t\t\tfor cls_el in dir( fn) :\n\t\t\t\t\tmethod = fn.__dict__.get( cls_el)\n\t\t\t\t\tif type( method) in [types.FunctionType, staticmethod, classmethod] :\n\t\t\t\t\t\tmethod = this._getfn( method)\n\t\t\t\t\t\tif len( this.get_decorators( method)) > 0:\n\t\t\t\t\t\t\tfname = method.__annotations__[this.NATIVE_FUNCTION].__name__\n\t\t\t\t\t\t\tif fname not in module_names :\n\t\t\t\t\t\t\t\tyield { \"%s.%s\" %(fn.__name__, fname) : fn.__dict__.get( fname) }\n\t\t\t\t\t\t\t\tmodule_names += [fname]", "def iter_spider_classes(module):\n ...", "def module_functions_decorated_with( this, module, decorator, exclude_methods = False, exclude_functions = False) :\n\t\tfor mfn in this.all_decorated_module_functions( module, exclude_methods, exclude_functions) :\n\t\t\tfor fname, fn in mfn.items() :\n\t\t\t\tif decorator in this.get_decorators( fn) :\n\t\t\t\t\tyield { fname : fn }", "def ls():\n for f in 
get_user_functions(globals()):\n print (str(f).split()[1])", "def bunch__functions(idfobject): \n funcdct = idfobject.__functions\n funcsresults = [(key, funcdct[key](idfobject)) for key in funcdct.keys()]\n return funcsresults", "def all_registered_modules():\n yield from iterchain(modules.values() for modules in Registry.monomers.values())", "def visit_Module(self, node):\n self.generic_visit(node)\n return self.functions", "def modules():", "def test_import_string():\n tests = [\n 'virtstrap.commands',\n 'virtstrap.options'\n ]\n for string in tests:\n yield import_a_string, string", "def iter_import_chunks(self):\r\n chunk = []\r\n last_line = None\r\n for leaf in self.python_file.tree.body:\r\n if isinstance(leaf, (ast.Import, ast.ImportFrom)):\r\n # we've seen previous imports but this import is not in the same chunk\r\n if last_line and leaf.lineno != last_line[1]:\r\n yield chunk\r\n chunk = [leaf]\r\n # we've either not seen previous imports or this is part of the same chunk\r\n elif not last_line or last_line and leaf.lineno == last_line[1]:\r\n chunk.append(leaf)\r\n last_line = self.python_file.logical_lines[leaf.lineno]\r\n if chunk:\r\n yield chunk" ]
[ "0.6609343", "0.6466236", "0.6398134", "0.62762254", "0.619583", "0.6191924", "0.613506", "0.6125171", "0.6005815", "0.59317636", "0.5931635", "0.5896827", "0.582619", "0.58228326", "0.57856643", "0.57601804", "0.575943", "0.5733376", "0.5729741", "0.5694585", "0.5692064", "0.56764007", "0.567436", "0.5669576", "0.56679827", "0.5655817", "0.56467235", "0.5630116", "0.56256044", "0.56119597" ]
0.6907061
0
Return the _IMAGE_NT_HEADERS object
def _nt_header(self): try: dos_header = obj.Object("_IMAGE_DOS_HEADER", offset = self.DllBase, vm = self.obj_native_vm) return dos_header.get_nt_header() except ValueError: return obj.NoneObject("Failed initial sanity checks") except exceptions.SanityCheckException: return obj.NoneObject("Failed initial sanity checks. Try -u or --unsafe")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_nt_header(self):\n\n if self.e_magic != 0x5a4d:\n raise ValueError('e_magic {0:04X} is not a valid DOS signature.'.format(self.e_magic))\n\n nt_header = obj.Object(\"_IMAGE_NT_HEADERS\",\n offset = self.e_lfanew + self.obj_offset,\n vm = self.obj_vm,\n native_vm = self.obj_native_vm)\n\n if nt_header.Signature != 0x4550:\n raise ValueError('NT header signature {0:04X} is not a valid'.format(nt_header.Signature))\n\n return nt_header", "def get_headers(self):\n return self.numHeadList", "def get_headers(self, ):\n return self.attrs.get(self.AttributeNames.HEADERS, None)", "def GetHeaders(the_file):\n\n data = exifread.process_file(the_file, 'UNDEF', False, False, False)\n return data", "def get_headers(self):\n \n return self.headers", "def get_http_headers(self):\n return dict(self.headers)", "def get_headers(self):\r\n raise NotImplementedError", "def _image_hdr(self, hdr):\n # Called ... in OpenMIMS\n d = {}\n d['header size'], d['type'], d['width'], d['height'], \\\n d['bytes per pixel'], d['masses'], d['planes'], \\\n d['raster'], d['original filename'] = \\\n unpack(self._bo + 'i 6h i 64s', hdr.read(84))\n\n # Called nickname in OpenMIMS\n d['original filename'] = self._cleanup_string(d['original filename'])\n if d['header size'] != 84:\n raise ValueError(\"Image header size is {}, not 84.\".format(d['header size']))\n return d", "def getAllHeaders():", "def getheaders(self):\n return self.__headers", "def __get_headers(self):\n\n return {}", "def getHeaders(self):\n hd = {}\n line = self.conn.readline()\n while line != \"\\r\\n\":\n print \":\"+line+\":\"+\" len = \",len(line)\n key,value = line.split(':',1)\n hd[key] = value.rstrip()\n line = self.conn.readline()\n return hd", "def headers(self):\n return self._header", "def header(self):\n header_str = self._base[0:self.s_allocator_header].tostring()\n magic, pos, used = struct.unpack(str('III'), header_str)\n\n assert magic == self._magic_num, \\\n 'invalid header magic[%d] in shared memory' % (magic)\n return self._header_pages, self._total_pages, pos, used", "def headers(self):\n return self.generator.headers", "def headers(self):\n return(self.__response.headers)", "def getHeader():\n return _HEADER", "def getheaders(self):\n return self.urllib3_response.getheaders()", "def getheaders(self):\n return self.urllib3_response.getheaders()", "def getheaders(self):\n return self.urllib3_response.getheaders()", "def headers(self):\n\n return None", "def headers(self):\r\n return dict(**self._get_headers())", "def read_headers(filelike):\n return reader.Reader.read_headers(filelike).datafile", "def image_header(self):\n\n if not self._image_header:\n path_image_header = os.path.join(\n self._path, f\"ImageSet_{self._image['ImageSetID']}.header\"\n )\n\n # Make sure the ImageInfo file really exists\n if not os.path.exists(path_image_header):\n self.logger.warning(\n \"ImageHeader path doesn't exist: %s\", path_image_header\n )\n return None\n\n self.logger.debug(\"Reading image data from: %s\", path_image_header)\n self._image_header = {}\n with open(path_image_header) as f:\n for line in f:\n parts = line.split(\" = \")\n\n if len(parts) < 2:\n parts = line.split(\" : \")\n\n if len(parts) > 1:\n self._image_header[parts[0].strip()] = (\n parts[1].replace(\";\", \"\").replace(\"\\n\", \"\")\n )\n\n return self._image_header", "def fusion_api_get_headers(self):\n return self.fusion_client._headers.copy()", "def getHeaders(self):\n return [\"Temp\"]", "def get_header(self):\n return self._header", "def get_headers(self):\n 
return ['dep_red', 'dep_sd', 'hyp_red', 'hyp_sd']", "def get_private_headers(self):\n if self._private_headers is None:\n with open(self.headername, 'r') as headerfile:\n included = re.findall(r'#include \"(.*)\\.h\"', headerfile.read())\n self._private_headers = list(included)\n return self._private_headers", "def headers(self) -> dict:\n raise NotImplementedError # pragma: no cover" ]
[ "0.77482045", "0.65472335", "0.62982917", "0.629468", "0.6281683", "0.6223163", "0.6189997", "0.61775583", "0.6140071", "0.613318", "0.6096307", "0.6025481", "0.5988569", "0.59720755", "0.5940392", "0.5910091", "0.5899667", "0.58247423", "0.58247423", "0.58247423", "0.5807061", "0.57864684", "0.57754666", "0.57698125", "0.5761981", "0.56893474", "0.5677138", "0.56539327", "0.5651349", "0.56361437" ]
0.7455477
1
Return the IMAGE_DEBUG_DIRECTORY for debug info
def debug_dir(self): return self._directory(6) # IMAGE_DEBUG_DIRECTORY
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_debug_directory(self):\n \n try:\n data_dir = self.debug_dir()\n except ValueError, why:\n return obj.NoneObject(str(why))\n\n return obj.Object(\"_IMAGE_DEBUG_DIRECTORY\", \n offset = self.DllBase + data_dir.VirtualAddress, \n vm = self.obj_native_vm)", "def getDebugDirectory(self) -> ghidra.app.util.bin.format.pe.debug.DebugDirectory:\n ...", "def setup_debug(debug_flag):\n parent_path = os.getcwd() + '/'\n if debug_flag:\n return(parent_path + 'debug_data/')\n else:\n return(parent_path)", "def img_dir(self):\n try:\n return dirname(self.img_files[0])\n except:\n return \"Not available\"", "def get_image_dir(self):\n return self.img_dir", "def get_debug():\n return _DEBUG", "def debug_filename(pe):\n if hasattr(pe, 'DIRECTORY_ENTRY_DEBUG'):\n for i in pe.DIRECTORY_ENTRY_DEBUG:\n if hasattr(i.entry, 'PdbFileName'):\n return i.entry.PdbFileName.decode('utf-8', 'ignore')\n return None", "def get_image_dir():\n directory = os.path.abspath(os.path.dirname(__file__))\n directory = os.path.join(directory, 'images')\n return directory", "def _debug():\n return _DEBUG", "def environmentImagesPath():\n # A recursion counter to make sure that the loop ends.\n count = 0\n # Get the path to the Blender executable.\n filePath = os.path.dirname(bpy.app.binary_path)\n # Find the lowest path level which contains Blender.\n while \"blender\" not in os.path.basename(filePath).lower():\n filePath = os.path.dirname(filePath)\n if not filePath or count == 20:\n break\n count += 1\n\n # Search all subpaths for the datafiles folder. Based on this folder\n # the path can be completed.\n for dirPath, dirs, fileList in os.walk(filePath):\n if os.path.basename(dirPath) == \"datafiles\":\n return os.path.join(os.path.join(dirPath, \"studiolights\"), \"world\")", "def GetSystemImageDir(sys_dir=None):\n if sys_dir:\n return _GetFilePath(sys_dir)\n return _GetFilePath(FLAGS.system_image_dir)", "def dockerfile_dir(self):\n return self._dockerfile_dir", "def find_diagnostics_dir(cube, image):\n return find_subdir(cube, image, 'diagnostics')", "def _image_location(image_info):\n return os.path.join(tempfile.gettempdir(), image_info['id'])", "def imagePath(self):\n if self.use_dic:\n if self.imlist:\n paths = []\n for img in self.allimgs:\n paths.append(join(self.home, 'data'+str(self.data), self.activity, self.imsize, str(img)+'.jpg'))\n return paths\n else:\n path = join(self.home, 'data'+str(self.data), self.activity, self.imsize, str(self.img)+'.jpg')\n else:\n path = self.img\n return path", "def get_dir_path():\n return DIR_PATH", "def debug(self):\n return Config.DEBUG", "def get_packaging_pictures_path(self):\n file_path = os.path.dirname(__file__)\n file_path = os.path.join(file_path, \"Packaging\")\n return file_path", "def GetPath () :\n return sys.hal_log_values [\"__log_path\"]", "def log_directory(self):\n\n return self.get_raw(\"log_directory\")", "def get_pathname(self):\n return self.image_data.path", "def instance_dir(self):\n\t\treturn os.path.join(self.basedir, self.yml['instdir'])", "def getImagePath():\n currentPath = os.path.dirname(__file__)\n resourcesPath = os.path.join(currentPath, \"Resources\")\n imagesPath = os.path.join(resourcesPath, \"Images\")\n return imagesPath", "def getType(self):\n return consts.IMAGE_DATA_DIRECTORY", "def get_images_dir(args):\n if args.install_location:\n return args.install_location\n if os.environ.get(\"UHD_IMAGES_DIR\"):\n log(\"DEBUG\",\n \"UHD_IMAGES_DIR environment variable is set, using to set \"\n \"install location.\")\n return 
os.environ.get(\"UHD_IMAGES_DIR\")\n return _DEFAULT_INSTALL_PATH", "def info_directory(self) -> Optional[str]:\n raise NotImplementedError()", "def debug():\n return int(DEBUG)", "def process_image_debug(self, image):\n return self.process_image(image, debug=True)", "def debug(self):\n return self.settings['debug']", "def get_media_directory():\n\treturn _paths[_MEDIA_DIRECTORY_KEY]" ]
[ "0.8450183", "0.7843698", "0.6724272", "0.6555663", "0.6499181", "0.6438066", "0.6284945", "0.6233516", "0.6100424", "0.60697454", "0.5912636", "0.5892272", "0.588483", "0.58785224", "0.58783144", "0.5827344", "0.58154136", "0.57860655", "0.57479817", "0.57219386", "0.5719696", "0.5709489", "0.57065463", "0.5705651", "0.57015866", "0.5675109", "0.5659467", "0.5642595", "0.56307316", "0.5614941" ]
0.9076247
0
Return the debug directory object for this PE
def get_debug_directory(self): try: data_dir = self.debug_dir() except ValueError, why: return obj.NoneObject(str(why)) return obj.Object("_IMAGE_DEBUG_DIRECTORY", offset = self.DllBase + data_dir.VirtualAddress, vm = self.obj_native_vm)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getDebugDirectory(self) -> ghidra.app.util.bin.format.pe.debug.DebugDirectory:\n ...", "def debug_dir(self):\n return self._directory(6) # IMAGE_DEBUG_DIRECTORY", "def debug_filename(pe):\n if hasattr(pe, 'DIRECTORY_ENTRY_DEBUG'):\n for i in pe.DIRECTORY_ENTRY_DEBUG:\n if hasattr(i.entry, 'PdbFileName'):\n return i.entry.PdbFileName.decode('utf-8', 'ignore')\n return None", "def getDir( self ):\n return self.dir", "def get_directory(self):\n return self.directory", "def get_dir(self):\n return self.dir", "def setup_debug(debug_flag):\n parent_path = os.getcwd() + '/'\n if debug_flag:\n return(parent_path + 'debug_data/')\n else:\n return(parent_path)", "def dir(self):\n return self._dir", "def get_directory(self):\n mypath = mlblocks.get_primitives_paths()[-1]\n return mypath", "def getDesDir(self):\n if self.desdir == None:\n cwd = os.getcwd()\n self.desdir = os.path.join(cwd, self.args.dest)\n return self.desdir", "def path(self):\n return self._dir_entry.path", "def GetPath(self):\r\n\r\n return self.directory", "def getDirectory(self):\n logger.debug(\"WarningSc.getDirectory\")\n self.directory = self.gui.get_object(\"dirname\").get_text().strip()\n return self.directory", "def getCoreDir(thisDir):\n coreFolder = str(CoreDirectory())\n # print(('coreDirectory: %s'% coreFolder))\n #the stuff testing for existance of natlinkmain.py etc has been removed, no\n #longer required.\n return coreFolder", "def get_standard_directory(self):\n dirpath = (c.c_char * 256)()\n self._lib.StGetDirectory(c.c_int(1), c.byref(dirpath), 256)\n return dirpath.value.decode('ascii')", "def dir(self):\n return os.path.dirname(self.path)", "def dir(self):\n if not self.on:\n return None\n try:\n return self._dir\n except AttributeError:\n pass\n try:\n parentdir = self._parent.dir\n ownername = self._ownername\n except AttributeError:\n return None\n if parentdir is None:\n return None\n return os.path.join(parentdir, ownername)", "def export_dir(self):\n return self._directory(0) # DIRECTORY_ENTRY_EXPORT", "def __enter__(self):\n dirpath_bytes = tempfile.mkdtemp()\n self.dirpath = str(dirpath_bytes.replace('\\\\', '\\\\\\\\'))\n return self.dirpath", "def get(self):\n return self.directory_name", "def dicomDir(self):\n return self.__dicomDir", "def pdb_path(scope=\"session\"):\n return join(dirname(__file__), pardir, \"new_data\", \"pdb\")", "def path(self):\n return self._container_dir", "def dirpath(self):\n return self.__edir", "def getDireito(self):\n return self.__direito", "def projectDir(self):\n logger.debug(\"Func: projectDir/getter\")\n return self._pathsDict[\"projectDir\"]", "def get_dir_path():\n return DIR_PATH", "def data_dir(self):\r\n return self._data_dir", "def proof_dir(self):\n return self.dir", "def project_directory(self):\n\n # try to figure it out from the maps\n # search for Project path\n\n project_dir = None\n maps = self.comp_prefs['Paths'].get('Map', None)\n if maps:\n project_dir = maps.get('Project:', None)\n\n #if not project_dir:\n # # set the map for the project dir\n # if self.version:\n # project_dir = os.path.dirname(self.version.absolute_path)\n # self.project_directory = project_dir\n\n return project_dir" ]
[ "0.851801", "0.78498775", "0.666689", "0.6380202", "0.63746494", "0.6281661", "0.6262398", "0.62353474", "0.6201577", "0.6109504", "0.6097475", "0.60705024", "0.59828883", "0.5972983", "0.5886844", "0.5879197", "0.5876597", "0.5818916", "0.5818242", "0.5777866", "0.5771645", "0.5761484", "0.57482845", "0.57433414", "0.57426214", "0.57173187", "0.56998104", "0.567243", "0.5669295", "0.56600106" ]
0.8507757
1
Generator for the PE's imported functions. The _DIRECTORY_ENTRY_IMPORT.VirtualAddress points to an array of _IMAGE_IMPORT_DESCRIPTOR structures. The end is reached when the IID structure is all zeros.
def imports(self): try: data_dir = self.import_dir() except ValueError, why: raise StopIteration(why) i = 0 desc_size = self.obj_vm.profile.get_obj_size('_IMAGE_IMPORT_DESCRIPTOR') while 1: desc = obj.Object('_IMAGE_IMPORT_DESCRIPTOR', vm = self.obj_native_vm, offset = self.DllBase + data_dir.VirtualAddress + (i * desc_size), parent = self) # Stop if the IID is paged or all zeros if desc == None or desc.is_list_end(): break # Stop if the IID contains invalid fields if not desc.valid(self._nt_header()): break dll_name = desc.dll_name() for o, f, n in desc._imported_functions(): yield dll_name, o, f, n i += 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _imported_functions(self):\n\n i = 0\n while 1:\n thunk = obj.Object('_IMAGE_THUNK_DATA',\n offset = self.obj_parent.DllBase + self.OriginalFirstThunk +\n i * self.obj_vm.profile.get_obj_size('_IMAGE_THUNK_DATA'),\n vm = self.obj_native_vm)\n\n # We've reached the end when the element is zero \n if thunk == None or thunk.AddressOfData == 0:\n break\n\n o = obj.NoneObject(\"Ordinal not accessible?\")\n n = obj.NoneObject(\"Imported by ordinal?\")\n f = obj.NoneObject(\"FirstThunk not accessible\")\n\n # If the highest bit (32 for x86 and 64 for x64) is set, the function is \n # imported by ordinal and the lowest 16-bits contain the ordinal value. \n # Otherwise, the lowest bits (0-31 for x86 and 0-63 for x64) contain an \n # RVA to an _IMAGE_IMPORT_BY_NAME struct. \n if thunk.OrdinalBit == 1:\n o = thunk.Ordinal & 0xFFFF\n else:\n iibn = obj.Object(\"_IMAGE_IMPORT_BY_NAME\",\n offset = self.obj_parent.DllBase +\n thunk.AddressOfData,\n vm = self.obj_native_vm)\n o = iibn.Hint\n n = iibn.Name\n\n # See if the import is bound (i.e. resolved)\n first_thunk = obj.Object('_IMAGE_THUNK_DATA',\n offset = self.obj_parent.DllBase + self.FirstThunk +\n i * self.obj_vm.profile.get_obj_size('_IMAGE_THUNK_DATA'),\n vm = self.obj_native_vm)\n if first_thunk:\n f = first_thunk.Function.v()\n\n yield o, f, str(n or '')\n i += 1", "def _exported_functions(self):\n\n mod_base = self.obj_parent.DllBase\n exp_dir = self.obj_parent.export_dir()\n\n # PE files with a large number of functions will have arrays\n # that spans multiple pages. Thus the first entries may be valid, \n # last entries may be valid, but middle entries may be invalid\n # (paged). In the various checks below, we test for None (paged)\n # and zero (non-paged but invalid RVA). \n\n # Array of RVAs to function code \n address_of_functions = obj.Object('Array',\n offset = mod_base + self.AddressOfFunctions,\n targetType = 'unsigned int',\n count = self.NumberOfFunctions,\n vm = self.obj_native_vm)\n # Array of RVAs to function names \n address_of_names = obj.Object('Array',\n offset = mod_base + self.AddressOfNames,\n targetType = 'unsigned int',\n count = self.NumberOfNames,\n vm = self.obj_native_vm)\n # Array of RVAs to function ordinals \n address_of_name_ordinals = obj.Object('Array',\n offset = mod_base + self.AddressOfNameOrdinals,\n targetType = 'unsigned short',\n count = self.NumberOfNames,\n vm = self.obj_native_vm)\n\n # When functions are exported by Name, it will increase\n # NumberOfNames by 1 and NumberOfFunctions by 1. When \n # functions are exported by Ordinal, only the NumberOfFunctions\n # will increase. First we enum functions exported by Name \n # and track their corresponding Ordinals, so that when we enum\n # functions exported by Ordinal only, we don't duplicate. \n\n seen_ordinals = []\n\n # Handle functions exported by name *and* ordinal \n for i in range(self.NumberOfNames):\n\n name_rva = address_of_names[i]\n ordinal = address_of_name_ordinals[i]\n\n if name_rva in (0, None):\n continue\n\n # Check the sanity of ordinal values before using it as an index\n if ordinal == None or ordinal >= self.NumberOfFunctions:\n continue\n\n func_rva = address_of_functions[ordinal]\n\n if func_rva in (0, None):\n continue\n\n # Handle forwarded exports. If the function's RVA is inside the exports \n # section (as given by the VirtualAddress and Size fields in the \n # DataDirectory), the symbol is forwarded. Return the name of the \n # forwarded function and None as the function address. 
\n\n if (func_rva >= exp_dir.VirtualAddress and\n func_rva < exp_dir.VirtualAddress + exp_dir.Size):\n n = self._name(func_rva)\n f = obj.NoneObject(\"Ordinal function {0} in module {1} forwards to {2}\".format(\n ordinal, str(self.obj_parent.BaseDllName or ''), n))\n else:\n n = self._name(name_rva)\n f = func_rva\n\n # Add the ordinal base and save it \n ordinal += self.Base\n seen_ordinals.append(ordinal)\n\n yield ordinal, f, n\n\n # Handle functions exported by ordinal only \n for i in range(self.NumberOfFunctions):\n\n ordinal = self.Base + i\n\n # Skip functions already enumberated above \n if ordinal not in seen_ordinals:\n\n func_rva = address_of_functions[i]\n\n if func_rva in (0, None):\n continue\n\n seen_ordinals.append(ordinal)\n\n # There is no name RVA \n yield ordinal, func_rva, obj.NoneObject(\"Name RVA not accessible\")", "def _get_import_addresses(runtime: 'Runtime',\n imports: Tuple[Import, ...]) -> Iterable[TAddress]:\n for import_ in imports:\n if not runtime.has_module(import_.module_name):\n raise Unlinkable(f\"Runtime has no known module named '{import_.module_name}'\")\n module = runtime.get_module(import_.module_name)\n for export in module.exports:\n if export.name == import_.as_name:\n yield export.value\n break\n else:\n raise Unlinkable(\n f\"No export found with name '{import_.as_name}'\"\n )", "def gen_new_import_lookup_table(self, fn_name, dll_name):\n # TODO : currently, this method modify import lookup table directly,\n # it must be abstract.\n name = self.import_entries[-1].dll\n name_rva = self.import_entries[-1].struct.Name\n next_ilt_rva = name_rva + len(name) + 1\n fn_name = '\\x00' + fn_name\n self.PE.set_bytes_at_rva(next_ilt_rva, fn_name)\n dll_name = '\\x00' + dll_name + '\\x00'\n dll_rva = next_ilt_rva + len(dll_name)\n self.PE.set_bytes_at_rva(dll_rva, dll_name)\n return next_ilt_rva, dll_rva + 1", "def getImportTableData(self):\n\n def imp_cb(ea, name, ord):\n \"\"\"\n Import enumeration callback function. 
used by idaapi.enum_import_names .\n \"\"\"\n\n tmpImports.append([self.current_module_name, ea, name, ord])\n return True\n\n # Contains static import table data (w\\o real function addresses)\n tmpImports = []\n imp_num = idaapi.get_import_module_qty() # Number of imported modules\n\n for i in xrange(0, imp_num):\n self.current_module_name = idaapi.get_import_module_name(i).lower()\n idaapi.enum_import_names(i, imp_cb)\n\n # Get runtime function addresses and store in self.rt_import_table\n if not idaapi.is_debugger_on():\n raise RuntimeError(\"Debugger is not currently active.\")\n\n for module_name, ea, name, ord in tmpImports:\n func_real_adrs = get_adrs_mem(ea)\n self.rt_import_table[func_real_adrs] = (module_name, ea, name, ord)", "def exports(self):\n\n try:\n data_dir = self.export_dir()\n except ValueError, why:\n raise StopIteration(why)\n\n expdir = obj.Object('_IMAGE_EXPORT_DIRECTORY',\n offset = self.DllBase + data_dir.VirtualAddress,\n vm = self.obj_native_vm,\n parent = self)\n\n if expdir.valid(self._nt_header()):\n # Ordinal, Function RVA, and Name Object \n for o, f, n in expdir._exported_functions():\n yield o, f, n", "def get_imported_endpoints(self):\n with self.__import_lock:\n return [reg.get_import_reference() for reg in self.__imported_regs]", "def add_function_to_import(self, dll_import_descriptor, dll_name, fn_name):\n\n # TODO : Currently, only the functions in the list are supported.\n ordinal = self.get_ordinal_from_common_library(dll_name, fn_name)\n if ordinal == 0:\n print(\"not supported yet.\")\n exit()\n\n ordinal += self._ORDINAL_MASK_\n thunk = self.pe_manager.gen_new_thunk(ordinal)\n last_import_thunk_offset = self.get_last_import_thunk_offset()\n print(\"IMPORT THUNK OFFSET : {:x}\".format(last_import_thunk_offset))\n print(\"IMPORT THUNK RVA : {:x}\".format(\n self.PE.get_rva_from_offset(last_import_thunk_offset)\n ))\n thunk.set_file_offset(last_import_thunk_offset + 4)\n self.append_import_thunk_to_descriptor(dll_import_descriptor, thunk)", "def _getImports_pe(pth):\n dlls = set()\n # By default library pefile parses all PE information.\n # We are only interested in the list of dependent dlls.\n # Performance is improved by reading only needed information.\n # https://code.google.com/p/pefile/wiki/UsageExamples\n\n pe = pefile.PE(pth, fast_load=True)\n pe.parse_data_directories(directories=[\n pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_IMPORT'],\n pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_EXPORT'],\n ],\n forwarded_exports_only=True,\n import_dllnames_only=True,\n )\n\n # Some libraries have no other binary dependencies. Use empty list\n # in that case. Otherwise pefile would return None.\n # e.g. C:\\windows\\system32\\kernel32.dll on Wine\n for entry in getattr(pe, 'DIRECTORY_ENTRY_IMPORT', []):\n dll_str = winutils.convert_dll_name_to_str(entry.dll)\n dlls.add(dll_str)\n\n # We must also read the exports table to find forwarded symbols:\n # http://blogs.msdn.com/b/oldnewthing/archive/2006/07/19/671238.aspx\n exportSymbols = getattr(pe, 'DIRECTORY_ENTRY_EXPORT', None)\n if exportSymbols:\n for sym in exportSymbols.symbols:\n if sym.forwarder is not None:\n # sym.forwarder is a bytes object. 
Convert it to a string.\n forwarder = winutils.convert_dll_name_to_str(sym.forwarder)\n # sym.forwarder is for example 'KERNEL32.EnterCriticalSection'\n dll = forwarder.split('.')[0]\n dlls.add(dll + \".dll\")\n\n pe.close()\n return dlls", "def gen_new_lib(self, target_pe, filter=lambda _: True):\n\n new_lib = []\n for lib_name, ad in self.name2off.items():\n # Build an IMAGE_IMPORT_DESCRIPTOR\n\n # Get fixed addresses\n out_ads = dict() # addr -> func_name\n for func_name, dst_addresses in self.lib_imp2dstad[ad].items():\n out_ads.update({addr:func_name for addr in dst_addresses})\n\n # Filter available addresses according to @filter\n all_ads = [addr for addr in out_ads.keys() if filter(addr)]\n log.debug('ads: %s' % map(hex, all_ads))\n if not all_ads:\n continue\n\n # Keep non-NULL elements\n all_ads.sort()\n for i, x in enumerate(all_ads):\n if x not in [0, None]:\n break\n all_ads = all_ads[i:]\n\n while all_ads:\n # Find libname's Import Address Table\n othunk = all_ads[0]\n i = 0\n while i + 1 < len(all_ads) and all_ads[i] + 4 == all_ads[i + 1]:\n i += 1\n # 'i + 1' is IAT's length\n\n # Effectively build an IMAGE_IMPORT_DESCRIPTOR\n funcs = [out_ads[addr] for addr in all_ads[:i + 1]]\n try:\n rva = target_pe.virt2rva(othunk)\n except pe.InvalidOffset:\n pass\n else:\n new_lib.append(({\"name\": lib_name,\n \"firstthunk\": rva},\n funcs)\n )\n\n # Update elements to handle\n all_ads = all_ads[i + 1:]\n\n return new_lib", "def build_import_table(self):\n self.section_alignment = 4096\n pe = pefile.PE(data=self.mem_dump.dump)\n IAT_addr = pe.OPTIONAL_HEADER.DATA_DIRECTORY[1].VirtualAddress\n print 'IAT_addr: ' + hex(IAT_addr)\n raw_size = self.import_table.raw_memory_size()\n jump_table_size = self.import_table.get_jump_table_size()\n\n print \"raw_size: \", `raw_size`\n print \"jump table size: \", `jump_table_size`\n\n size = ((raw_size+jump_table_size) / self.section_alignment + 1) * self.section_alignment\n print size\n byte_array = bytearray(size)\n IID_pos = 0\n content_pos = (len(self.import_table.dlls) + 1) * 20\n\n jump_table_pos = IID_pos + raw_size\n\n print \"jump table position: \", hex(jump_table_pos)\n\n for dll in self.import_table.dlls:\n print dll\n thunk_position = content_pos + len(dll) + 1\n byte_array[IID_pos + 12:IID_pos + 16] = pack('<L', content_pos + IAT_addr)\n byte_array[IID_pos + 16:IID_pos + 20] = pack('<L', thunk_position + IAT_addr)\n\n print 'Dll position: ', content_pos\n byte_array[content_pos:content_pos + len(dll)] = dll\n names_pos = thunk_position + (len(self.import_table.dlls[dll]) + 1) * 4\n\n for function_name in self.import_table.dlls[dll]:\n self.import_table.set_thunk_addr(dll, function_name, thunk_position + IAT_addr)\n byte_array[thunk_position:thunk_position + 4] = pack('<L', names_pos + IAT_addr)\n func_name = '\\x00\\x00' + function_name\n func_name_len = len(function_name) + 2\n byte_array[names_pos:names_pos + func_name_len] = func_name\n\n\n # Write the jump table entry \n byte_array[jump_table_pos] = 0xFF\n byte_array[jump_table_pos+1] = 0x25\n byte_array[jump_table_pos+2:jump_table_pos+6] = pack(\"<L\", thunk_position + IAT_addr + self.mem_dump.base_address)\n\n self.import_table.set_jump_addr(dll, function_name, jump_table_pos + IAT_addr)\n\n # Accumulate the new counters\n thunk_position += 4\n names_pos += func_name_len\n jump_table_pos += 6\n\n IID_pos += 20\n content_pos = names_pos + 1\n\n self.mem_dump.append_memory_before_end(byte_array, IAT_addr)", "def print_debug_imports(self):\n for dbgImp in self.rt_import_table:\n 
(module_name, ea, name, ord) = self.rt_import_table[dbgImp]\n idaapi.msg(\"ModuleName - %s,\\t\\tFunctionName - %s,\\t\\t Address in IAT - %s,\\t\\t Real address - %s\\n\" %\n (module_name, name, hex(ea), hex(dbgImp)))", "def get_last_import_lookup_thunk(self):\n (import_address_table_rva, size) = \\\n self.pe_manager.get_import_address_table_address_range()\n offset = 0\n import_lookup_thunk = None\n for entry in self.import_structures:\n if entry.name == 'IMAGE_THUNK_DATA':\n entry_offset = entry.get_file_offset()\n entry_rva = self.PE.get_rva_from_offset(entry_offset)\n if entry_offset > offset \\\n and not (import_address_table_rva\n <= entry_rva\n <= import_address_table_rva + size):\n if entry.AddressOfData > 0:\n offset = entry_offset\n import_lookup_thunk = entry\n return import_lookup_thunk", "def move_imports_offset_to_new_section(self):\n self.print_imports_offset()\n (entry_rva, size) = self.pe_manager.get_import_descriptor_address_range()\n section = self.pe_manager.get_section_belong_rva(entry_rva)\n data = self.pe_manager.get_section_raw_data(section)\n # append free space that to use be import descriptor.\n import_free_space = 0x3000\n data = data + bytearray(import_free_space)\n new_section = self.pe_manager.create_new_data_section(data, \".newdata\")\n self._origin_import_section = section\n self._new_import_section = new_section\n\n rva_gap_size = new_section.VirtualAddress - section.VirtualAddress\n offset_gap_size = new_section.PointerToRawData \\\n - section.PointerToRawData\n\n origin_iat_rva = 0\n origin_iat_size = 0\n for entry in self.PE.OPTIONAL_HEADER.DATA_DIRECTORY:\n if entry.name == 'IMAGE_DIRECTORY_ENTRY_IMPORT':\n entry.VirtualAddress += (rva_gap_size\n + self._IMPORT_DESCRIPTOR_TABLE_RVA_)\n elif entry.name == 'IMAGE_DIRECTORY_ENTRY_IAT':\n origin_iat_rva = entry.VirtualAddress\n origin_iat_size = entry.Size\n entry.VirtualAddress += rva_gap_size\n\n for entry in self.import_structures:\n entry_rva = self.PE.get_rva_from_offset(entry.get_file_offset())\n if entry.name == 'IMAGE_IMPORT_DESCRIPTOR':\n entry.set_file_offset(\n self.PE.get_offset_from_rva(entry_rva + rva_gap_size\n + self._IMPORT_DESCRIPTOR_TABLE_RVA_)\n )\n if entry.OriginalFirstThunk > 0:\n entry.OriginalFirstThunk += (rva_gap_size\n + self._IMPORT_LOOKUP_TABLE_RVA_)\n if entry.Characteristics > 0:\n entry.Characteristics += (rva_gap_size\n + self._IMPORT_LOOKUP_TABLE_RVA_)\n if entry.FirstThunk > 0:\n # FirstThunk point to _IMPORT_ADDRESS_TABLE_\n entry.FirstThunk += (rva_gap_size + self._IMPORT_ADDRESS_TABLE_RVA_)\n if entry.Name > 0:\n entry.Name += rva_gap_size\n elif entry.name == 'IMAGE_THUNK_DATA':\n entry_rva = self.PE.get_rva_from_offset(entry.get_file_offset())\n if (origin_iat_rva\n <= entry_rva\n <= origin_iat_rva + origin_iat_size):\n # this entry is located at import address table\n entry.set_file_offset(\n self.PE.get_offset_from_rva(\n entry_rva + rva_gap_size\n + self._IMPORT_ADDRESS_TABLE_RVA_)\n )\n else:\n # this entry is located at import lookup table\n entry.set_file_offset(\n self.PE.get_offset_from_rva(\n entry_rva + rva_gap_size\n + self._IMPORT_LOOKUP_TABLE_RVA_)\n )\n\n if entry.Ordinal & 0x80000000:\n # This is Ordinal import\n pass\n else:\n # IMPORT_THUNK_DATA is not moving.\n if entry.Ordinal > 0:\n entry.Ordinal += rva_gap_size + self._IMPORT_ADDRESS_TABLE_RVA_\n if entry.AddressOfData > 0:\n entry.AddressOfData += rva_gap_size + self._IMPORT_ADDRESS_TABLE_RVA_\n if entry.ForwarderString > 0:\n entry.ForwarderString += rva_gap_size + 
self._IMPORT_ADDRESS_TABLE_RVA_\n if entry.Function > 0:\n entry.Function += rva_gap_size + self._IMPORT_ADDRESS_TABLE_RVA_\n\n for entry in self.import_structures:\n if entry.name == 'IMAGE_IMPORT_DESCRIPTOR':\n if entry.OriginalFirstThunk > 0:\n pass\n if entry.FirstThunk > 0:\n pass\n elif entry.name == 'IMAGE_THUNK_DATA':\n if entry.Ordinal & 0x80000000:\n # This is Ordinal import\n pass\n\n self.adjust_references_of_iat(origin_iat_rva,\n origin_iat_rva + origin_iat_size,\n rva_gap_size)", "def get_last_import_address_thunk(self):\n (import_address_table_rva, size) = \\\n self.pe_manager.get_import_address_table_address_range()\n offset = 0\n import_address_thunk = None\n for entry in self.import_structures:\n if entry.name == 'IMAGE_THUNK_DATA':\n entry_offset = entry.get_file_offset()\n entry_rva = self.PE.get_rva_from_offset(entry_offset)\n if entry_offset > offset \\\n and (import_address_table_rva\n <= entry_rva\n <= import_address_table_rva + size):\n if entry.AddressOfData > 0:\n offset = entry_offset\n import_address_thunk = entry\n return import_address_thunk", "def add_dll_to_import_descriptor(self, first_thunk_rva, dll_name_rva,\n iat_rva):\n empty_import_descriptor = \\\n self.pe_manager.gen_new_empty_import_descriptor()\n setattr(empty_import_descriptor, \"Characteristics\", 0)\n setattr(empty_import_descriptor, \"FirstThunk\", iat_rva)\n setattr(empty_import_descriptor, \"ForwarderChain\", 0)\n setattr(empty_import_descriptor, \"Name\", dll_name_rva)\n setattr(empty_import_descriptor, \"OriginalFirstThunk\", first_thunk_rva)\n setattr(empty_import_descriptor, \"TimeDateStamp\", 0)\n\n # TODO : inject dll_name and get its rva for set name\n\n last_descriptor = self.import_structures[-1]\n if last_descriptor.name != 'IMAGE_IMPORT_DESCRIPTOR':\n print(\"something wrong\")\n exit\n\n last_descriptor_offset = self.get_last_import_descriptor_offset()\n last_descriptor = self.get_last_import_descriptor()\n last_descriptor_index = self.import_structures.index(last_descriptor)\n\n empty_import_descriptor.set_file_offset(last_descriptor_offset)\n last_descriptor.set_file_offset(last_descriptor_offset\n + empty_import_descriptor.sizeof())\n self.import_structures.insert(last_descriptor_index,\n empty_import_descriptor)\n # print(\"OFFSET : {:x}\".format(last_descriptor_offset))\n self.count_of_additional_dll += 1\n return empty_import_descriptor", "def find_func_iat_adrs(self, ea):\n if ea in self.rt_import_table:\n (module_name, iat_ea, name, ord) = self.rt_import_table[ea]\n return iat_ea, module_name\n\n return None, None", "def gen_new_import_thunk(self, ordinal):\n separator_thunk = self.gen_separator_thunk()\n empty_thunk = self.gen_new_thunk(ordinal)\n\n last_import_lookup_thunk = self.get_last_import_lookup_thunk()\n last_import_lookup_thunk_offset = \\\n last_import_lookup_thunk.get_file_offset()\n separator_thunk.set_file_offset(last_import_lookup_thunk_offset +4)\n empty_thunk.set_file_offset(last_import_lookup_thunk_offset + 8)\n\n rva_at_iat = self.append_to_iat(ordinal)\n self.count_of_additional_fn += 1\n return empty_thunk, rva_at_iat", "def dereference(self):\n offset = headers.calculateRelativeAddress(self, self['Name'])\n return self.p.p.new(IMAGE_IMPORT_HINT, __name__='ImportName', offset=offset)", "def __init__(self, pe_manager):\n self.PE = pe_manager.PE\n self.structures = self.PE.__structures__\n self.pe_manager = pe_manager\n self.import_entries = pe_manager.PE.DIRECTORY_ENTRY_IMPORT\n self.import_structures = pe_manager.get_import_structures()\n\n 
self._origin_import_section = None\n self._new_import_section = None\n\n self.count_of_additional_fn = 0\n self.count_of_additional_dll = 0", "def FuncItems(start):\n return ida_funcs.func_item_iterator_t(ida_funcs.get_func(start))", "def is_func_imported(self, ea):\n # If address is located in IAT\n if ea in self.rt_import_table:\n return True\n\n return False", "def _entrypoint_iterator(self):\n return self._entry_points", "def get_last_import_thunk_offset(self):\n offset = 0\n for entry in self.import_structures:\n if entry.name == 'IMAGE_THUNK_DATA':\n entry_offset = entry.get_file_offset()\n if entry_offset > offset:\n if entry.AddressOfData > 0:\n offset = entry_offset\n return offset", "def get_imports() -> list[FileImporters]:\n g.ledger.changed()\n return g.ledger.ingest.import_data()", "def iter_import_chunks(self):\r\n chunk = []\r\n last_line = None\r\n for leaf in self.python_file.tree.body:\r\n if isinstance(leaf, (ast.Import, ast.ImportFrom)):\r\n # we've seen previous imports but this import is not in the same chunk\r\n if last_line and leaf.lineno != last_line[1]:\r\n yield chunk\r\n chunk = [leaf]\r\n # we've either not seen previous imports or this is part of the same chunk\r\n elif not last_line or last_line and leaf.lineno == last_line[1]:\r\n chunk.append(leaf)\r\n last_line = self.python_file.logical_lines[leaf.lineno]\r\n if chunk:\r\n yield chunk", "def find_iat_ptrs(self, pe, image_base, size, get_word):\n iat_ptrs = []\n\n next_offset = image_base\n\n while next_offset < image_base + size:\n offset = next_offset\n next_offset = ida_bytes.next_addr(offset)\n\n # Attempt to read the current instruction's effective memory address operand (if present)\n mnem = idc.print_insn_mnem(offset).lower()\n ptr = 0\n\n if mnem in [\"call\", \"push\", \"jmp\"]:\n if idc.get_operand_type(offset, 0) == idc.o_mem:\n # Get memory offset for branch instructions\n ptr = idc.get_operand_value(offset, 0)\n elif mnem in [\"mov\", \"lea\"]:\n if idc.get_operand_type(offset, 0) == idc.o_reg and idc.get_operand_type(offset, 1) == idc.o_mem:\n # Get memory offset for mov/lea instructions\n ptr = idc.get_operand_value(offset, 1)\n\n # Does the instruction's memory address operand seem somewhat valid?!\n if ptr < 0x1000:\n continue\n\n # Resolve pointer from memory operand\n iat_offset = get_word(ptr)\n\n # Ignore offset if it is in our image\n if image_base <= iat_offset <= image_base + size:\n continue\n\n # Get module and API name for offset\n module, api = self.resolve_address(iat_offset)\n\n # Ignore the offset if it is in a debug segment or stack etc\n if api and module and module.endswith(\".dll\"):\n if not iat_offset in iat_ptrs:\n # Add IAT offset, address to patch, module name and API name to list\n iat_ptrs.append((iat_offset, offset + idc.get_item_size(offset) - 4, module, api))\n\n self.ret = iat_ptrs\n return self.ret", "def __iter__(self):\n idx = ffi.new('uint32_t *')\n mod = lib.ly_ctx_get_module_iter(self._ctx, idx)\n while mod:\n yield Module(self, mod)\n mod = lib.ly_ctx_get_module_iter(self._ctx, idx)", "def gen_new_thunk(self, attr_data):\n new_thunk = self.pe_manager.gen_new_empty_import_thunk()\n setattr(new_thunk, \"AddressOfData\", attr_data)\n setattr(new_thunk, \"ForwarderString\", attr_data)\n setattr(new_thunk, \"Function\", attr_data)\n setattr(new_thunk, \"Ordinal\", attr_data)\n return new_thunk", "def extract_functions(elf_path):\n text_data = objdump_section(elf_path, '.text')\n name_to_addr = parse_func_names(text_data)\n return name_to_addr" ]
[ "0.7978085", "0.6452974", "0.62089634", "0.61941874", "0.61730665", "0.60156834", "0.5757013", "0.56928056", "0.56683564", "0.5667785", "0.55959344", "0.5552405", "0.5497728", "0.54639316", "0.5418187", "0.5392681", "0.5231617", "0.52232295", "0.5129847", "0.510814", "0.50952214", "0.5080455", "0.5060702", "0.50585693", "0.50493616", "0.5006351", "0.49946043", "0.4910851", "0.48729774", "0.48707163" ]
0.812108
0
Get the NT header
def get_nt_header(self): if self.e_magic != 0x5a4d: raise ValueError('e_magic {0:04X} is not a valid DOS signature.'.format(self.e_magic)) nt_header = obj.Object("_IMAGE_NT_HEADERS", offset = self.e_lfanew + self.obj_offset, vm = self.obj_vm, native_vm = self.obj_native_vm) if nt_header.Signature != 0x4550: raise ValueError('NT header signature {0:04X} is not a valid'.format(nt_header.Signature)) return nt_header
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _nt_header(self):\n\n try:\n dos_header = obj.Object(\"_IMAGE_DOS_HEADER\", offset = self.DllBase,\n vm = self.obj_native_vm)\n\n return dos_header.get_nt_header()\n except ValueError:\n return obj.NoneObject(\"Failed initial sanity checks\")\n except exceptions.SanityCheckException:\n return obj.NoneObject(\"Failed initial sanity checks. Try -u or --unsafe\")", "def getHeader():\n return _HEADER", "def get_header(self):\n return self._header", "def getHeader(self):\n return self.data.header", "def GetHeaders(the_file):\n\n data = exifread.process_file(the_file, 'UNDEF', False, False, False)\n return data", "def get_tfsheader(tfsfile):\n headerdata = pd.read_csv(tfsfile, delim_whitespace=True, nrows=44, index_col=None)\n headerdata.columns = ['AT', 'NAME', 'TYPE', 'VALUE']\n return headerdata[['NAME', 'VALUE']]", "def get_header(file):\n with open(file, 'r') as f:\n return f.readline()", "def read_header(self):\n if self._fopen.read(4) != b'\\x84\\x83\\x82\\x81':\n raise ValueError('Invalid OMF file')\n file_version = struct.unpack('<32s', self._fopen.read(32))[0]\n file_version = file_version[0:len(COMPATIBILITY_VERSION)]\n if file_version != COMPATIBILITY_VERSION:\n raise ValueError(\n 'Version mismatch: file version {fv}, '\n 'reader version {rv}'.format(\n fv=file_version,\n rv=COMPATIBILITY_VERSION\n )\n )\n uid = uuid.UUID(bytes=struct.unpack('<16s', self._fopen.read(16))[0])\n json_start = struct.unpack('<Q', self._fopen.read(8))[0]\n return str(uid), json_start", "def get_headers(self):\n return self.numHeadList", "def header(self):\r\n return self.__header", "def header(self):\n header_str = self._base[0:self.s_allocator_header].tostring()\n magic, pos, used = struct.unpack(str('III'), header_str)\n\n assert magic == self._magic_num, \\\n 'invalid header magic[%d] in shared memory' % (magic)\n return self._header_pages, self._total_pages, pos, used", "def mail_header(self):\n return self._hdr", "def get_header(filename):\n if not os.path.isfile(filename):\n sys.exit('ERROR: input {} does not exist'.format(filename))\n try:\n hdr = dcm.read_file(filename)\n return hdr\n except:\n sys.exit('ERROR: failed to parse {}'.format(filename))", "def GetHeader(self) -> \"char const *\":\n return _itkVTKPolyDataReaderPython.itkVTKPolyDataReaderMD2_GetHeader(self)", "def getHeaders(self):\n hd = {}\n line = self.conn.readline()\n while line != \"\\r\\n\":\n print \":\"+line+\":\"+\" len = \",len(line)\n key,value = line.split(':',1)\n hd[key] = value.rstrip()\n line = self.conn.readline()\n return hd", "def header(self):\n return self._header", "def header(self):\n return self._header", "def header(self):\n return self._header", "def GetHeader(self) -> \"char const *\":\n return _itkVTKPolyDataReaderPython.itkVTKPolyDataReaderMF2_GetHeader(self)", "def get_header(filepath):\n header = None\n for i, x in enumerate(open(filepath)):\n if i == 0:\n header = x\n return(header)", "def unpackRecHeader(self):\n return self.unpack('4s3i',16,'REC_HEAD')", "def read_headers(filelike):\n return reader.Reader.read_headers(filelike).datafile", "def GetHeader(self) -> \"char const *\":\n return _itkVTKPolyDataReaderPython.itkVTKPolyDataReaderMF3_GetHeader(self)", "def GetHeader(self) -> \"char const *\":\n return _itkVTKPolyDataReaderPython.itkVTKPolyDataReaderMD3_GetHeader(self)", "def tsv_header(self):\n return self.tsv_lines[0]", "def get_header(self, name):\n return self.headers.get(name)", "def __head_or_get(self, path):\n try:\n info = self.get_cont_stat(path)\n if not isinstance(info, 
types.DictType):\n raise info()\n headers = HeaderKeyDict({\n 'X-Container-Object-Count': info['object_count'],\n 'X-Container-Bytes-Used': info['bytes_used'],\n 'X-Timestamp': info['created_at'],\n 'X-PUT-Timestamp': info['put_timestamp'],\n })\n metadata = info['metadata']\n for key, value in metadata.iteritems():\n if key == 'r-':\n headers.update({'x-container-read' : value})\n elif key == 'w-':\n headers.update({'x-container-write' : value})\n else:\n ser_key = key.split('-')[0]\n if ser_key == 'm':\n #Supported a single word key till first '-' \n #in the entire metadata header as X-Container-Meta-A\n #key = 'x-container-meta-' + key.split('-')[1]\n \n #SANCHIT: This supports multi-part key for metadata \n #such as X-Container-Meta-A-B-C\n key = 'x-container-meta-' + key.split('-', 1)[1]\n else:\n #key = 'x-container-sysmeta-' + key.split('-')[1]\n key = 'x-container-sysmeta-' + key.split('-', 1)[1]\n headers.update({key : value})\n return headers\n except HTTPException as error:\n self.logger.exception(error)\n return error.status_int\n except Exception as err:\n self.logger.exception(err)\n return HTTP_INTERNAL_SERVER_ERROR", "def header(self):\n return self[0]", "def _read_hdr_file(ktlx_file):\r\n with open(ktlx_file, 'rb') as f:\r\n\r\n hdr = {}\r\n assert f.tell() == 0\r\n\r\n hdr['file_guid'] = hexlify(f.read(16))\r\n hdr['file_schema'], = unpack('<H', f.read(2))\r\n if not hdr['file_schema'] in (1, 3, 7, 8, 9):\r\n raise NotImplementedError('Reading header not implemented for ' +\r\n 'file_schema ' + str(hdr['file_schema']))\r\n\r\n hdr['base_schema'], = unpack('<H', f.read(2))\r\n if not hdr['base_schema'] == 1: # p.3: base_schema 0 is rare, I think\r\n raise NotImplementedError('Reading header not implemented for ' +\r\n 'base_schema ' + str(hdr['base_schema']))\r\n\r\n hdr['creation_time'] = datetime.fromtimestamp(unpack('<i',\r\n f.read(4))[0])\r\n hdr['patient_id'], = unpack('<i', f.read(4))\r\n hdr['study_id'], = unpack('<i', f.read(4))\r\n hdr['pat_last_name'] = _make_str(unpack('c' * 80, f.read(80)))\r\n hdr['pat_first_name'] = _make_str(unpack('c' * 80, f.read(80)))\r\n hdr['pat_middle_name'] = _make_str(unpack('c' * 80, f.read(80)))\r\n hdr['patient_id'] = _make_str(unpack('c' * 80, f.read(80)))\r\n assert f.tell() == 352\r\n\r\n if hdr['file_schema'] >= 7:\r\n hdr['sample_freq'], = unpack('<d', f.read(8))\r\n n_chan, = unpack('<i', f.read(4))\r\n hdr['num_channels'] = n_chan\r\n hdr['deltabits'], = unpack('<i', f.read(4))\r\n hdr['phys_chan'] = unpack('<' + 'i' * hdr['num_channels'],\r\n f.read(hdr['num_channels'] * 4))\r\n\r\n f.seek(4464)\r\n hdr['headbox_type'] = unpack('<' + 'i' * 4, f.read(16))\r\n hdr['headbox_sn'] = unpack('<' + 'i' * 4, f.read(16))\r\n hdr['headbox_sw_version'] = _make_str(unpack('c' * 40, f.read(40)))\r\n hdr['dsp_hw_version'] = _make_str(unpack('c' * 10, f.read(10)))\r\n hdr['dsp_sw_version'] = _make_str(unpack('c' * 10, f.read(10)))\r\n hdr['discardbits'], = unpack('<i', f.read(4))\r\n\r\n if hdr['file_schema'] >= 8:\r\n hdr['shorted'] = unpack('<' + 'h' * 1024, f.read(2048))[:n_chan]\r\n hdr['frequency_factor'] = unpack('<' + 'h' * 1024,\r\n f.read(2048))[:n_chan]\r\n return hdr", "def getTableHeader(self, filename):\n hdr = \"\"\n with open(filename, \"r\") as f:\n for line in f:\n if line[0] == \">\":\n hdr += line\n else:\n return hdr" ]
[ "0.83684963", "0.70789915", "0.678403", "0.65934235", "0.6516757", "0.64691263", "0.64680433", "0.6467337", "0.6464013", "0.6449925", "0.64363486", "0.6402365", "0.6393405", "0.63418704", "0.63347447", "0.6304198", "0.6304198", "0.6304198", "0.6273747", "0.6260924", "0.62572646", "0.62244815", "0.6187318", "0.6177313", "0.61640203", "0.61049587", "0.6093889", "0.6091847", "0.609097", "0.60841024" ]
0.81594086
1
Get the _VS_VERSION_INFO structure
def get_version_info(self): try: nt_header = self.get_nt_header() except ValueError, ve: return obj.NoneObject("PE file failed initial sanity checks: {0}".format(ve)) try: unsafe = self.obj_vm.get_config().UNSAFE except AttributeError: unsafe = False for sect in nt_header.get_sections(unsafe): if str(sect.Name) == '.rsrc': root = obj.Object("_IMAGE_RESOURCE_DIRECTORY", self.obj_offset + sect.VirtualAddress, self.obj_vm) for rname, rentry, rdata in root.get_entries(): # We're a VERSION resource and we have subelements if rname == resource_types['RT_VERSION'] and rentry: for sname, sentry, sdata in rdata.get_entries(): # We're the single sub element of the VERSION if sname == 1 and sentry: # Get the string tables for _stname, stentry, stdata in sdata.get_entries(): if not stentry: return obj.Object("_VS_VERSION_INFO", offset = (stdata.DataOffset + self.obj_offset), vm = self.obj_vm) return obj.NoneObject("Cannot find a _VS_VERSION_INFO structure")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def version_info(self):\n\n return __version_info__", "def get_version_info() -> Tuple[Text, Text]:", "def get_version_info(self):\n return self._jadeRpc('get_version_info')", "def info(self):\n version_str = self.version\n return Utils.version_str2tuple(version_str)", "def get_version_info():\n from docplex.cp.model import CpoModel\n try:\n with CpoSolver(CpoModel()) as slvr:\n return slvr.agent.version_info\n except:\n if config.context.log_exceptions:\n traceback.print_exc()\n pass\n return {}", "def getVersionInfo(cls):\n\n return __version__ + \"\\n\"", "def version_info():\r\n return tuple(map(int, __version__.split('.')))", "def read_versionInfo(self):\n # PROTECTED REGION ID(SdpMasterLeafNode.versionInfo_read) ENABLED START #\n return self.attr_map[\"versionInfo\"]\n # PROTECTED REGION END # // SdpMasterLeafNode.versionInfo_read", "def version_info(self):\n if self._api_version is None:\n self.query_api_version()\n return self._api_version['api-major-version'],\\\n self._api_version['api-minor-version']", "def get_version_info(self, key_name='ver_sw_release'):\n if key_name in self._msg_info_dict:\n val = self._msg_info_dict[key_name]\n return ((val >> 24) & 0xff, (val >> 16) & 0xff, (val >> 8) & 0xff, val & 0xff)\n return None", "def getVersion(self):\n return self.get('Version', type=\"numeric\")", "def pyzmq_version_info():\n return version_info", "def get_version_info(self):\n sys_info_service = self.robot.all_services.get(\"sys_info\")\n if sys_info_service is not None:\n log.info(\"System version info: %s\" % sys_info_service.system_version)\n else:\n log.warning(\"Service get_version_info is not enabled!\")", "def get_release_info(self):\r\n return self.detail_info.get_release_info(self.version)", "def formver(self) -> Tuple[int]:\n return (self.header.format, self.header.version)", "def version(self) -> Dict[str, str]:\n return self.get_version()", "def get_version(self):\n verxml = self._ncc.nxoscli('show version')\n self.logger.debug(verxml)\n verparsed = _begin_parse(verxml)\n sysmgrclischema = parse_get_nsmap(verparsed)\n self.logger.debug(\"NSMAP: {}\".format(sysmgrclischema))\n showversion = find_element(['sys_ver_str', 'chassis_id', 'host_name', 'loader_ver_str'], sysmgrclischema,\n verparsed)\n self.logger.debug(str(showversion))\n self.hostname = showversion['host_name']\n self.chassis_id = showversion['chassis_id']\n self.system_version = showversion['sys_ver_str']", "def get_version(self) -> Dict[str, str]:\n return self.http.get(self.config.paths.version)", "def rpc_version(self):\n\t\tvinfo = {'version': version.version, 'version_info': version.version_info._asdict()}\n\t\tvinfo['rpc_api_version'] = version.rpc_api_version\n\t\treturn vinfo", "def build_version(self):\n return self.nodes[0].get('infos').get('system_info').get('system_version')", "def version(self):\n if \"version\" in self._prop_dict:\n return self._prop_dict[\"version\"]\n else:\n return None", "def get_version():\n return about.get_version()", "def get_version(self):\n return self.cur_config['version']['name']", "def getVersion(cls):\n cVersion = cls.__getLib().voikkoGetVersion()\n return unicode_str(cVersion, \"UTF-8\")", "def extract_version_info():\n version = None\n if os.path.exists('.version'):\n with open('.version') as f:\n line = f.read().rstrip()\n log.info('.version contains \"%s\"', line)\n if line.startswith('openafs-'):\n # Extract version from the git tag name.\n version = re.sub('openafs-[^-]*-', '', line).replace('_', '.')\n elif line.startswith('BP-'):\n # Branch point 
tags do not contain the version number.\n log.info('.version file has old branch point tag name.')\n else:\n # Use the given version string.\n version = line\n if not version:\n # Unable to lookup version from the .version file, try to extract the\n # version from the source directory name.\n root = os.path.basename(os.path.abspath('.'))\n m = re.match(r'openafs-(.*)', root)\n if m:\n version = m.group(1)\n if not version:\n module.fail_json(msg='Unable to determine version.')\n\n # Determine package version and release from the OpenAFS version.\n m1 = re.match(r'(.*)(pre[0-9]+)', version) # prerelease\n m2 = re.match(r'(.*)dev', version) # development\n m3 = re.match(r'(.*)-([0-9]+)-(g[a-f0-9]+)$', version) # development\n m4 = re.match(r'(.*)-([a-z]+)([0-9]+)', version) # custom\n if m1:\n v = m1.group(1)\n r = \"0.{0}\".format(m1.group(2))\n elif m2:\n v = m2.group(1)\n r = \"0.dev\"\n elif m3:\n v = m3.group(1)\n r = \"{0}.{1}\".format(m3.group(2), m3.group(3))\n elif m4:\n v = m4.group(1).replace('-', '')\n r = \"1.2.{0}.{1}\".format(m4.group(3), m4.group(2))\n else:\n v = version # standard release\n r = \"1\" # increment when repackaging this version\n # '-' are used as delimiters by rpm.\n v = v.replace('-', '_')\n r = r.replace('-', '_')\n return dict(openafs_version=version, package_version=v, package_release=r)", "def version(self):\n _, body = self.request('/', 'GET')\n return body.get('version', None)", "def _get_version(self):", "def version(self):\n return self._get(\"version\")", "def get_release_info(self, version):\r\n try:\r\n return self._detail[\"releases\"][version]\r\n except KeyError as key_error:\r\n log.warning(key_error)\r\n return []", "def java_ver(release='', vendor='', vminfo=('', '', ''), osinfo=('', '', '')):\n # Import the needed APIs\n try:\n import java.lang\n except ImportError:\n return release, vendor, vminfo, osinfo\n\n vendor = _java_getprop('java.vendor', vendor)\n release = _java_getprop('java.version', release)\n vm_name, vm_release, vm_vendor = vminfo\n vm_name = _java_getprop('java.vm.name', vm_name)\n vm_vendor = _java_getprop('java.vm.vendor', vm_vendor)\n vm_release = _java_getprop('java.vm.version', vm_release)\n vminfo = vm_name, vm_release, vm_vendor\n os_name, os_version, os_arch = osinfo\n os_arch = _java_getprop('java.os.arch', os_arch)\n os_name = _java_getprop('java.os.name', os_name)\n os_version = _java_getprop('java.os.version', os_version)\n osinfo = os_name, os_version, os_arch\n\n return release, vendor, vminfo, osinfo" ]
[ "0.7197638", "0.7100442", "0.7023496", "0.7013558", "0.6973527", "0.6866096", "0.68598795", "0.6841089", "0.6810265", "0.68088377", "0.66023237", "0.65580744", "0.6516995", "0.6495485", "0.6466568", "0.646222", "0.64489084", "0.6443668", "0.6442421", "0.64309174", "0.64292395", "0.6426767", "0.64147437", "0.6400845", "0.63883215", "0.6351476", "0.6350536", "0.63244325", "0.62912506", "0.6247662" ]
0.7852839
0
Rounds down an address based on an alignment
def round(self, addr, align, up = False): if addr % align == 0: return addr else: if up: return (addr + (align - (addr % align))) return (addr - (addr % align))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def align_addr(addr, align = 16, dir = ALIGN_DOWN):\n\n if dir == ALIGN_DOWN:\n return addr - (addr % align)\n else:\n return addr + (align - addr % align)", "def _format_address(self,address):\n address = int(address)\n if address >=1 and address <= 250:\n address = hex(int(address)) #Convert address if between 0-250.\n if len(address) == 3: #Take the last char and append a zero.\n address = str(address[-1]).rjust(2,'0')\n elif len(address) == 4:\n address = address[-2:] #Take the last two char. \n return address\n elif address == 0:\n address = '00'\n return address\n else:\n return False", "def align(val):\n ovr = val % ALIGNMENT\n if (ovr):\n val = val + ALIGNMENT - ovr\n return val", "def calculate_padding_to_align(length, align):\n return 0 if length % align == 0 else (align - (length % align))", "def format_alignment(self, alignment):\n raise NotImplementedError(\"This method should be implemented\")\n ###################################################\n # You MUST implement this method in the subclass. #\n ###################################################", "def format_address(addr: int, arch: Optional[cemu.arch.Architecture] = None) -> str:\n if arch is None:\n arch = cemu.core.context.architecture\n\n if arch.ptrsize == 2:\n return f\"{addr:#04x}\"\n elif arch.ptrsize == 4:\n return f\"{addr:#08x}\"\n elif arch.ptrsize == 8:\n return f\"{addr:#016x}\"\n else:\n raise ValueError(f\"Invalid value for '{arch.ptrsize=}'\")", "def toAddr(self, offset: long) -> ghidra.program.model.address.Address:\n ...", "def __ip2intstr(self, address):\n return str(struct.unpack('!I', address)[0])", "def pad_physical_address(addr):\n return addr + [0] * (4 - len(addr))", "def decodeAddress(self, value: long, useMemorySegmentation: bool) -> ghidra.program.model.address.Address:\n ...", "def align(args) :\n from aligner import align_reads\n align_reads(args)", "def _read_addr_resolve(self, addr: 'bytes', htype: 'int') -> 'str':\n if htype == Enum_Hardware.Ethernet: # Ethernet\n if py38:\n _addr = addr.hex(':')\n else:\n _addr = ':'.join(textwrap.wrap(addr.hex(), 2))\n else:\n _addr = addr.hex()\n return _addr", "def align(self):\n ...", "def toAddr(self, offset: int) -> ghidra.program.model.address.Address:\n ...", "def get_mask_from_alignment(al):\n alignment_str = str(al).split(\"\\n\")[1]\n return alignment_str.replace(\"|\", \"+\")", "def get_segment_alignment(*args):\n return _ida_segment.get_segment_alignment(*args)", "def int_to_address(n, length):\n return \"{0:b}\".format(n).zfill(length)", "def map_addr(s, d):\n s_out, d_out = crc8(0, s, 0x31)%11 + 1, crc8(0, d, 0x1d)%11 + 1\n count = 0\n while s_out == d_out and count < 5:\n s_out, d_out = crc8(0, s, 0x31)%11 + 1, crc8(0, d, 0x1d)%11 + 1\n count += 1\n s_out, d_out = 's' + str(s_out), 's' + str(d_out)\n return s_out, d_out", "def get_address(address, registers):\n \n try:\n address, offset = address.split('+')\n offset = int(offset)\n except ValueError:\n try:\n address, offset = address.split('-')\n offset = -int(offset)\n except ValueError:\n offset = 0\n\n if address.isdigit():\n return int(address)\n\n return int(registers[address]) + offset", "def convert_to_address(cls, x_ns):\n if x_ns % 4 != 0:\n raise ValueError(\"To address must be divisible by 4 ({} given)\".format(x_ns))\n if not(cls.JT_MIN_TO_ADDR <= x_ns // 4 <= cls.JT_MAX_TO_ADDR):\n raise ValueError(\"To address must be {}<x<{}, ({} given)\".format(\n cls.JT_MIN_TO_ADDR*4, cls.JT_MAX_TO_ADDR*4, x_ns\n ))\n return x_ns // 4", "def align(offset, data, align_to=64):\n rem 
= offset % align_to\n new_offset = offset if (rem == 0) else offset + (align_to - rem)\n\n if data is not None:\n new_data = np.pad(\n data.flatten(),\n (0, int((new_offset - offset) / data.dtype.itemsize)), mode=\"constant\")\n else:\n new_data = None\n return new_offset, new_data", "def normalize_address(address: str):\n return Web3.toChecksumAddress(address.lower())", "def mac_addr(address):\n\tprint(':'.join('%02x' % compat_ord(b) for b in address))\n\treturn ':'.join('%s' % format(compat_ord(b), '0>8b') for b in address)", "def test_align():\n target = ('TAAATAAATATCTGGTGTTTGAGGCAAAAAGGCAGACTTAAATTCTAAATCACACCTGTGCTT'\n 'CCAGCACTACCTTCAAGCGCAGGTTCGAGCCAGTCAGGCAGGGTACATAAGAGTCCATTGTGC'\n 'CTGTATTATTTTGAGCAATGGCTAAAGTACCTTCACCCTTGCTCACTGCTCCCCCACTTCCTC'\n 'AAGTCTCATCGTGTTTTTTTTAGAGCTAGTTTCTTAGTCTCATTAGGCTTCAGTCACCAT')\n query = ('TCTGGTGTTTGAGGCAAAAAGGCAGACTTAAATTCTAAATCACACCTGTGCTTCCAGCACTACC'\n 'TTCAAGCGCAGGTTCGAGCCAGTCAGGACTGCTCCCCCACTTCCTCAAGTCTCATCGTGTTTTT'\n 'TTTAGAGCTAGTTTCTTAGTCTCATTAGGCTTCAGTCACCATCATTTCTTATAGGAATACCA')\n assert kevlar.align(target, query) == ('10D91M69D79M20I', 155)", "def reverse_lookup_zone(ipaddress):\n return reverse_dotted_decimals(ipaddress) + '.in-addr.arpa'", "def parseAddressFA(address, word_size = 4):\n binary_address = bin(address)[2:].zfill(32)\n byte_offset_size = int(math.log2(word_size))\n byte_offset = int(binary_address[-byte_offset_size:],2)\n tag = int(binary_address[:-(byte_offset_size)],2)\n #address_result = int(binary_address[:-byte_offset_size],2)\n return {\"tag\" : tag, \"address_result\" : address - byte_offset }", "def convert_from_address(cls, x_ns):\n if x_ns % 4 != 0:\n raise ValueError(\"From address must be divisible by 4 ({} given)\".format(x_ns))\n if not(cls.JT_MIN_FROM_ADDR <= x_ns // 4 <= cls.JT_MAX_FROM_ADDR):\n raise ValueError(\"From address must be {}<x<{}, ({} given)\".format(\n cls.JT_MIN_FROM_ADDR*4, cls.JT_MAX_FROM_ADDR*4, x_ns\n ))\n return x_ns // 4 + cls.JT_FROM_ADDR_OFFSET", "def bytes_to_addr(hh, ll):\n return (int(hh, 16) << 8) + int(ll, 16)", "def _make_addr_resolve(self, addr: 'str | bytes', htype: 'int') -> 'bytes':\n _addr = addr.encode() if isinstance(addr, str) else addr\n\n if htype == Enum_Hardware.Ethernet:\n if PAT_MAC_ADDR.fullmatch(_addr) is not None:\n return _addr.replace(b':', b'').replace(b'-', b'')\n raise ProtocolError(f'Invalid MAC address: {addr!r}')\n return _addr", "def offset_pad(self, offset):\n return (((offset + 3) / 4) * 4)" ]
[ "0.7200445", "0.6091956", "0.60217863", "0.59502107", "0.5875949", "0.5678854", "0.55767566", "0.55569094", "0.55374366", "0.53665066", "0.5357269", "0.53529537", "0.5348658", "0.5348002", "0.5330257", "0.5318402", "0.5307727", "0.52861303", "0.52447337", "0.521914", "0.5218729", "0.51808715", "0.51764226", "0.51757944", "0.51513207", "0.5149876", "0.5139813", "0.51397157", "0.51118755", "0.5091048" ]
0.73834985
0
Replaces a field in a sector header
def replace_header_field(self, sect, header, item, value): field_size = item.size() start = item.obj_offset - sect.obj_offset end = start + field_size newval = struct.pack(item.format_string, int(value)) result = header[:start] + newval + header[end:] return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setHeader(k, v):", "def set_header( name, value ):", "def update_header(self, key, value):\n if self.mode != 'write':\n raise IOError(\"Must open file in write mode to do this!\")\n\n if isinstance(value, CommentedSeq):\n # Always converted to a tuple because a commented sequence (list or tuple created by the YAML when reading\n # in a sequence of info) cannot be written to photontable header\n getLogger(__name__).debug(f\"Converting CommentedSeq {value} to tuple so header can be updated.\")\n value = tuple(value)\n\n if key in self.file.root.photons.photontable.attrs._f_list('sys'):\n raise KeyError(f'\"{key}\" is reserved for use by pytables')\n\n if key not in self.file.root.photons.photontable.attrs._f_list('user'):\n getLogger(__name__).info(f'Adding new header key: {key}')\n\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=tables.NaturalNameWarning)\n setattr(self.file.root.photons.photontable.attrs, key, value)", "def replace_hdr_file(hdrfile):\n # hdr file replacment string\n HDRFILE_STRING = \"byteorder M\\nlayout bil\\nnbands 1\\nnbits 16\\nncols 6935\\nnrows 3351\\n\\\n ulxmap -124.729583333331703\\nulymap 52.871249516804028\\nxdim 0.00833333333\\nydim 0.00833333333\\n\"\n with open(hdrfile, 'w') as o:\n o.write(HDRFILE_STRING)", "def _set_key_value(ext, key, value):\n ext.hdr[key] = value", "def _headercorrected(hdr):\n # COM*** -> COMMENT\n i = 1\n while 'COM%03d' % i in hdr:\n value = hdr['COM%03d' % i]\n comment = hdr.cards['COM%03d' % i].comment\n hdr['COMMENT'] = '[%s] %s' % (comment, value)\n del hdr['COM%03d' % i]\n i += 1\n # HIST*** -> HISTORY\n i = 1\n while 'HIST%03d' % i in hdr:\n value = hdr['HIST%03d' % i]\n comment = hdr.cards['HIST%03d' % i].comment\n hdr['HISTORY'] = '%s (%s)' % (value, comment)\n del hdr['HIST%03d' % i]\n i += 1\n # ORIGIN -> FROM\n if 'ORIGIN' in hdr.keys():\n hdr.rename_keyword('ORIGIN', 'FROM')\n if 'ORIGIN_V' in hdr.keys():\n hdr.rename_keyword('ORIGIN_V', 'FROM_V')\n # SOURCE_V -> FORMAT\n if 'SOURCE_V' in hdr.keys():\n hdr.rename_keyword('SOURCE_V', 'FORMAT')\n # SRC_VERS -> SRC_V\n if 'SRC_VERS' in hdr.keys():\n hdr.rename_keyword('SRC_VERS', 'SRC_V')", "def edit_header(my_vcf):\n header = my_vcf.header.copy()\n header.add_line(('##INFO=<ID=GTCNT,Number=.,Type=Integer,'\n 'Description=\"Counts of genotypes for the allele (UNK, REF, HET, HOM)\">'))\n return header", "def update_header(self) -> None:\n self.header.partial_reset()\n self.header.point_format_id = self.points.point_format.id\n self.header.point_data_record_length = self.points.point_size\n\n if len(self.points) > 0:\n self.header.update(self.points)\n\n if self.header.version.minor >= 4:\n if self.evlrs is not None:\n self.header.number_of_evlrs = len(self.evlrs)\n self.header.start_of_waveform_data_packet_record = 0\n # TODO\n # if len(self.vlrs.get(\"WktCoordinateSystemVlr\")) == 1:\n # self.header.global_encoding.wkt = 1\n else:\n self.header.number_of_evlrs = 0", "def format_header(self, header):\n raise NotImplementedError()", "def set_header_value(old_rmap, new_rmap, key, new_value):\n mapping = rmap.load_mapping(old_rmap)\n mapping.header[key] = new_value\n mapping.write(new_rmap)", "def process_header(line, new_ids=None):\n # extraneous headers\n if line.startswith('##') and not any(\n line.startswith('##' + header_type)\n for header_type in ('INFO', 'FILTER', 'FORMAT', 'ALT', 'contig')\n ):\n return None\n\n # non-whitelisted annotations\n match = re.match(r'##INFO=<ID=([^,]+)', line)\n if match:\n info_name = match.group(1)\n 
if info_name not in WHITELISTED_ANNOTATIONS:\n return None\n\n if line.startswith('#CHROM') and new_ids is not None:\n fields = line.strip().split('\\t')[:9] # fixed headers\n fields.extend(new_ids)\n line = '\\t'.join(fields) + '\\n'\n\n return line", "def update_header(fopen):\n json_start = fopen.tell()\n fopen.seek(52, 0)\n fopen.write(struct.pack('<Q', json_start))\n fopen.seek(json_start)", "def _clean(header):\n # TODO: find a way to identify cubes containing time\n header['ctype1'] = 'HPLN-TAN' # Helioprojective longitude, TAN projection\n header['ctype2'] = 'HPLT-TAN' # Helioprojective latitude, TAN projection\n header['ctype3'] = 'WAVE ' # Wavelength axis, default (TAB) projection\n header['naxis'] = 3\n return header", "def filter_headers(self, header):\n if header == \"Ticker symbol\":\n return \"symbol\"\n elif header == \"GICS Sector\":\n return \"sector\"\n elif header == \"Security\":\n return \"name\"\n elif header == \"GICS Sub Industry\":\n return \"industry\"\n else:\n return header", "def add_header( name, value ):", "def modify_device_fields(self, data):\n data = clean(data, self.fields_parameters)\n return self.put(\"/device/fields\", data)", "def _fixHeaderLength(self):\n self.header.seek(0)\n lines = self.header.readlines()\n headlength = len(lines)\n lines[0] = wrapLine(\"NLHEAD_FFI\", self.annotation, self.delimiter, \"%d%s%d\\n\" % (headlength, self.delimiter, self.FFI))\n self.header = StringIO(\"\".join(lines))\n self.header.seek(0)", "def update_header():\n token = request.json['token']\n u = user.User.query.filter(user.User.token == token).first()\n if u is None:\n abort(404)\n try:\n with store_context(fs_store):\n with open(files.path(request.json['header'])) as f:\n u.header_icon.from_file(f)\n db.session.merge(u)\n db.session.commit()\n return jsonify(u.to_dict())\n except Exception, e:\n return jsonify(dict(result='fail',message='Can not find image error.'))", "def setDataField(self, dataField):\n dataFieldLength = len(dataField)\n if dataFieldLength < MIN_DATA_FIELD_BYTE_SIZE:\n raise AttributeError(\"data field must contain at least 1 character\")\n if dataFieldLength > MAX_DATA_FIELD_BYTE_SIZE:\n raise AttributeError(\"data field must contain at most 65536 characters\")\n # resize according to the data field length\n self.setLen(PRIMARY_HEADER_BYTE_SIZE + dataFieldLength)\n # fill in the data field\n self.setBytes(PRIMARY_HEADER_BYTE_SIZE, dataFieldLength, dataField)\n # update CCSDS header\n self.setPacketLength()", "def __setattr__(self, item, value):\n if item in ('header', 'lines', 'mag', 'z', 'cubes', 'images',\n 'spectra', 'tables', '_logger', '_filename',\n '_default_size', 'default_size'):\n super(Source, self).__setattr__(item, value)\n else:\n self.header[item] = value", "def _writeCommonHeader(self):\n # Line 1 if often overwritten at _fixHeaderLength\n self.header.write(wrapLine(\"NLHEAD_FFI\", self.annotation, self.delimiter, \"%d%s%d\\n\" % (self.NLHEAD, self.delimiter, self.FFI)))\n self.header.write(getAnnotation(\"ONAME\", self.annotation, delimiter = self.delimiter) + stripQuotes(self.ONAME) + \"\\n\")\n self.header.write(getAnnotation(\"ORG\", self.annotation, delimiter = self.delimiter) + stripQuotes(self.ORG) + \"\\n\")\n self.header.write(getAnnotation(\"SNAME\", self.annotation, delimiter = self.delimiter) + stripQuotes(self.SNAME) + \"\\n\")\n self.header.write(getAnnotation(\"MNAME\", self.annotation, delimiter = self.delimiter) + stripQuotes(self.MNAME) + \"\\n\")\n self.header.write(wrapLine(\"IVOL_NVOL\", self.annotation, 
self.delimiter, \"%d%s%d\\n\" % (self.IVOL, self.delimiter, self.NVOL)))\n line = \"%d %d %d%s%d %d %d\\n\" % (self.DATE[0], self.DATE[1], self.DATE[2], self.delimiter, self.RDATE[0], self.RDATE[1], self.RDATE[2])\n self.header.write(wrapLine(\"DATE_RDATE\", self.annotation, self.delimiter, line))", "def __set_header(self, header):\n\n if APIKEYHEADER not in header:\n header[APIKEYHEADER] = self.__client.get_apikey()\n if ROUTETAG not in header:\n header[ROUTETAG] = self.__route_tag\n if FABIOROUTETAGHEADER not in header:\n header[FABIOROUTETAGHEADER] = self.__route_tag\n\n return header", "def del_header_value(old_rmap, new_rmap, key):\n mapping = rmap.load_mapping(old_rmap)\n del mapping.header[key]\n mapping.write(new_rmap)", "def __set_header(self, header):\n\n if APIKEYHEADER not in header:\n header[APIKEYHEADER] = self.__client.get_apikey()\n\n if ROUTETAG not in header:\n header[ROUTETAG] = self.__route_tag\n\n if FABIOROUTETAGHEADER not in header:\n header[FABIOROUTETAGHEADER] = self.__route_tag\n\n return header", "def transfer_header(infl, outfl):\n\n print \"Transfer\", infl, \"to\", outfl\n fin = pyfits.open(infl)\n fout = pyfits.open(outfl, 'update')\n\n dont_transfer = [\"HSTSLAC\", \"MDRIZSKY\", \"LACOSMIC\", \"HISTORY\", \"COMMENT\", \"\"]\n\n print \"Transferring: \",\n for i in range(len(fin)):\n for key in fin[i].header:\n if dont_transfer.count(key) == 0:\n if fin[i].header[key] != fout[i].header.get(key, default = None):\n print key,\n\n fout[i].header[key] = fin[i].header[key]\n fout.flush()\n fout.close()\n fin.close()\n print", "def update_header():\n print_debug_info()\n if not should_do_write():\n debug(\"should not write this buffer.\")\n return\n\n if not (has_header() or suffix_is_supported()):\n # This file do not have a header, or it's format is unknown, quit.\n debug(\"cannot add header to a script of unknown format.\")\n return\n\n # if current buffer is not modified, do not bother to update it's date.\n if not modified():\n debug(\"Buffer not modified, just quit\")\n return\n\n row, column = vim.current.window.cursor\n header_template = globals().get(\"%s_header\" % SUFFIX).rstrip()\n\n # if line has the keyword, find the current for the keyword, get the line, re-render it and fill it in.\n head = CURRENT_BUFFER[:10]\n\n more_updates = vim.eval(\"g:BHUpdates\")\n\n update = {\n 'Maintained by': AUTHOR,\n 'Modified by': AUTHOR,\n 'Last modified': datetime.now().strftime(\"%Y-%m-%d %H:%M\"),\n 'Filename': FILENAME,\n }\n update.update(more_updates)\n for index, line in enumerate(head):\n for keyword in update:\n if line.find(keyword) != -1:\n original_line = [_line for _line in header_template.splitlines() if _line.find(keyword) != -1]\n if original_line:\n original_line = original_line[0]\n else:\n continue\n debug(\"original line: %s\" % original_line)\n debug(\"line to be replaced: %s\" % line)\n rendered_line = original_line % {KEYWORDS[keyword]: update[keyword]}\n debug(\"rendered line: %s\" % rendered_line)\n CURRENT_BUFFER[index] = rendered_line\n\n vim.current.window.cursor = (row, column)", "def set_extra_header(self, key, value):\n self.headers[key] = value", "def _augment_info(self, header):\n # Information on carriers\n header.add_info_line(\n vcfpy.OrderedDict(\n [\n (\"ID\", \"AFFECTED_CARRIERS\"),\n (\"Number\", \"1\"),\n (\"Type\", \"Integer\"),\n (\"Description\", \"Number of affected samples from pedigree that are carriers\"),\n ]\n )\n )\n header.add_info_line(\n vcfpy.OrderedDict(\n [\n (\"ID\", \"UNAFFECTED_CARRIERS\"),\n (\"Number\", 
\"1\"),\n (\"Type\", \"Integer\"),\n (\"Description\", \"Number of unaffected samples from pedigree that are carriers\"),\n ]\n )\n )\n header.add_info_line(\n vcfpy.OrderedDict(\n [\n (\"ID\", \"BACKGROUND_CARRIERS\"),\n (\"Number\", \"1\"),\n (\"Type\", \"Integer\"),\n (\"Description\", \"Number of background samples that are carriers\"),\n ]\n )\n )\n for anno_args in self.args.annotation_beds:\n header.add_info_line(\n vcfpy.OrderedDict(\n [\n (\"ID\", anno_args[\"info\"]),\n (\"Number\", \".\"),\n (\"Type\", \"String\"),\n (\"Description\", anno_args[\"description\"]),\n ]\n )\n )\n return header", "def sector(self, sector: str):\n\n self._sector = sector", "def add_naxis_to_fitsio_header(hdr,extra_hdr):\n if 'ZNAXIS1' in extra_hdr or 'ZNAXIS2' in extra_hdr:\n hdr.add_record({'name':'ZNAXIS1','value':extra_hdr['ZNAXIS1']})\n hdr.add_record({'name':'ZNAXIS2','value':extra_hdr['ZNAXIS2']})\n\n if 'NAXIS1' in extra_hdr or 'NAXIS2' in extra_hdr:\n hdr.add_record({'name':'NAXIS1','value':extra_hdr['NAXIS1']})\n hdr.add_record({'name':'NAXIS2','value':extra_hdr['NAXIS2']})\n\n return hdr" ]
[ "0.62390125", "0.59990454", "0.58095795", "0.57502204", "0.56584907", "0.56279963", "0.55969024", "0.5521237", "0.5509375", "0.5504716", "0.5496373", "0.5363723", "0.531794", "0.5293608", "0.5290567", "0.5276565", "0.52743393", "0.52416605", "0.5224283", "0.5224242", "0.5216097", "0.5216092", "0.5214949", "0.52107507", "0.51959896", "0.51948327", "0.5185737", "0.5183763", "0.5181474", "0.5177489" ]
0.7021669
0
returns a modified header buffer with the image base changed to the provided base address
def _fix_header_image_base(self, header, nt_header): imb_offs = nt_header.OptionalHeader.ImageBase.obj_offset - self.obj_offset imb = nt_header.OptionalHeader.ImageBase newval = struct.pack(imb.format_string, int(self.obj_offset)) return header[:imb_offs] + newval + header[imb_offs+imb.size():]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _read_header(self):\n f = self._open(self.filename, 'rb')\n idx = 0\n header = b''\n # reading the header \n while idx < 13: \n header += f.readline().rstrip() # removes the \"\\n\\r\" at the end\n idx += 1\n # \"magically\" compute the data offset\n try:\n self._offset_auto = ord(header[2]) + 1856\n except:\n self._offset_auto = header[2] + 1856\n\n\n\n header = header[:self._offset_auto+300] # add an extra random header for offset\n header = re.sub(r'(?P<section>\\[[^\\]]+\\])', '\\n\\g<section>', header.decode('latin1'))\n header = header.splitlines()[1:]\n self.header = dict([self._header_sect2dict(line) for line in header])\n self.shape = np.array(self.header['Acquisition']['areGRBScan'].split(',')[-2:]).astype(np.int)\n f.close()\n\n offset_list = {'auto': self._offset_auto,\n 'from_end': -np.prod(self.shape)*self._nbytes,\n 'from_end_4k': - np.prod(self.shape)*self._nbytes - 4092}\n\n if self._offset_input in offset_list:\n\n self._offset_data = offset_list[self._offset_input]\n if self._offset_input.startswith('from_end'):\n # set the flag to seek from the end of the file.\n self._offset_whence = 2\n elif type(self._offset_input) is int:\n self._offset_data = self._offset_input\n else:\n raise ValueError\n\n \n\n return self.header", "def get_base_header(self):\n while '\\n' not in self.client_buffer:\n self.client_buffer += self.client.recv(BUFFER_LENGTH)\n (data, _, self.client_buffer) = self.client_buffer.partition('\\n')\n return data.split()", "def prepend_header(rendered_header):\n debug(\"adding header\")\n _range = CURRENT_BUFFER.range(0, 0)\n _range.append(rendered_header.split(\"\\n\"))", "def load_header(base_path, subvolume):\n with h5py.File(file_path(base_path, subvolume, 'subvolume'), 'r') as f:\n header = dict(f['Header'].attrs.items())\n header.update({key: f['Header'][key][:] for key in f['Header'].keys()})\n \n return header", "def Header(self): # Override the ctypes Header for the struct -> return extended header\n addr = ctypes.addressof(self)\n sheader = super(AceBase, type(self)).Header\n return AceHeader.from_address(addr + sheader.offset)", "def update_header():\n print_debug_info()\n if not should_do_write():\n debug(\"should not write this buffer.\")\n return\n\n if not (has_header() or suffix_is_supported()):\n # This file do not have a header, or it's format is unknown, quit.\n debug(\"cannot add header to a script of unknown format.\")\n return\n\n # if current buffer is not modified, do not bother to update it's date.\n if not modified():\n debug(\"Buffer not modified, just quit\")\n return\n\n row, column = vim.current.window.cursor\n header_template = globals().get(\"%s_header\" % SUFFIX).rstrip()\n\n # if line has the keyword, find the current for the keyword, get the line, re-render it and fill it in.\n head = CURRENT_BUFFER[:10]\n\n more_updates = vim.eval(\"g:BHUpdates\")\n\n update = {\n 'Maintained by': AUTHOR,\n 'Modified by': AUTHOR,\n 'Last modified': datetime.now().strftime(\"%Y-%m-%d %H:%M\"),\n 'Filename': FILENAME,\n }\n update.update(more_updates)\n for index, line in enumerate(head):\n for keyword in update:\n if line.find(keyword) != -1:\n original_line = [_line for _line in header_template.splitlines() if _line.find(keyword) != -1]\n if original_line:\n original_line = original_line[0]\n else:\n continue\n debug(\"original line: %s\" % original_line)\n debug(\"line to be replaced: %s\" % line)\n rendered_line = original_line % {KEYWORDS[keyword]: update[keyword]}\n debug(\"rendered line: %s\" % rendered_line)\n 
CURRENT_BUFFER[index] = rendered_line\n\n vim.current.window.cursor = (row, column)", "def update_header():\n token = request.json['token']\n u = user.User.query.filter(user.User.token == token).first()\n if u is None:\n abort(404)\n try:\n with store_context(fs_store):\n with open(files.path(request.json['header'])) as f:\n u.header_icon.from_file(f)\n db.session.merge(u)\n db.session.commit()\n return jsonify(u.to_dict())\n except Exception, e:\n return jsonify(dict(result='fail',message='Can not find image error.'))", "def _create_header(cls, width, height):\n\n\t\ttotal_header_size = cls.bmp_header_len + 40 # V3 len = 40 bytes\n\t\tpadding_size = width & 3 # Magic stuff\n\t\tbitmap_size = ((width * 3) + padding_size) * height\n\t\tfile_size = total_header_size + bitmap_size\n\t\t\n\t\t# BMP header: Magic (2 bytes), file size, 2 ignored values, bitmap offset\n\t\theader = struct.pack('<2s I 2H I', \"BM\", file_size, 0, 0, total_header_size)\n\n\t\t# DIB V3 header: header size, px width, px height, num of color planes, bpp, compression method,\n\t\t# bitmap data size, horizontal resolution, vertical resolution, number of colors in palette, number of important colors used\n\t\t# Few of these matter, so there are a bunch of default/\"magic\" numbers here...\n\t\theader += struct.pack('I 2i H H I I 2i 2I', 40, width, height, 1, 24, 0, bitmap_size, 0x0B13, 0x0B13, 0, 0)\n\n\t\treturn header", "def _getheaderfromunit(self,headernum, progress=False, usecache=True, crc=None):\n \n cachefilename = self._getcachefilename(headernum=headernum,crc=crc)\n \n if cachefilename is None:\n return None\n \n cachefilename = self._cachedir + os.sep + cachefilename\n\n data=''\n\n if usecache and os.path.isfile(cachefilename):\n if progress:\n print 'Cache entry found (' + cachefilename + \")\"\n with open(cachefilename,\"rb\") as myfile:\n data = myfile.read()\n else:\n ndx = 0\n newdata = data = self.reg(regnum=0xCC, index=ndx, index2=headernum, write=1)\n while len(newdata) > 0:\n ndx += 1\n newdata = self.reg(regnum=0xCC, index=ndx, index2=headernum, write=1)\n data += newdata\n if progress and not ndx%5:\n sys.stdout.write('.')\n sys.stdout.flush()\n if len(data) > 0:\n if usecache:\n with open(cachefilename,\"wb\") as myfile:\n myfile.write(data)\n\n if len(data) > 0:\n decodeddata = zlib.decompress(data, 16+zlib.MAX_WBITS)\n self._keys[headernum] = data[-4:] \n else:\n decodeddata = None\n return decodeddata", "def copy_header(header_file, in_file, keep_dtype=True):\n hdr_img = nb.load(header_file)\n out_img = nb.load(in_file, mmap=False)\n hdr = hdr_img.header.copy()\n if keep_dtype:\n hdr.set_data_dtype(out_img.get_data_dtype())\n\n new_img = out_img.__class__(out_img.dataobj, None, hdr)\n if not keep_dtype:\n new_img.set_data_dtype(hdr_img.get_data_dtype())\n\n new_img.to_filename(in_file)\n return in_file", "def update_header(fopen):\n json_start = fopen.tell()\n fopen.seek(52, 0)\n fopen.write(struct.pack('<Q', json_start))\n fopen.seek(json_start)", "def update_header(self) -> None:\n self.header.partial_reset()\n self.header.point_format_id = self.points.point_format.id\n self.header.point_data_record_length = self.points.point_size\n\n if len(self.points) > 0:\n self.header.update(self.points)\n\n if self.header.version.minor >= 4:\n if self.evlrs is not None:\n self.header.number_of_evlrs = len(self.evlrs)\n self.header.start_of_waveform_data_packet_record = 0\n # TODO\n # if len(self.vlrs.get(\"WktCoordinateSystemVlr\")) == 1:\n # self.header.global_encoding.wkt = 1\n else:\n 
self.header.number_of_evlrs = 0", "def extend_headers(self, fragment):\r\n\r\n self.header_b.append(fragment)", "def _get_header_attr_from_snap(attr, num, base):\n f = absn.AbstractSnapshotFactory(num, base)\n value = f.get_header_attr(attr)\n del f\n return value", "def modify_header():\n\n print_debug_info()\n if not bool(int(vim.eval(\"g:BHModify\"))):\n return\n\n if not should_do_write():\n debug(\"should not write this buffer.\")\n return\n\n if not has_header():\n debug(\"This file has no header.\")\n return add_header()\n\n # only if the suffix is supported and we have a method to strip the comment.\n if not ((\"extract_comment_%s\" % SUFFIX) in globals() and suffix_is_supported()):\n return\n\n comment = globals()[\"extract_comment_%s\" % SUFFIX]()\n debug(\"comment: %s\" % str(comment))\n if not comment:\n debug(\"comment is empty\")\n return\n\n comment_dict = {}\n\n if len(comment) < 3:\n # Less than 3 lines of original comment, put them in Description part.\n comment_dict['Description'] = '\\n'.join(comment)\n else:\n comment_dict = read_comment(comment)\n if \"\" in comment_dict:\n del comment_dict[\"\"]\n new_header_dict = read_comment(globals().get(\"%s_header\" % SUFFIX).rstrip().splitlines())\n debug(\"new\")\n debug(set(new_header_dict.keys()))\n debug(set(comment_dict.keys()))\n debug(\"end\")\n if not set(new_header_dict.keys()) == set(comment_dict.keys()):\n return prepend_header(render_header(comment_dict))\n else:\n debug(\"do not modify header since we already have the same header.\")", "def __make_header__(self):\n header = lashead.Header(point_format=0)\n return header", "def header(self):\n header_str = self._base[0:self.s_allocator_header].tostring()\n magic, pos, used = struct.unpack(str('III'), header_str)\n\n assert magic == self._magic_num, \\\n 'invalid header magic[%d] in shared memory' % (magic)\n return self._header_pages, self._total_pages, pos, used", "def initialize_header(fopen, uid):\n fopen.seek(0, 0)\n fopen.write(b'\\x84\\x83\\x82\\x81')\n fopen.write(\n struct.pack('<32s', COMPATIBILITY_VERSION.ljust(32, b'\\x00'))\n )\n fopen.write(struct.pack('<16s', uid.bytes))\n fopen.seek(8, 1)", "def _decode_header(self):\n #header = self.file_content[0:6]\n log_screen_descr = self.file_content[6:13]\n self.canvas_width = log_screen_descr[0] + (log_screen_descr[1]<<8)\n self.canvas_height = log_screen_descr[2] + (log_screen_descr[3]<<8)\n # is there a global color table? 
(usually yes)\n flags = log_screen_descr[4]\n self.glob_col_table = (flags & 0b10000000) != 0\n\n # determine the number of bits per primary color value\n self.color_resolution = (flags & 0b01110000) >> 4\n self.bits_per_pixel = self.color_resolution + 1\n\n # If the value is 1, then the colors in the global color table are sorted\n # in order of \"decreasing importance,\" which typically means \"decreasing\n # frequency\" in the image\n self.sort_flag = (flags & 0b00001000) != 0\n\n # If this value is N, then the actual table size is 2^(N+1).\n self.glob_col_table_sz = 1 << ((flags & 0b00000111)+1)\n\n self.bg_color_index = log_screen_descr[5]\n self.pix_asp_ratio = log_screen_descr[6]", "def writeheader(filename, header):\n # convert string to [unsigned] byte array\n hh = np.zeros(512, dtype='uint8')\n for i, ss in enumerate(header):\n hh[i] = ord(ss)\n # write header to file\n file_arr = np.memmap(filename, dtype='uint8', mode='r+', shape=(512,))\n file_arr[:512] = hh[:]\n del file_arr\n return", "def lookup_fits_header(bucket_path):\n header = None\n request_params = dict(bucket_path=bucket_path, bucket_name=INCOMING_BUCKET)\n res = requests.post(FITS_HEADER_URL, json=request_params)\n if res.ok:\n header = res.json()['header']\n\n return header", "def __read_header(self):\n header = self.__file_object.readline()\n header_string = header.decode('utf-8')\n print(header_string)\n # Ignore first letter\n self.frame_width = int(re.findall('W\\d+', header_string)[0][1:])\n self.frame_height = int(re.findall('H\\d+', header_string)[0][1:])\n self.frame_rate = re.findall('F\\d+\\:\\d+', header_string)[0][1:]\n\n # Calculate actual frame rate given the value is a ratio\n tokens = [int(d.replace(' ', '')) for d in self.frame_rate.split(':')]\n self.frame_rate = round(tokens[0] / tokens[1], 1)\n\n self.__pixel_aspect_ratio = re.findall('A\\d+\\:\\d+', header_string)[0][1:]\n\n # Calculate actual pixel aspect ratio rate given the value is a ratio\n tokens = [int(d.replace(' ', '')) for d in self.__pixel_aspect_ratio.split(':')]\n self.__pixel_aspect_ratio = round(tokens[0] / tokens[1], 1)\n\n # Don't ignore for interlacing\n self.__interlacing_mode = re.findall('I(p|t|b|m)', header_string)[0]\n\n # Ignore first 'FRAME\\n' terminator so the file object points to the first byte of raw data of the first frame\n self.__file_object.readline()\n\n self.__first_frame_raw_data_position = self.__file_object.tell()\n\n self.determine_color_space_by_frame_size()\n\n # Restore\n self.__file_object.seek(self.__first_frame_raw_data_position)\n\n return header\n\n # Color space parameter is missing?\n print('FourCC:\\t\\t', header_string[:4])\n print('Input file:\\t', self.__input_file_path)\n print('Frame size:\\t', f'{self.frame_width}x{self.frame_height}')\n print('Frame rate:\\t', f'{self.frame_rate} FPS')\n print('Aspect Ratio:\\t', self.__pixel_aspect_ratio)\n print('Color space\\t', self.color_space)\n print('Frame size (raw data):', self.__frame_raw_data_size)\n print('Position of first raw:', self.__first_frame_raw_data_position)", "def get_buffer_start(blob):\n file_size = 0\n if blob.VCS_Fix:\n file_size = byte_size(ctypes.c_uint64(0)) * 4\n\n file_size += byte_size(blob.filesize)\n file_size += byte_size(blob.version)\n file_size += byte_size(blob.name)\n file_size += byte_size(blob.report_dir)\n file_size += byte_size(blob.stage_count)\n file_size += byte_size(ctypes.c_uint32(0))\n assert file_size == 0xf8, \"Blob format modified, please change the \" +\\\n 
\"FathomRun/tests/per_layer_tests/util/generate_test_data.py file\"\n file_size += blob.myriad_params.binary_size()\n file_size += blob.network.head[0].binary_size() * blob.network.count\n file_size += align(file_size, np.zeros(1), align_to=8)[0] - file_size\n return ctypes.c_uint32(file_size)", "def header_callback(self,buf):\n self.response_headers.extend(buf) #Optional TODO use chunk or byte-array storage", "def getHeader():\n return _HEADER", "def inject_header(self, base_url, soup):\n if _REMOVE_BANNER:\n return\n header_soup = BeautifulSoup(HEADER).find(\"div\")\n header_soup.find(\"input\", {\"id\": \"wmtbURL\"})['value'] = base_url\n soup.find(\"body\").insert(0, header_soup)", "def __set_header(self, header):\n\n if APIKEYHEADER not in header:\n header[APIKEYHEADER] = self.__client.get_apikey()\n if ROUTETAG not in header:\n header[ROUTETAG] = self.__route_tag\n if FABIOROUTETAGHEADER not in header:\n header[FABIOROUTETAGHEADER] = self.__route_tag\n\n return header", "def get_GP_header(stat_code, size, delta_t, time_delay, comment=\"broadband\"):\n\n # header_000 = stat_code + \" 0 broadband\\n\"\n # header_090 = stat_code + \" 90 broadband\\n\"\n # header_ver = stat_code + \" ver broadband\\n\"\n comment = str(comment)\n comment = comment.strip(\"\\n\")\n header_000 = \"{:s} 000 {:s}\\n\".format(stat_code, comment)\n header_090 = \"{:s} 090 {:s}\\n\".format(stat_code, comment)\n header_ver = \"{:s} ver {:s}\\n\".format(stat_code, comment)\n stat_info = (\"{:<10d}\" + 7 * \"{:<10.3f}\" + \"\\n\").format(\n size, delta_t, 0.0, 0.0, time_delay, 0.0, 0.0, 0.0\n )\n\n header_000 += stat_info\n header_090 += stat_info\n header_ver += stat_info\n\n return header_000, header_090, header_ver", "def transform_fasta_header(fastaheader):\n\n fastq_source, read_header = fastaheader.split(\" \", 1)[0].rsplit(\"_\", 1)\n fastq_base = fastq_source.rsplit(\"_\", 1)[0]\n return fastq_base, read_header", "def __set_header(self, header):\n\n if APIKEYHEADER not in header:\n header[APIKEYHEADER] = self.__client.get_apikey()\n\n if ROUTETAG not in header:\n header[ROUTETAG] = self.__route_tag\n\n if FABIOROUTETAGHEADER not in header:\n header[FABIOROUTETAGHEADER] = self.__route_tag\n\n return header" ]
[ "0.5801565", "0.5690087", "0.5652942", "0.56325054", "0.55486536", "0.54878336", "0.54709184", "0.5431581", "0.5403561", "0.53856033", "0.5357821", "0.5346563", "0.5325183", "0.5313993", "0.53054875", "0.5264218", "0.5193449", "0.5187051", "0.5177819", "0.5175555", "0.51432174", "0.5121992", "0.5112929", "0.5084305", "0.5028771", "0.5014131", "0.5013799", "0.5009269", "0.50031257", "0.5002997" ]
0.687387
0
Determines the string value for or end location of the key
def _determine_key(self, findend = False):
    if self.Key != None:
        name = None
        for n in self.Key:
            if n == None:
                return n
            # If the letter's valid, then deal with it
            if n == 0:
                if findend:
                    return n.obj_offset + n.size()
                name = self.obj_vm.read(self.Key.obj_offset, n.obj_offset - self.Key.obj_offset).decode("utf16", "ignore").encode("ascii", 'backslashreplace')
                break
        return name
    return self.Key
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def key_value_string_value(key_value_string, key):\n if key_value_string is None or key is None:\n return None\n words = key_value_string.split(' ')\n for i in range(0, len(words)-1):\n if words[i] == key + ':':\n return words[i+1]\n return None", "def get_value(self, key: str) -> Optional[str]:\n raise NotImplementedError", "def get(self, key: str) -> str:\n hashkey = self._gethash(key)\n if type(self.HashMap[hashkey]) is list:\n if len(self.HashMap[hashkey]) > 2:\n \"\"\"\n Return correct Key and value from the\n location which has a hashclash\n \"\"\"\n idx = self._find_if_hashclash(key, hashkey, \"v\")\n if idx is not None:\n return self.HashMap[hashkey][idx]\n elif self.HashMap[hashkey][0] == key:\n # Check that the data matches the key and return it if it does\n return self.HashMap[hashkey][1]\n return \"\"", "def __getitem__(self, key: str) -> str:\n return self.get(key)", "def tp_key_value(str_tag):\n rgx_split = re.compile(r'[\\@\\(\\)\\{\\}]')\n str_key, str_value = '', ''\n\n # count the pieces\n lst_parts = rgx_split.split(str_tag)\n lng_parts = len(lst_parts)\n\n # and winnow the noise\n if lng_parts > 1:\n str_key = lst_parts[1]\n if lng_parts > 2:\n for str_value in lst_parts[2:]:\n if str_value != '':\n break\n\n return (str_key, str_value)", "def selector(string,key,lkey,lval):\n print string\n ip = string.find(key)\n print 'key =',key, 'position =',ip\n if ip > -1:\n value = string[ip+lkey:ip+lkey+lval]\n print 'velue = ',value\n else:\n value = 'none'\n \n return value", "def get(self, key):\n return \"\"", "def get_key_value(line: str) -> str:\n if line.find('=') == -1:\n raise Exception(\"Error: Key line must have equal sign seperating name and value\")\n return line[line.find('=') + 1:]", "def __getitem__(self, key):\n return self.data.get(key, '')", "def get(self, key):\n\t\treturn self.__get(key, key[1:])", "def p_value_key(protItem):\n return protItem[-1]", "def p_value_key(protItem):\n return protItem[-1]", "def _find_if_hashclash(self, key: str, location: int, key_or_value: str):\n if key in self.HashMap[location]:\n idx = self.HashMap[location].index(key)\n else:\n idx = None\n\n if idx is not None:\n if key_or_value == \"v\":\n return idx + 1\n else:\n return idx", "def _GetKeyString(self):", "def _GetKeyString(self):", "def keyvalue(dict, key):\n try:\n return dict[key]\n except KeyError:\n return ''", "def dictkey(self):\n return self.start + \",\" + self.end", "def __getitem__(self, key: Union[str, Tuple[str, T]]) -> Union[str, T]:\n default: Union[str, T]\n if isinstance(key, tuple):\n key, default = key\n else:\n default = ''\n\n key = key.casefold()\n for k in self._keys:\n if k.casefold() == key:\n return self._keys[k]\n else:\n return default", "def __getitem__(self, key):\n return self.keyvaluepair_set.get(key=key).value", "def get_value(self, key):\n pass", "def get(self, key):", "def get(self, key):", "def __getitem__(self, key):\n ndx = self._findPosition(key)\n assert ndx is not None, 'Invalid map key'\n return self._entryList[ndx].value", "def get_record_value(record_entry, key):\n value = record_entry[key][\"value\"]\n return value[value.rfind(\"#\") + 1:]", "def selector_post(string,key,lkey,lval,sel_set):\n print string\n ip = string.find(key)\n print 'key =',key, 'position =',ip\n if ip > -1:\n value = string[ip-lval:ip]\n print 'value = ',value", "def get_string(self, key):\n tag = self.get_tag(key)\n if isinstance(tag, BDTString):\n return tag.val\n else:\n raise KeyError(\"No string value for key %s found in BDTCompound @%#x\" % (key, 
id(self)))", "def get(self, key):\r\n if not isinstance(key, str):\r\n raise TypeError(\"Key must be a string\")\r\n\r\n node = self._find_node(key)\r\n if node is None:\r\n return None\r\n else:\r\n return node.value[1]", "def parse_value_ending(self, value: str):\n if len(value) < 1:\n raise Exception(\"Failed to parse the __value.\")\n\n if value.endswith(\"T\"):\n return value[:-1], 12\n if value.endswith(\"G\"):\n return value[:-1], 9\n if value.endswith(\"M\"):\n return value[:-1], 9\n if value.endswith(\"k\") or value.endswith(\"K\"):\n return value[:-1], 3\n if value.endswith(\"m\"):\n return value[:-1], -3\n if value.endswith(\"u\"):\n return value[:-1], -6\n if value.endswith(\"n\"):\n return value[:-1], -9\n if value.endswith(\"p\"):\n return value[:-1], -12\n\n return value, 0", "def keysequence(value):\r\n return value.toString()", "def key(key):\n return key" ]
[ "0.7128954", "0.64660406", "0.6441887", "0.6365823", "0.62895465", "0.62735194", "0.6181749", "0.61817443", "0.61766124", "0.6114784", "0.61035806", "0.61035806", "0.60622257", "0.6052435", "0.6052435", "0.60078233", "0.5998765", "0.5990275", "0.59151965", "0.5914784", "0.5896677", "0.5896677", "0.58885336", "0.5870247", "0.58558506", "0.5854967", "0.58122444", "0.58018124", "0.5770523", "0.5766596" ]
0.665368
1
Renders a UTF16 string
def display_unicode(self, string):
    if string is None:
        return ''
    return string.decode("utf16", "ignore").encode("ascii", 'backslashreplace')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _text16(self, font, text, x0, y0, color=WHITE, background=BLACK):\n for char in text:\n ch = ord(char)\n if (font.FIRST <= ch < font.LAST\n and x0+font.WIDTH <= self.width\n and y0+font.HEIGHT <= self.height):\n\n each = 16\n if font.HEIGHT == 16:\n passes = 2\n size = 32\n else:\n passes = 4\n size = 64\n\n for line in range(passes):\n idx = (ch-font.FIRST)*size+(each*line)\n buffer = struct.pack(\n '>128H',\n color if font.FONT[idx] & _BIT7 else background,\n color if font.FONT[idx] & _BIT6 else background,\n color if font.FONT[idx] & _BIT5 else background,\n color if font.FONT[idx] & _BIT4 else background,\n color if font.FONT[idx] & _BIT3 else background,\n color if font.FONT[idx] & _BIT2 else background,\n color if font.FONT[idx] & _BIT1 else background,\n color if font.FONT[idx] & _BIT0 else background,\n color if font.FONT[idx+1] & _BIT7 else background,\n color if font.FONT[idx+1] & _BIT6 else background,\n color if font.FONT[idx+1] & _BIT5 else background,\n color if font.FONT[idx+1] & _BIT4 else background,\n color if font.FONT[idx+1] & _BIT3 else background,\n color if font.FONT[idx+1] & _BIT2 else background,\n color if font.FONT[idx+1] & _BIT1 else background,\n color if font.FONT[idx+1] & _BIT0 else background,\n color if font.FONT[idx+2] & _BIT7 else background,\n color if font.FONT[idx+2] & _BIT6 else background,\n color if font.FONT[idx+2] & _BIT5 else background,\n color if font.FONT[idx+2] & _BIT4 else background,\n color if font.FONT[idx+2] & _BIT3 else background,\n color if font.FONT[idx+2] & _BIT2 else background,\n color if font.FONT[idx+2] & _BIT1 else background,\n color if font.FONT[idx+2] & _BIT0 else background,\n color if font.FONT[idx+3] & _BIT7 else background,\n color if font.FONT[idx+3] & _BIT6 else background,\n color if font.FONT[idx+3] & _BIT5 else background,\n color if font.FONT[idx+3] & _BIT4 else background,\n color if font.FONT[idx+3] & _BIT3 else background,\n color if font.FONT[idx+3] & _BIT2 else background,\n color if font.FONT[idx+3] & _BIT1 else background,\n color if font.FONT[idx+3] & _BIT0 else background,\n color if font.FONT[idx+4] & _BIT7 else background,\n color if font.FONT[idx+4] & _BIT6 else background,\n color if font.FONT[idx+4] & _BIT5 else background,\n color if font.FONT[idx+4] & _BIT4 else background,\n color if font.FONT[idx+4] & _BIT3 else background,\n color if font.FONT[idx+4] & _BIT2 else background,\n color if font.FONT[idx+4] & _BIT1 else background,\n color if font.FONT[idx+4] & _BIT0 else background,\n color if font.FONT[idx+5] & _BIT7 else background,\n color if font.FONT[idx+5] & _BIT6 else background,\n color if font.FONT[idx+5] & _BIT5 else background,\n color if font.FONT[idx+5] & _BIT4 else background,\n color if font.FONT[idx+5] & _BIT3 else background,\n color if font.FONT[idx+5] & _BIT2 else background,\n color if font.FONT[idx+5] & _BIT1 else background,\n color if font.FONT[idx+5] & _BIT0 else background,\n color if font.FONT[idx+6] & _BIT7 else background,\n color if font.FONT[idx+6] & _BIT6 else background,\n color if font.FONT[idx+6] & _BIT5 else background,\n color if font.FONT[idx+6] & _BIT4 else background,\n color if font.FONT[idx+6] & _BIT3 else background,\n color if font.FONT[idx+6] & _BIT2 else background,\n color if font.FONT[idx+6] & _BIT1 else background,\n color if font.FONT[idx+6] & _BIT0 else background,\n color if font.FONT[idx+7] & _BIT7 else background,\n color if font.FONT[idx+7] & _BIT6 else background,\n color if font.FONT[idx+7] & _BIT5 else background,\n color if 
font.FONT[idx+7] & _BIT4 else background,\n color if font.FONT[idx+7] & _BIT3 else background,\n color if font.FONT[idx+7] & _BIT2 else background,\n color if font.FONT[idx+7] & _BIT1 else background,\n color if font.FONT[idx+7] & _BIT0 else background,\n color if font.FONT[idx+8] & _BIT7 else background,\n color if font.FONT[idx+8] & _BIT6 else background,\n color if font.FONT[idx+8] & _BIT5 else background,\n color if font.FONT[idx+8] & _BIT4 else background,\n color if font.FONT[idx+8] & _BIT3 else background,\n color if font.FONT[idx+8] & _BIT2 else background,\n color if font.FONT[idx+8] & _BIT1 else background,\n color if font.FONT[idx+8] & _BIT0 else background,\n color if font.FONT[idx+9] & _BIT7 else background,\n color if font.FONT[idx+9] & _BIT6 else background,\n color if font.FONT[idx+9] & _BIT5 else background,\n color if font.FONT[idx+9] & _BIT4 else background,\n color if font.FONT[idx+9] & _BIT3 else background,\n color if font.FONT[idx+9] & _BIT2 else background,\n color if font.FONT[idx+9] & _BIT1 else background,\n color if font.FONT[idx+9] & _BIT0 else background,\n color if font.FONT[idx+10] & _BIT7 else background,\n color if font.FONT[idx+10] & _BIT6 else background,\n color if font.FONT[idx+10] & _BIT5 else background,\n color if font.FONT[idx+10] & _BIT4 else background,\n color if font.FONT[idx+10] & _BIT3 else background,\n color if font.FONT[idx+10] & _BIT2 else background,\n color if font.FONT[idx+10] & _BIT1 else background,\n color if font.FONT[idx+10] & _BIT0 else background,\n color if font.FONT[idx+11] & _BIT7 else background,\n color if font.FONT[idx+11] & _BIT6 else background,\n color if font.FONT[idx+11] & _BIT5 else background,\n color if font.FONT[idx+11] & _BIT4 else background,\n color if font.FONT[idx+11] & _BIT3 else background,\n color if font.FONT[idx+11] & _BIT2 else background,\n color if font.FONT[idx+11] & _BIT1 else background,\n color if font.FONT[idx+11] & _BIT0 else background,\n color if font.FONT[idx+12] & _BIT7 else background,\n color if font.FONT[idx+12] & _BIT6 else background,\n color if font.FONT[idx+12] & _BIT5 else background,\n color if font.FONT[idx+12] & _BIT4 else background,\n color if font.FONT[idx+12] & _BIT3 else background,\n color if font.FONT[idx+12] & _BIT2 else background,\n color if font.FONT[idx+12] & _BIT1 else background,\n color if font.FONT[idx+12] & _BIT0 else background,\n color if font.FONT[idx+13] & _BIT7 else background,\n color if font.FONT[idx+13] & _BIT6 else background,\n color if font.FONT[idx+13] & _BIT5 else background,\n color if font.FONT[idx+13] & _BIT4 else background,\n color if font.FONT[idx+13] & _BIT3 else background,\n color if font.FONT[idx+13] & _BIT2 else background,\n color if font.FONT[idx+13] & _BIT1 else background,\n color if font.FONT[idx+13] & _BIT0 else background,\n color if font.FONT[idx+14] & _BIT7 else background,\n color if font.FONT[idx+14] & _BIT6 else background,\n color if font.FONT[idx+14] & _BIT5 else background,\n color if font.FONT[idx+14] & _BIT4 else background,\n color if font.FONT[idx+14] & _BIT3 else background,\n color if font.FONT[idx+14] & _BIT2 else background,\n color if font.FONT[idx+14] & _BIT1 else background,\n color if font.FONT[idx+14] & _BIT0 else background,\n color if font.FONT[idx+15] & _BIT7 else background,\n color if font.FONT[idx+15] & _BIT6 else background,\n color if font.FONT[idx+15] & _BIT5 else background,\n color if font.FONT[idx+15] & _BIT4 else background,\n color if font.FONT[idx+15] & _BIT3 else background,\n color if 
font.FONT[idx+15] & _BIT2 else background,\n color if font.FONT[idx+15] & _BIT1 else background,\n color if font.FONT[idx+15] & _BIT0 else background\n )\n self.blit_buffer(buffer, x0, y0+8*line, 16, 8)\n x0 += font.WIDTH", "def reverse(self, s):\n return '\\x16%s\\x16' % s", "def render_string(_str):\n\t\treturn str.encode(_str)", "def _reg_encode_utf16_list(self, xlist):\n t = '' \n for x in xlist: \n t += self._reg_encode_utf16(x + u'\\u0000') # null term \n t += self._reg_encode_utf16(u'\\u0000') # end of list (double null) \n return t", "def b16encode(s: str) -> str:\n return base64.b16encode(s.encode()).decode()", "def test_analyze_syntax_utf16():\n test_string = \"a \\u00e3 \\u0201 \\U0001f636 b\"\n byte_array = test_string.encode(\"utf16\")\n # Remove the byte order marker, which the offsets don't account for\n byte_array = byte_array[2:]\n result = analyze.analyze_syntax(test_string, encoding=\"UTF16\")\n tokens = result[\"tokens\"]\n\n assert tokens[0][\"text\"][\"content\"] == \"a\"\n # The offset is an offset into an array where each entry is 16 bits. Since\n # we have an 8-bit array, the offsets should be doubled to index into our\n # array.\n offset = 2 * tokens[0][\"text\"].get(\"beginOffset\", 0)\n assert (\n byte_array[offset : offset + 2].decode(\"utf16\") == tokens[0][\"text\"][\"content\"]\n )\n\n assert tokens[1][\"text\"][\"content\"] == \"\\u00e3\"\n offset = 2 * tokens[1][\"text\"].get(\"beginOffset\", 0)\n # A UTF16 character with a low codepoint is 16 bits (2 bytes) long, so\n # slice out 2 bytes starting from the offset. Then interpret the bytes as\n # utf16 for comparison.\n assert (\n byte_array[offset : offset + 2].decode(\"utf16\") == tokens[1][\"text\"][\"content\"]\n )\n\n assert tokens[2][\"text\"][\"content\"] == \"\\u0201\"\n offset = 2 * tokens[2][\"text\"].get(\"beginOffset\", 0)\n # A UTF16 character with a low codepoint is 16 bits (2 bytes) long, so\n # slice out 2 bytes starting from the offset. Then interpret the bytes as\n # utf16 for comparison.\n assert (\n byte_array[offset : offset + 2].decode(\"utf16\") == tokens[2][\"text\"][\"content\"]\n )\n\n assert tokens[3][\"text\"][\"content\"] == \"\\U0001f636\"\n offset = 2 * tokens[3][\"text\"].get(\"beginOffset\", 0)\n # A UTF16 character with a high codepoint is 32 bits (4 bytes) long, so\n # slice out 4 bytes starting from the offset. 
Then interpret those bytes as\n # utf16 for comparison.\n assert (\n byte_array[offset : offset + 4].decode(\"utf16\") == tokens[3][\"text\"][\"content\"]\n )\n\n # This demonstrates that the offset takes into account the variable-length\n # characters before the target token.\n assert tokens[4][\"text\"][\"content\"] == \"b\"\n offset = 2 * tokens[4][\"text\"].get(\"beginOffset\", 0)\n # Even though 'b' is only one byte long, utf16 still encodes it using 16\n # bits\n assert (\n byte_array[offset : offset + 2].decode(\"utf16\") == tokens[4][\"text\"][\"content\"]\n )", "def render(self) -> str:\n with no_colors():\n self._render()\n return self.sio.getvalue()", "def rendermsg(self,msg):\n return ' '.join(['%02x'%ord(x) for x in msg])", "def swd_write16(self, output, value):\n return self.swd_write(output, value, 16)", "def read_u16(self) -> int:", "def DrawStringAt(self, x, y, s, color=(229, 153, 153, 255)):\r\n self.screen.blit(self.font.render(s, True, color), (x, y))", "def write16(self, register, value):\n raise NotImplementedError", "def __repr__(self):\n return \"ANSIString(%s, decoded=True)\" % repr(self._raw_string)", "def showText(self, context, text, size=1, color=colors.WHITE, conversion=True):\n context.print(text, self.components, size, color=color, conversion=conversion)", "def read_u16(self) -> int:\n ...", "def read_string(self):\n\n # length may be -1, 0, or a positive integer\n length = self.read_and_unpack('l')[0]\n if length > 0:\n return self.read(length).decode(self.utf_16_decoder)\n else:\n return ''", "def flash_write16(self, addr, data):\n return self.flash_write(addr, data, 16)", "def hexchar(i):\n if i > -1 and i < 16:\n return \"%X\" % i\n else:\n return None", "def test_i18n04(self):\n output = self.engine.render_to_string('i18n04', {'anton': b'\\xc3\\x85'})\n self.assertEqual(output, 'å')", "def test_i18n04(self):\n output = self.engine.render_to_string('i18n04', {'anton': b'\\xc3\\x85'})\n self.assertEqual(output, 'å')", "def intRender(self, number):\n\n data = unicode(number)\n bites = list()\n\n while data:\n bites.append(data[-3:])\n data = data[:-3]\n\n return \" \".join(reversed(bites))", "def drawString(text: str):\n pass", "def encoding():\n\n return render_template(\"UTF-8-demo.txt\")", "def render(self, data):\n logging.info(\"render (start)\")\n\n seria = json.dumps(data, ensure_ascii=False, indent=4)\n logging.info(\"rendered %s characters (end)\" % len(seria))\n return seria", "async def charinfo(self, ctx: Context, *, characters: str) -> None:\n match = re.match(r\"<(a?):(\\w+):(\\d+)>\", characters)\n if match:\n embed = Embed(\n title=\"Non-Character Detected\",\n description=\"Only unicode characters can be processed, but a custom Discord emoji \" \"was found. 
Please remove it and try again.\",\n )\n embed.colour = Color.red()\n await ctx.send(embed=embed)\n return\n\n if len(characters) > 25:\n embed = Embed(title=f\"Too many characters ({len(characters)}/25)\")\n embed.colour = Color.red()\n await ctx.send(embed=embed)\n return\n\n def get_info(char: str) -> Tuple[str, str]:\n digit = f\"{ord(char):x}\"\n if len(digit) <= 4:\n u_code = f\"\\\\u{digit:>04}\"\n else:\n u_code = f\"\\\\U{digit:>08}\"\n url = f\"https://www.compart.com/en/unicode/U+{digit:>04}\"\n name = f\"[{unicodedata.name(char, '')}]({url})\"\n info = f\"`{u_code.ljust(10)}`: {name} - {char}\"\n return info, u_code\n\n charlist, rawlist = zip(*(get_info(c) for c in characters))\n\n embed = Embed(description=\"\\n\".join(charlist))\n embed.set_author(name=\"Character Info\")\n\n if len(characters) > 1:\n embed.add_field(name=\"Raw\", value=f\"`{''.join(rawlist)}`\", inline=False)\n\n await ctx.send(embed=embed)", "def render(self, mode='human'):", "def binary(self):\n self.appendString(\"%\\xD0\\xD0\\xD0\\xD0\\n\")", "def test_i18n17(self):\n output = self.engine.render_to_string('i18n17', {'anton': 'α & β'})\n self.assertEqual(output, 'α &amp; β')", "def test_i18n17(self):\n output = self.engine.render_to_string('i18n17', {'anton': 'α & β'})\n self.assertEqual(output, 'α &amp; β')", "def test_i18n16(self):\n with translation.override('de'):\n output = self.engine.render_to_string('i18n16')\n self.assertEqual(output, '<')" ]
[ "0.58147746", "0.568368", "0.565337", "0.5632572", "0.5459072", "0.5347117", "0.53401065", "0.5276823", "0.5213218", "0.5203724", "0.5197888", "0.5144833", "0.51248384", "0.5114635", "0.511052", "0.50886446", "0.50571483", "0.50373083", "0.5031097", "0.5031097", "0.50146073", "0.4982403", "0.4976993", "0.49608484", "0.49439833", "0.4931683", "0.49309608", "0.49154308", "0.49154308", "0.49119192" ]
0.62821937
0
Returns the file version
def file_version(self):
    return self.get_version(self.FileVerMS) + "." + self.get_version(self.FileVerLS)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def version(self):\n a = re.search('(?<=_V)\\d{1,2}', self.fname)\n if a is None:\n return None\n else:\n return int(a.group())", "def get_version():\n\n current_dir = os.path.dirname(os.path.realpath(__file__))\n version_path = os.path.join(current_dir, VERSION_FILE)\n\n with open(version_path, 'r') as version_fd:\n return version_fd.read().strip()", "def getVersion(self,fileName):\n if not fileName in self.data or not self.data[fileName].tes3:\n return ''\n maVersion = reVersion.search(self.data[fileName].tes3.hedr.description)\n return (maVersion and maVersion.group(2)) or ''", "def version() -> str:\n with open(join(dirname(__file__), 'resources', 'VERSION')) as f:\n return f.read().strip()", "def get_version():\r\n try:\r\n with open('version', 'r') as version_file:\r\n return str(version_file.readline())\r\n except:\r\n return False", "def get_current_version(self):\n #full_path = self._root.knob('name').value()\n full_path = os.path.normpath(\n self.comp.GetAttrs()['COMPS_FileName']\n ).replace('\\\\', '/')\n return self.get_version_from_full_path(full_path)", "def read_version(self, fname):\n version = 'unknown'\n lines = open(fname).readlines()\n for line in lines:\n if \" Version\" in line:\n version = line.split()[-2]\n break\n return version", "def get_version():\n\n with open('u2fval/__init__.py', 'r') as f:\n match = VERSION_PATTERN.search(f.read())\n return match.group(1)", "def get_rvt_file_version(rvt_file):\n file_info = get_basic_info(rvt_file, cleaned_str=True)\n re_version = re.compile(r\"Format: (\\d{4})\")\n found = re.findall(re_version, file_info)\n if found:\n rvt_file_version = found[0]\n else:\n re_version = re.compile(r\"Autodesk Revit (\\d{4})\")\n rvt_file_version = re.findall(re_version, file_info)[0]\n return rvt_file_version", "def version(self):\n self._get_latest_content()\n return self._data.get('version', None)", "def version(self):\n _, body = self.request('/', 'GET')\n return body.get('version', None)", "def get_version():\n return 1", "def read_version():\n # code parts were taken from here https://stackoverflow.com/a/67692\n\n path2setup = os.path.dirname(__file__)\n version_file = os.path.abspath(\n os.path.join(path2setup, \"diffusion_maps\", \"version.py\"))\n\n spec = importlib.util.spec_from_file_location(\"version\", version_file)\n version = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(version)\n return version.version.v_short", "def _get_version(self):", "def get_version(self):\n return self.bot_data_file[\"version\"]", "def _get_package_version():\n file = join(get_root(), 'VERSION')\n\n if exists(file):\n with open(file) as file:\n return file.read()\n\n return ''", "def getVersion(self):\n try:\n filepath = f\"{EXTERNAL_DIRECTORY}/VERSION\"\n with open(filepath, \"r\") as file:\n lines = file.readlines()\n for line in lines:\n if line != \"\\n\":\n return line.replace(\"\\n\", \"\")\n\n\n except FileNotFoundError as e:\n _LOGGER.error(\"Could not find VERSION File.\")\n return None\n except Exception as e:\n _LOGGER.debug(\"Could not read program version file. 
Error message: %s\", e)\n return None", "def getVersion():\n try:\n fh=open(version_py, 'r')\n version=fh.read().strip().split('=')[-1].replace(\"'\",'').lstrip()\n fh.close()\n except:\n return None\n\n return version", "def get_version():\n version = \"unknown\"\n try:\n version_file = open(VERSIONFILE, \"r\")\n for line in version_file:\n if line.startswith('__version__'):\n version = line.split(\"'\")[1]\n break\n except EnvironmentError:\n pass # Okay, there is no version file.\n return version", "def src_get_version():\n return ffi.string(_lib.src_get_version()).decode()", "def get_version():\n return '%d.%d.%d' % version_info", "async def version(self):\n self.do(\"version\")\n return (await self.read(7)).strip()", "def _version(self):\n # TODO: Can we delete this method and just print the line from the\n # reqs file verbatim instead?\n def version_of_archive(filename, package_name):\n # Since we know the project_name, we can strip that off the left, strip\n # any archive extensions off the right, and take the rest as the\n # version.\n for ext in ARCHIVE_EXTENSIONS:\n if filename.endswith(ext):\n filename = filename[:-len(ext)]\n break\n # Handle github sha tarball downloads.\n if is_git_sha(filename):\n filename = package_name + '-' + filename\n if not filename.lower().replace('_', '-').startswith(package_name.lower()):\n # TODO: Should we replace runs of [^a-zA-Z0-9.], not just _, with -?\n give_up(filename, package_name)\n return filename[len(package_name) + 1:] # Strip off '-' before version.\n\n def version_of_wheel(filename, package_name):\n # For Wheel files (http://legacy.python.org/dev/peps/pep-0427/#file-\n # name-convention) we know the format bits are '-' separated.\n whl_package_name, version, _rest = filename.split('-', 2)\n # Do the alteration to package_name from PEP 427:\n our_package_name = re.sub(r'[^\\w\\d.]+', '_', package_name, re.UNICODE)\n if whl_package_name != our_package_name:\n give_up(filename, whl_package_name)\n return version\n\n def give_up(filename, package_name):\n raise RuntimeError(\"The archive '%s' didn't start with the package name '%s', so I couldn't figure out the version number. My bad; improve me.\" %\n (filename, package_name))\n\n get_version = (version_of_wheel\n if self._downloaded_filename().endswith('.whl')\n else version_of_archive)\n return get_version(self._downloaded_filename(), self._project_name())", "def get_version(self):\n pass", "def get_version(self):\n return self.version", "def version(version_file=default_version_file, osp_package=default_osp_package):\n\n if os.path.exists(version_file):\n (version_string, version_name) = version_from_file(version_file)\n\n else:\n package_info = get_package_info(osp_package)\n repo_name = get_package_repo_name(package_info)\n version_string = get_version_from_repo_name(repo_name)\n\n if version_string == None:\n version_string = \"unknown\"\n \n return version_string", "def get_version() -> str:\n version = read(\"pdf_utils/__version__.py\")\n return re.search(r\"__version__ = \\\"(.*?)\\\"\", version).group(1)", "def get_version(self) -> str:\n return versioning.get_version()", "def get_version(self, directory, version_file_name='.version'):\n if self.path_exists(directory) and (version_file_name in os.listdir(directory)):\n f = open(directory + '/' + version_file_name)\n version = f.read()\n f.close()\n return version\n return None", "def get_version(self):\n return 0" ]
[ "0.81010616", "0.78354764", "0.772773", "0.7555567", "0.7484181", "0.74439764", "0.743952", "0.73560786", "0.7338772", "0.732837", "0.72691864", "0.72675997", "0.72669727", "0.7261523", "0.7258668", "0.7251823", "0.7247981", "0.72477627", "0.7239214", "0.7227263", "0.7225165", "0.72202194", "0.7209334", "0.71853805", "0.7148706", "0.71340096", "0.7126876", "0.71018773", "0.7096361", "0.7087954" ]
0.8548073
0
Returns the file's flags
def flags(self):
    data = struct.pack('=I', self.FileFlags & self.FileFlagsMask)
    addr_space = addrspace.BufferAddressSpace(self.obj_vm.get_config(), 0, data)
    bitmap = {'Debug': 0,
              'Prerelease': 1,
              'Patched': 2,
              'Private Build': 3,
              'Info Inferred': 4,
              'Special Build' : 5,
             }
    return obj.Object('Flags', offset = 0, vm = addr_space, bitmap = bitmap)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def flags(self):\n return self._flags", "def get_flags(self):\n\n return self._flags", "def flags(self):\n if self._flags is None:\n raise ValueError('Flags are not available since dataset '\n 'was opened with metadata only')\n return self._flags", "def get_flags(self):\n return self.short_flag, self.long_flag", "def get_file_flag(self):\n flag_list = os.listdir(self.path)\n temp_flag_list = []\n for flag in flag_list[:5]:\n result = re.match('^(\\w{2}\\d{6}\\_)(\\d{8})', flag)\n if result:\n temp_flag_list.append(result[2])\n self.flag_list = list(set(temp_flag_list))", "def flags(self):\n return c.Flags(self)", "def flag_file(self):\n return os.path.join(self.flag_dir, self.flag_name)", "def flags(self,index):\n return self._flags", "def get_flags(self):\n\n if self.raw.flags not in [0, 1, 2, 3]:\n raise ValueError(\"Invalid raw flags: {}\".format(self.raw.flags))\n\n flags = set()\n\n if (self.raw.flags & 0b010) > 0:\n flags.add(\"DF\")\n\n if (self.raw.flags & 0b001) > 0:\n flags.add(\"MF\")\n\n return frozenset(flags)", "def flags(self):\n return self.ast_node.flags", "def read_flags():\n return flag_args", "def flags(self) -> Optional[int]:\n return self.get(\"/Ff\")", "def get_flags(cls):\n return cls.get_short_flag(), cls.get_flag()", "def flags(self):\n return self.__flag_set", "def flags(self):\n return list(self._flags_generator())", "def get_permissions(self, filepath):\n return oct(os.stat(filepath).st_mode & 0777)", "def _flags(self):\n done, data = self._request('GE')\n if done:\n flags = int(data[1], 16)\n else:\n raise EvseError\n return {\n 'service_level': (flags & 0x0001) + 1,\n 'diode_check': not flags & 0x0002,\n 'vent_required': not flags & 0x0004,\n 'ground_check': not flags & 0x0008,\n 'stuck_relay_check': not flags & 0x0010,\n 'auto_service_level': not flags & 0x0020,\n 'auto_start': not flags & 0x0040,\n 'serial_debug': not not flags & 0x0080,\n 'lcd_type': 'monochrome' if flags & 0x0100 else 'rgb',\n 'gfi_self_test': not flags & 0x0200\n }", "def source_flags(self):\n return self.ast_node.source_flags", "def get_permissions(filepath):\n return oct(stat.S_IMODE(os.lstat(filepath).st_mode))", "def view_files(self):\n return 1 << 1", "def list_flags(self):\n return self._defs.items()", "def process_flags(self):\n\t\tsflags = []\n\t\tfor attr in dir(self):\n\t\t\tif attr[:3] != \"PF_\":\n\t\t\t\tcontinue\n\t\t\tvalue = getattr(self, attr)\n\t\t\tif value & self.fields[\"flags\"]:\n\t\t\t\tsflags.append(attr)\n\n\t\treturn sflags", "def gn_files(self):\n return set(self._gn_flags.keys())", "def get_flags(args):\r\n\r\n flags = 0\r\n\r\n if args.regexfilepattern is not None:\r\n flags |= pygrep.FILE_REGEX_MATCH\r\n\r\n if not args.regexp:\r\n flags |= pygrep.LITERAL\r\n elif args.dotall:\r\n flags |= pygrep.DOTALL\r\n\r\n if args.ignore_case:\r\n flags |= pygrep.IGNORECASE\r\n\r\n if args.recursive:\r\n flags |= pygrep.RECURSIVE\r\n\r\n if args.regexdirpattern:\r\n flags |= pygrep.DIR_REGEX_MATCH\r\n\r\n return flags", "def flags(self):\n flags = self.Flags\n return [x for x in self.FLAGS_VALUES if flags & x]", "def GetAGWFlags(self):\r\n \r\n return self._agwFlags", "def format_flags(self):\n flags = []\n if self.is_unique:\n flags.append('Unique')\n if self.is_weak:\n flags.append('Weak')\n if self.is_ctor:\n flags.append('Constructor')\n if self.is_warning:\n flags.append('Warning')\n if self.is_ref:\n flags.append('Indirect reference')\n if self.is_reloc:\n flags.append('Reloc function')\n if self.is_debug:\n flags.append('Debug')\n if self.is_dynamic:\n 
flags.append('Dynamic')\n if self.is_func:\n flags.append('Function')\n if self.is_file:\n flags.append('File')\n if self.is_object:\n flags.append('Object')\n return flags", "def check_file_flag(file):\n return process_file_flag(file, None)", "def hive_flags(self):\n return self.unpack_dword(0x8)", "def manage_files(self):\n return 1 << 2" ]
[ "0.74164915", "0.7365765", "0.7306501", "0.7034275", "0.69856757", "0.6956966", "0.693179", "0.691857", "0.69089735", "0.68848044", "0.6857017", "0.684196", "0.68312913", "0.6696304", "0.66916585", "0.66664183", "0.66175085", "0.6596741", "0.64871913", "0.6412913", "0.6397083", "0.6392307", "0.6385142", "0.62855303", "0.62748814", "0.62512916", "0.6229761", "0.61837727", "0.6174069", "0.61671835" ]
0.7750857
0
Return a copy of hits shifted and trimmed to match self.truth. hits must have the same frequency as self.truth.
def alignshift(self, hits):
    return hits.shift(self.horizon, axis=0) \
        .align(self.truth, axis=0, join='right')[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cleaned(self, start_threshold=0.01, end_threshold=0.25, shifted=True):\n start_i, end_i = None, None\n\n max_i = np.nanargmax(self.ys)\n max_y = self.ys[max_i]\n\n if start_threshold is not None:\n # includes the value before threshold is met\n for i, y in enumerate(self.ys[1:]):\n if y > max_y*start_threshold:\n start_i = i\n break\n\n if end_threshold is not None:\n for i, y in enumerate(self.ys[max_i:], start=max_i):\n if y < max_y*end_threshold:\n end_i = i\n break\n\n return self.cropped_index(start_i, end_i, shifted)", "def _shifted(self, aslice):\n return slice(\n self._start if aslice.start is None else self._clamp(aslice.start),\n self._stop if aslice.stop is None else self._clamp(aslice.stop),\n aslice.step)", "def trim(self):\n for i in range(len(self)):\n if self[i] != TRIT_ZERO:\n return self.__class__(self[i:])\n return self.__class__([])", "def _clean_hits(reads):\n new_reads = defaultdict(realign)\n for r in reads:\n world = {}\n sc = 0\n for p in reads[r].precursors:\n world[p] = reads[r].precursors[p].get_score(len(reads[r].sequence))\n if sc < world[p]:\n sc = world[p]\n new_reads[r] = reads[r]\n for p in world:\n logger.debug(\"score %s %s %s\" % (r, p, world[p]))\n if sc != world[p]:\n logger.debug(\"remove %s %s %s\" % (r, p, world[p]))\n new_reads[r].remove_precursor(p)\n\n return new_reads", "def cropped(self, start=None, end=None, shifted=True):\n start_i, end_i, i = None, None, 0\n\n if start is not None:\n for i, val in enumerate(self.xs):\n if val > start:\n start_i = i\n break\n\n if end is not None:\n for i, val in enumerate(self.xs[i:], start=i):\n if val > end:\n end_i = i + 1\n break\n\n return self.cropped_index(start_i, end_i, shifted)", "def filter(self):\n M, p, q = self.M, self.p, self.q\n x = self.x\n idx = len(self.x) - (p + 1)\n x_ = self.x_prev + (x[idx + p] - x[idx - q]) / M\n self.t_.append(self.t[idx])\n self.t_filtered.append(self.t[idx])\n self.x_.append(x_)\n self.x_filtered.append(x_)\n self.x_prev = x_", "def prune(self, upper, lower):\n # max_count = sorted([self.counts[key] for key in self.counts.keys()])[::-1][upper]\n max_count = upper\n\n print('Removed all words that occur less than {} times and more than {} times'.format(lower, upper))\n for i, doc in enumerate(self.docs):\n new_doc = []\n for word in doc:\n if self.counts[word] <= max_count and self.counts[word] > lower:\n new_doc.append(word)\n self.docs[i] = new_doc", "def mask(self):\n\n mask = self.freqs >= self.minimum_threshold\n mask = mask.astype(int)\n self.freqs = self.freqs * mask\n self.sums = self.sums * mask", "def _xtrim(self, lower, upper):\n trm = pd.Series(data=True, index=self._data.index)\n for c in self.index_colnames_all:\n l_limit = np.percentile(self._data[c], 100 * lower)\n u_limit = np.percentile(self._data[c], 100 * upper)\n trm &= self._data[c].apply(lambda x: True if l_limit <= x <= u_limit else False)\n\n return trm", "def remove_jumps(self) -> None:\n q_diff = np.diff(self.array, axis=0)\n jumps = np.nonzero(np.where(np.linalg.norm(q_diff, axis=1)>1, 1, 0))[0]+1\n if len(jumps) % 2:\n jumps = np.append(jumps, [len(q_diff)+1])\n jump_pairs = jumps.reshape((len(jumps)//2, 2))\n for j in jump_pairs:\n self.array[j[0]:j[1]] *= -1.0", "def removed(self):\n return self.past_keys - self.intersect", "def trim_timings(phrase_length, timings):\n extra_hits = np.argwhere(np.cumsum(timings) > int(phrase_length)).ravel()\n\n if len(extra_hits) != 0:\n all_to_end = np.min(extra_hits)\n del timings[all_to_end:]\n\n return timings", "def forward(self, value, 
query, lens):\n relevant_scores = self.relevant_score(value, query)\n\n # make mask to mask out padding embeddings\n mask = torch.zeros_like(relevant_scores)\n for b, n_c in enumerate(lens):\n mask[b, n_c:] = -math.inf\n\n # apply mask\n relevant_scores += mask\n\n return relevant_scores", "def shift(self) -> Any:\n return self.pop(0)", "def remove_baseline(self):\n\n print(\" \\t Apply Savitzky-Golay filter \\t %d\" %self.nwin)\n base_savgol = signal.savgol_filter(self.input, self.nwin, 1)\n self.input_nobase = self.input - base_savgol", "def __call__(self, results):\n if random.random() < self.shift_ratio:\n img_shape = results['img'].shape[:2]\n\n random_shift_x = random.randint(-self.max_shift_px,\n self.max_shift_px)\n random_shift_y = random.randint(-self.max_shift_px,\n self.max_shift_px)\n new_x = max(0, random_shift_x)\n orig_x = max(0, -random_shift_x)\n new_y = max(0, random_shift_y)\n orig_y = max(0, -random_shift_y)\n\n # TODO: support mask and semantic segmentation maps.\n for key in results.get('bbox_fields', []):\n bboxes = results[key].copy()\n bboxes[..., 0::2] += random_shift_x\n bboxes[..., 1::2] += random_shift_y\n\n # clip border\n bboxes[..., 0::2] = np.clip(bboxes[..., 0::2], 0, img_shape[1])\n bboxes[..., 1::2] = np.clip(bboxes[..., 1::2], 0, img_shape[0])\n\n # remove invalid bboxes\n bbox_w = bboxes[..., 2] - bboxes[..., 0]\n bbox_h = bboxes[..., 3] - bboxes[..., 1]\n valid_inds = (bbox_w > self.filter_thr_px) & (\n bbox_h > self.filter_thr_px)\n # If the shift does not contain any gt-bbox area, skip this\n # image.\n if key == 'gt_bboxes' and not valid_inds.any():\n return results\n bboxes = bboxes[valid_inds]\n results[key] = bboxes\n\n # label fields. e.g. gt_labels and gt_labels_ignore\n label_key = self.bbox2label.get(key)\n if label_key in results:\n results[label_key] = results[label_key][valid_inds]\n\n for key in results.get('img_fields', ['img']):\n img = results[key]\n new_img = np.zeros_like(img)\n img_h, img_w = img.shape[:2]\n new_h = img_h - np.abs(random_shift_y)\n new_w = img_w - np.abs(random_shift_x)\n new_img[new_y:new_y + new_h, new_x:new_x + new_w] \\\n = img[orig_y:orig_y + new_h, orig_x:orig_x + new_w]\n results[key] = new_img\n\n return results", "def crop_missing(self):\n new_data = numpy.ma.copy(self.data)\n new_edges = list(self.bset.edges) # Mutable copy\n\n # Remove all-masked edge slices along all dimensions\n for axis in range(new_data.ndim):\n # Bring axis to front\n new_data = numpy.ma.swapaxes(new_data, 0, axis)\n\n # Find first slice to keep\n try:\n first = next(i for (i, mask) in\n enumerate(numpy.ma.getmaskarray(new_data))\n if not mask.all())\n new_data = new_data[first:]\n new_edges[axis] = new_edges[axis][first:]\n except StopIteration:\n pass\n\n # Find last slice to keep\n try:\n last = next(i for (i, mask) in\n enumerate(numpy.ma.getmaskarray(new_data)[::-1])\n if not mask.all())\n if last != 0:\n new_data = new_data[:-last]\n new_edges[axis] = new_edges[axis][:-last]\n except StopIteration:\n pass\n\n # Swap back axis\n new_data = numpy.ma.swapaxes(new_data, 0, axis)\n\n return type(self)(new_data, new_edges)", "def reduce(self):\n return self.crop(*self.ink_offsets)", "def remove_duplicates(self, hits):\n\t\tseen = set()\n\t\tkeep = []\n\n\t\tfor i in range(len(hits)):\n\t\t\tif hits[i][\"Text\"] not in seen:\n\t\t\t\tseen.add(hits[i][\"Text\"])\n\t\t\t\tkeep.append(hits[i])\n\n\t\treturn keep", "def drifting(self):\n return [n for n in self if n.drifting]", "def cropped_index(self, start_i=None, end_i=None, 
shifted=True):\n xs = self.xs[start_i:end_i]\n ys = self.ys[start_i:end_i]\n\n if shifted:\n xs = xs - xs[0]\n\n return self.__class__(xs, ys, self.gauge_length, self.sample_width, self.sample_thickness, self.name)", "def filter_passing_hits(self):\n self.create_fasta()\n self.blastn_commandline()\n\n hits = {}\n result_handle = open(generate_path(\"tmp/validate.xml\"))\n for record in NCBIXML.parse(result_handle):\n for entry in record.alignments:\n hit = entry.hit_def\n seqlen = entry.length\n hsp = entry.hsps[0]\n percent_ident = (float(hsp.positives) / float(seqlen)) * 100\n\n if 90 <= percent_ident <= 100:\n if hit in hits:\n if percent_ident > hits[hit]:\n hits[hit] = percent_ident\n else:\n hits[hit] = percent_ident\n del result_handle\n self.seqdata.hits = hits", "def filter(self, results):\r\n \r\n docs = self.docs & results.docs\r\n self.scored_list = [docnum for docnum in self.scored_list if docnum in docs]\r\n self.docs = docs", "def __rshift__(self, other):\n other.set_upstream(self)\n # return other so a >> b >> c works\n return other", "def trim(self, edge_ic_cutoff=0.4):\n pwm = self.pwm[:]\n while len(pwm) > 0 and self.ic_pos(pwm[0]) < edge_ic_cutoff:\n pwm = pwm[1:]\n self.pwm = self.pwm[1:]\n self.pfm = self.pfm[1:]\n while len(pwm) > 0 and self.ic_pos(pwm[-1]) < edge_ic_cutoff:\n pwm = pwm[:-1]\n self.pwm = self.pwm[:-1]\n self.pfm = self.pfm[:-1]\n \n self.consensus = None \n self.min_score = None\n self.max_score = None\n self.wiggled_pwm = None\n \n return self", "def eliminate_the_rule(self):\n item = self.item\n claim_tokens = item['claim_tokens']\n finded_keys = item['prioritized_docids']\n if claim_tokens[0] == 'The':\n claim_tokens[1] = claim_tokens[1].title()\n claim = ' '.join(claim_tokens[1:])\n fk_new = self._keyword_match(claim)\n finded_keys = set(finded_keys) | set(fk_new)\n item['prioritized_docids'] = list(finded_keys)\n return self", "def remain(self):\n return self.source[self.cur :]", "def prune(self, min_freq):\n new_forward = {}\n new_backward = [\"OOV\"]\n new_freq = [0]\n j = 1\n for i in xrange(1,len(self.backward)):\n f = self.backward[i]\n if self.freq[i] >= min_freq:\n new_forward[f] = j\n new_backward.append(f)\n new_freq.append(self.freq[i])\n j += 1\n self.forward = new_forward\n self.backward = new_backward\n self.freq = new_freq\n self.counter = j", "def Truncate(self, f, fcut, below=True):\n fout = copy.copy(f)\n ind = thresh(f,fcut)\n if below:\n fout = fout[0:ind]\n else:\n fout = fout[ind:]\n \n keys=['Gxx','Gyy','Gxy']\n\n for curkey in keys:\n curitem = colwise(getattr(self,curkey))\n\n if below:\n curitem = curitem[0:ind,:]\n else:\n curitem = curitem[ind:,:]\n \n setattr(self,curkey,squeeze(curitem))\n return fout", "def _remove_duplicates_(self):\n t = self.table_orig\n mask = []\n t_obs = np.unique(t['jdobs'])\n for t_ in t_obs:\n if np.sum(t['jdobs'] == t_) == 1:\n mask.append(True)\n else:\n mags = t['magpsf'][t['jdobs'] == t_]\n if len(np.unique(mags)) == 1:\n mask.append(True)\n for k in range(len(mags) - 1):\n mask.append(False)\n elif np.sum(np.unique(mags) < 90) == 1:\n done = False\n for m_ in mags:\n if m_ < 90. and not done:\n mask.append(True)\n done = True\n else:\n mask.append(False)\n else:\n mags_ = np.unique(mags)\n mags_ = np.array(mags_[mags_ < 90])\n\n done = [False for k in range(len(mags_))]\n for m_ in mags:\n if m_ < 90.:\n k = np.where(mags_ == m_)[0][0]\n if not done[k]:\n mask.append(True)\n done[k] = True\n else:\n mask.append(False)\n\n self.table = t[np.array(mask)]" ]
[ "0.5625676", "0.53062385", "0.512562", "0.50831914", "0.5061793", "0.50101686", "0.4993119", "0.49484766", "0.49025172", "0.48564422", "0.48109755", "0.47933322", "0.47614375", "0.47234794", "0.47166798", "0.47092518", "0.47006017", "0.46792665", "0.46642837", "0.46602577", "0.46372643", "0.46339986", "0.462398", "0.4615221", "0.46078727", "0.46073493", "0.4599934", "0.45960554", "0.45882893", "0.4582754" ]
0.5598753
1
Assumes myList is sorted. Returns closest value to myNumber. If two numbers are equally close, return the smallest number.
def takeClosest(myList, myNumber):
    pos = bisect_left(myList, myNumber)
    if pos == 0:
        return myList[0]
    if pos == len(myList):
        return myList[-1]
    before = myList[pos - 1]
    after = myList[pos]
    if after - myNumber < myNumber - before:
        return after
    else:
        return before
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def takeClosest(myList, myNumber):\n pos = bisect_left(myList, myNumber)\n if pos == 0:\n return 0 #myList[0]\n if pos == len(myList):\n return len(myList)-1 #myList[-1]\n\n before = myList[pos - 1]\n after = myList[pos]\n\n if after - myNumber < myNumber - before:\n return pos #after\n else:\n return pos-1 #before", "def closestValue(aList: list, givenV: int):\n abs_diff = lambda list_value: abs(list_value - givenV)\n\n return min(aList, key=abs_diff)", "def closest_match(num,num_list):\n\tdiffs = np.abs(np.subtract(num,num_list))\n\treturn num_list[np.argmin(diffs)]", "def getnearest(iterable, value):\n return min(enumerate(iterable), key=lambda i: abs(i[1] - value))", "def findNearest(myInterval,IntervalList):\n \n myDist = 9999999999999999999\n res = 0\n for i in IntervalList:\n distance = myInterval.distance(i)\n if distance > 0 and distance < myDist:\n myDist = distance\n res = i\n return res", "def _get_index_closest_val(list, val):\n\n return min(range(len(list)), key=lambda i: abs(list[i]-val))", "def get_closest_value_index_in_sorted_list(value, list_):\n if value <= list_[0]:\n return 0\n if value >= list_[-1]:\n return len(list_) - 1\n pos = bisect.bisect_left(list_, value)\n before = list_[pos - 1]\n after = list_[pos]\n if after - value < value - before:\n return pos\n else:\n return pos - 1", "def takeclosest(takecloselist, takecloseint):\n pos = bisect_left(takecloselist, takecloseint)\n if pos == 0:\n return takecloselist[0]\n if pos == len(takecloselist):\n return takecloselist[-1]\n before = takecloselist[pos - 1]\n after = takecloselist[pos]\n if after - takecloseint < takecloseint - before:\n return after\n else:\n return before", "def find_least_number(incoming_list):\n retval = min(incoming_list)\n return retval", "def find_least_number(incoming_list):\n \n return_value = min(incoming_list)\n return return_value", "def find_least_number(incoming_list):\n\n least_number = min(incoming_list)\n return least_number", "def find_min(list):\n return find_value_at(list, -1)", "def closest_value_index(val, lst):\n index = 0\n for item in lst:\n if item > val:\n return index\n index += 1\n return index-1", "def lowest_number(list_int):\n if len(list_int) == 1:\n return list_int[0]\n number = lowest_number(list_int[1:])\n if list_int[0] < number:\n return list_int[0]\n else:\n return number", "def find_least_number(incoming_list: list):\n return min(incoming_list)", "def find_nearest(numbers, target):\n numbers = np.asarray(numbers)\n idx = (np.abs(numbers - target)).argmin()\n return numbers[idx]", "def get_closest(a, n):\n pos = bisect_left(a, n)\n if pos == 0:\n return a[0]\n if pos == len(a):\n return a[-1]\n before = a[pos - 1]\n after = a[pos]\n if after - n < n - before:\n return after\n else:\n return before", "def nearest(n, number):\n return math.floor((n / number) + 0.5) * number", "def find_smallest(numbers):\n small = numbers[0]\n for item in numbers:\n if item < small:\n small = item\n return small", "def first_missing_num(the_list):\n the_list.sort()\n first_index = 0\n next_min = 0\n for i, v in enumerate(the_list):\n if v > 0:\n first_index = i\n next_min = v\n break\n for num in the_list[first_index:]:\n if num < next_min:\n continue\n elif num == next_min:\n next_min += 1\n else:\n return next_min\n return next_min", "def nearest(items, pivot):\n return min(items, key=lambda x: abs(x - pivot))", "def smallest_number_with_last_digit_equal_to_an_input_digit(lst: [int], n) -> int:\n lowest = None\n for x in lst:\n last_digit = abs(x) % 10\n if last_digit == n 
and (lowest is None or x < lowest):\n lowest = x\n return lowest", "def find_smallest(list):\n smallest_index = 0\n smallest_number = list[0]\n for index, number in enumerate(list):\n if number < smallest_number:\n smallest_index = index\n smallest_number = number\n del list[smallest_index]\n return smallest_number", "def nextMin(value,lista):\n for i in lista:\n if i<value:\n return i\n raise NameError('No value')", "def less_than_index(numlist, singnum):\r\n try:\r\n for elem in numlist:\r\n if elem <= singnum:\r\n e_val = numlist.index(elem)\r\n return e_val\r\n except ValueError:\r\n return 'None. Try a value contained within the list.'", "def max_index_of_smaller_number(list, number):\n for i, element in enumerate(list):\n if element >= number:\n return i - 1", "def closestNumbers(arr):\n arr.sort()\n mindiff = abs(arr[1] - arr[0])\n\n for i in range(1, len(arr)):\n mindiff = min(mindiff, abs(arr[i] - arr[i - 1]))\n\n pairs = []\n for i in range(1, len(arr)):\n if arr[i] - arr[i - 1] == mindiff:\n pairs.append(arr[i - 1])\n pairs.append(arr[i])\n return pairs", "def get_minimum_value_from_list(self, list_):\r\n return min(list_)", "def bruteClosest(list_points):\n\n minimum = 0\n p1 = 0\n p2 = 0\n for i in list_points:\n for k in list_points:\n \n d = dist(i,k)\n if (d < minimum and d != 0) or minimum == 0:\n p1 = i\n p2 = k\n minimum = d\n return [p1, p2, minimum]", "def find_nearest(array, value):\n idx = (np.abs(array - value)).argmin()\n return array[idx]" ]
[ "0.8521541", "0.76922435", "0.7656438", "0.7287982", "0.7014501", "0.6732125", "0.6697386", "0.66785496", "0.66401225", "0.66105974", "0.656588", "0.6560782", "0.64787084", "0.64621955", "0.6439355", "0.6437316", "0.641067", "0.6385044", "0.6370557", "0.63562495", "0.63331723", "0.6307673", "0.6274802", "0.6197667", "0.6162756", "0.6161932", "0.6147914", "0.61089826", "0.6106207", "0.61049366" ]
0.8773185
0
Overlay the card on top of the src
def overlayCard(src: np.array, card: np.array, mask: np.array, top_left: Point) -> Any:
    max_y = src.shape[0]

    # if top left is outside the src
    if top_left.y >= max_y:
        return

    # get the ROI
    rows, cols, _ = mask.shape
    roi = src[top_left.y:max_y, top_left.x:top_left.x+cols]
    roi_shape = roi.shape

    mask_grey = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
    _, mask = cv2.threshold(mask_grey, 127, 255, cv2.THRESH_BINARY)
    mask_inv = cv2.bitwise_not(mask)

    img_bg = cv2.bitwise_and(roi, roi, mask=mask_inv[:roi_shape[0], :roi_shape[1]])
    card_fg = cv2.bitwise_and(card, card, mask=mask)

    dst = cv2.add(img_bg, card_fg[:roi_shape[0], :roi_shape[1]])
    src[top_left.y:max_y, top_left.x:top_left.x+cols] = dst
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_card_icon(self):\n card_icon = pygame.Surface((card_dimensions[0]/2, card_dimensions[1]/2))\n card_icon.fill(self.color)\n pygame.draw.rect(card_icon, CARD_OUTLINE, (0,0,card_icon.get_width(), card_icon.get_height()), 1)\n #the picture\n pic = pygame.transform.scale(self.picture, (card_icon.get_width()-2, card_icon.get_height()*1/2))\n pic = util.outline_surface(pic)\n card_icon.blit(pic,(1, card_icon.get_height()/7))\n util.draw_text(self.name, (2,2), card_icon, text_size = 8)\n self.icon = card_icon", "def oneplayer():\n return render_template('cards.html')", "def update(self):\n\n surface = pygame.Surface(CARD_SIZE, pygame.SRCALPHA)\n card_filename = f\"assets/sprites/cardBack.png\"\n card_image = pygame.image.load(card_filename).convert_alpha()\n surface.blit(card_image, (0, 0))\n self.image = surface\n self.rect = self.image.get_rect()", "def make_card_surface(self):\n\n surf = pygame.Surface((card_dimensions))\n surf.fill(CARD_OUTLINE)\n pygame.draw.rect(surf, NORMAL_MONSTER,(central_padding, central_padding,\n surf.get_width()-central_padding*2,\n surf.get_height()-central_padding*2))\n picture_outline = pygame.Surface((self.picture.get_width()+2,\n self.picture.get_height()+2))\n picture_outline.fill(CARD_OUTLINE)\n picture_outline.blit(self.picture,(1,1))\n surf.blit(picture_outline, (central_padding-1,surf.get_height()*1/7))\n util.draw_text(self.name, (central_padding*1.5, central_padding*1.5), surf)\n util.draw_text(\"ATK: \"+str(self.stats[0]), (central_padding*2, surf.get_height()*0.73), surf)\n util.draw_text(\"DEF: \"+str(self.stats[1]), (central_padding*2, surf.get_height()*0.83), surf)\n self.spr_card = surf", "def update(self):\n surface = pygame.Surface(CARD_SIZE, pygame.SRCALPHA)\n card_filename = f\"assets/sprites/card{self.suit.capitalize()}{self.rank}.png\"\n card_image = pygame.image.load(card_filename)\n surface.blit(card_image, (0, 0))\n self.image = pygame.Surface.convert_alpha(surface)\n self.rect = self.image.get_rect()", "def new_card(self, card_id):\n\n self.last_action_ts = pygame.time.get_ticks()\n # self.background.hidden=True\n self.background.image_view.image = ui.get_image(card_id, '/home/pi/music/images/')\n self.showing_splash = False # we no longer show the splash screen image\n # self.background.hidden=False\n self.progress_view.hidden = False # we play a song, so show progress bar\n self.show_buttons() # show play controll buttons", "async def show_card(self, ctx, card: dict):\r\n emb = discord.Embed(\r\n title=card['name'],\r\n colour=discord.Colour.dark_purple(),\r\n url='https://roll20.net/compendium/dnd5e/Deck%20of%20Many%20Things#content',\r\n description=card['desc']\r\n )\r\n emb.set_footer(text='Use [p]domt info for list of all cards.')\r\n emb.set_image(url=card['img'])\r\n await ctx.send(embed=emb)", "def add_card(self, card_widget: WidgetT):", "def play_selected_card(_screen, player):\n card = Card(player.selected_card.card_id, 400, 350)\n card.image_of_card(_screen)", "def draw_card(dealer,player):\n # hidden_img = Image(img_path+\"back.png\")\n depth = 100\n x0,y0 = 100,100\n x1,y1 = 100,300\n ix = 30\n\n bj_board.clear()\n for card in dealer:\n if card.state:\n card.image.moveTo(x0, y0)\n card.image.setDepth(depth)\n bj_board.add(card.image)\n else:\n img = Image(img_path+\"Back.png\")\n img.moveTo(x0, y0)\n img.setDepth(depth)\n bj_board.add(img)\n x0 += ix\n \n for card in player:\n if card.state:\n card.image.moveTo(x1, y1)\n card.image.setDepth(depth)\n bj_board.add(card.image)\n else:\n img = 
Image(img_path+\"back.png\")\n img.moveTo(x1, y1)\n img.setDepth(depth)\n bj_board.add(img)\n x1 += ix", "def overlay(self, img2_path=\"./hurr.png\"):\n img2 = self.imread(img2_path)\n\n self.img = self.overlay_transparent(self.img, img2, 0, 0)\n\n self.edits.append(f\"overlay:{os.path.basename(img2_path)}\")\n return self", "def draw(self, surface):\n\n\t\tsurface.blit(self.image, self.rect.topleft)", "def blit_me(self):\n self.game_over_img.blit_me()\n self.retry_button.blit_me()", "def overlay(self, image, x, y, r=0):\n x -= (image.get_rect()[2] - self.dial.get_rect()[2])/2\n y -= (image.get_rect()[3] - self.dial.get_rect()[3])/2\n image.set_colorkey((255,255,0))\n self.dial.blit(image, (x,y))", "def on_draw_over_image(self):", "def view(self):\n window = tk.Tk()\n label = tk.Label(window)\n label.pack()\n img = self.get_tkimage()\n label[\"image\"] = label.img = img\n window.mainloop()", "def addUserCard(src: np.array, alpha: float, current_y: float, top_left: tuple, bottom_right: tuple, background_color: tuple, foreground_color: tuple, identity: str, time: datetime, font_path: str, font_size: int=24) -> np.array:\r\n \r\n img = src.copy()\r\n\r\n # dimmed the source\r\n img = cv2.addWeighted(img, alpha, np.zeros(img.shape, img.dtype), 1 - alpha, 0)\r\n \r\n # get the user card\r\n card, mask = _userCard(top_left, bottom_right, background_color, foreground_color, identity, time, font_path, font_size)\r\n\r\n # overlay card\r\n overlayCard(img, card, mask, Point(top_left[0], int(current_y)))\r\n\r\n return img", "def __card_url(self, card: wekanapi.models.Card) -> str:\n return self.__board_urls(card.cardslist.board.id)[0] + '/' + card.id", "def display(self):\n display(self.image)", "def deal_cards(frame):\n next_card = deck.pop(0)\n deck.append(next_card)\n # now we will add the image to a label and display the label\n tkinter.Label(frame, image=next_card[1], relief='raised', borderwidth=3).pack(side='left', padx=3)\n # image attribute sets the image in that label\n # Well we can observe we have pack geometry layout because it would be good over here\n # NOTE: We cant add grid layout in that frame now\n # Now we have to link the action to the buttons\n\n return next_card", "def add_card(self, card_, on_top=True):\n card_.unclick()\n if on_top:\n pos_ = self.pos\n if len(self.cards) is not 0:\n length = len(self.cards)\n pos_ = (self.pos[0] + length * self.offset[0],\n self.pos[1] + length * self.offset[1])\n card_.set_pos(pos_)\n self.cards.append(card_)\n else:\n self.cards.insert(0, card_)\n self.update_position()", "def body(self, parent):\n img = Label(parent, image = self._photo, text=\"Unable to display image\")\n img.pack()", "def update_deck_display(self):\n self.deck_surface.fill(CLEARCOLOUR)\n self.deck_surface.blit(self.background, (0, 0))\n if not self.is_empty():\n cards_to_draw = self.cards\n if self.draw_from_last:\n cards_to_draw = reversed(cards_to_draw)\n\n for i, card in enumerate(cards_to_draw):\n selected = (i == self.selected_card)\n image_to_draw = card.image\n\n if self.deck_reveal == DeckReveal.HIDE_ALL:\n image_to_draw = card.backimage\n\n if self.flip:\n image_to_draw = pygame.transform.flip(image_to_draw, self.vert_orientation,\n not self.vert_orientation)\n\n self.deck_surface.blit(image_to_draw, (card.x - selected * card.x * 0.5 *\n (-1)**self.flip * self.vert_orientation,\n card.y - selected * card.y * 0.5 *\n (-1)**self.flip * (not self.vert_orientation)))", "def full_photo():\n top = Toplevel()\n top.title(\"Full APOD Photo\")\n top.iconbitmap('10.APOD 
Viewer/rocket.ico')\n\n #Load the full image to the top image\n img_label = Label(top, image=full_img)\n img_label.pack()", "def show(self):\n\n self.image.show()", "def draw_overlay(self):\n pass", "def preview(self, obj):\n return format_html(\n '<iframe width=\"640\" height=\"360\" src=\"{}\"></iframe>',\n 'https://sms.cam.ac.uk/media/{}/embed'.format(obj.id)\n )", "def propeller_card(card):\n return card.as_html()", "def add_child(self, child):\n super(Img, self).add_child(child)\n\n # If this is a relative URL, it's relative to the statics directory\n # of the application\n src = self.get('src')\n if src is not None:\n self.set('src', absolute_url(src, self.renderer.head.static_url))", "def draw_card(self, x, y, card_pos):\n card_id = self.cards[card_pos]\n item = CommonCardHandle(card_pos, self)\n pixmap = QPixmap()\n if not pixmap.loadFromData(self.parent.database.get_data(card_id, 'low_res')):\n # TODO: throw error or something\n pass\n item.setPixmap(pixmap)\n item.setPos(x, y)\n self.scene.addItem(item)\n\n if card_pos in self.get_selected_cards():\n self.draw_highlight(x, x + 85, y, y + 115, QColor(255, 0, 0))" ]
[ "0.59966683", "0.59719294", "0.59591544", "0.59294903", "0.5928805", "0.59207386", "0.569979", "0.54795706", "0.5383957", "0.5292921", "0.5283945", "0.528266", "0.52679265", "0.5257612", "0.52402157", "0.5225715", "0.5220325", "0.5204619", "0.51837224", "0.5161136", "0.51569605", "0.515022", "0.5140272", "0.5124663", "0.51062214", "0.51008993", "0.50868297", "0.5085043", "0.50809413", "0.5059122" ]
0.62227285
0
Generate a user card to overlay on top of the src
def addUserCard(src: np.array, alpha: float, current_y: float, top_left: tuple, bottom_right: tuple, background_color: tuple, foreground_color: tuple, identity: str, time: datetime, font_path: str, font_size: int=24) -> np.array: img = src.copy() # dimmed the source img = cv2.addWeighted(img, alpha, np.zeros(img.shape, img.dtype), 1 - alpha, 0) # get the user card card, mask = _userCard(top_left, bottom_right, background_color, foreground_color, identity, time, font_path, font_size) # overlay card overlayCard(img, card, mask, Point(top_left[0], int(current_y))) return img
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def oneplayer():\n return render_template('cards.html')", "def make_card_icon(self):\n card_icon = pygame.Surface((card_dimensions[0]/2, card_dimensions[1]/2))\n card_icon.fill(self.color)\n pygame.draw.rect(card_icon, CARD_OUTLINE, (0,0,card_icon.get_width(), card_icon.get_height()), 1)\n #the picture\n pic = pygame.transform.scale(self.picture, (card_icon.get_width()-2, card_icon.get_height()*1/2))\n pic = util.outline_surface(pic)\n card_icon.blit(pic,(1, card_icon.get_height()/7))\n util.draw_text(self.name, (2,2), card_icon, text_size = 8)\n self.icon = card_icon", "def _userCard(top_left: tuple, bottom_right: tuple, background_color: tuple, foreground_color: tuple, identity: str, time: datetime, font_path: str, font_size: int=24) -> np.array:\r\n\r\n card_top_left = (0,0)\r\n card_bottom_right = (Point.fromTuple(coord=bottom_right) - Point.fromTuple(coord=top_left)).toTuple()\r\n \r\n card = np.zeros((card_bottom_right[1], card_bottom_right[0], 3), dtype=np.dtype('uint8'))\r\n\r\n # add background\r\n rounded_rectangle(card, card_top_left, card_bottom_right, color=background_color, thickness=-1)\r\n mask = card.copy()\r\n\r\n # add the user image to the card\r\n user_img_top_left = (card_top_left[1] + user_img_offset[1], card_top_left[0] + user_img_offset[0])\r\n user_img_path = f'face_pic/{identity}.jpg'\r\n\r\n user_img = cv2.imread(user_img_path)\r\n user_img = cv2.resize(user_img, (round(user_img.shape[1]*.25), round(user_img.shape[0]*.25)))\r\n\r\n user_img_size = user_img.shape\r\n \r\n card[user_img_top_left[0]:user_img_top_left[0]+user_img_size[0], user_img_top_left[1]:user_img_top_left[1]+user_img_size[1]] = user_img\r\n\r\n # add text\r\n text_top_left = (user_img_top_left[1]+user_img_size[1]+text_offset[0], user_img_top_left[0]+text_offset[1])\r\n \r\n time_str = time.strftime('%H:%M:%S')\r\n\r\n font = ImageFont.truetype(font_path, font_size)\r\n\r\n card_pil = Image.fromarray(card)\r\n draw = ImageDraw.Draw(card_pil)\r\n \r\n draw.text(text_top_left, f'เลขประจำตัว: {identity}', font=font, fill=foreground_color)\r\n draw.text((text_top_left[0], text_top_left[1] + font_size + text_offset[1] + text_padding), f'เวลา: {time_str}', font=font, fill=foreground_color)\r\n\r\n card = np.array(card_pil)\r\n \r\n return card, mask", "def make_card_surface(self):\n\n surf = pygame.Surface((card_dimensions))\n surf.fill(CARD_OUTLINE)\n pygame.draw.rect(surf, NORMAL_MONSTER,(central_padding, central_padding,\n surf.get_width()-central_padding*2,\n surf.get_height()-central_padding*2))\n picture_outline = pygame.Surface((self.picture.get_width()+2,\n self.picture.get_height()+2))\n picture_outline.fill(CARD_OUTLINE)\n picture_outline.blit(self.picture,(1,1))\n surf.blit(picture_outline, (central_padding-1,surf.get_height()*1/7))\n util.draw_text(self.name, (central_padding*1.5, central_padding*1.5), surf)\n util.draw_text(\"ATK: \"+str(self.stats[0]), (central_padding*2, surf.get_height()*0.73), surf)\n util.draw_text(\"DEF: \"+str(self.stats[1]), (central_padding*2, surf.get_height()*0.83), surf)\n self.spr_card = surf", "def propeller_card(card):\n return card.as_html()", "async def _idavatar(self, ctx, userid: int = None):\n e = discord.Embed(color=discord.Color.blurple())\n if not userid:\n user = ctx.author\n else:\n try:\n user = await ctx.bot.fetch_user(int(userid))\n if user is None:\n raise Exception(\"User is None.\")\n except Exception as e:\n await ctx.send(f\"Failed to catch user: {e}\")\n e.set_image(url=user.avatar_url)\n e.set_author(name=f\"{user.name}'s avatar\", 
icon_url=user.avatar_url, url=user.avatar_url)\n e.set_footer(text=f\"{ctx.author.name} wanted to see.\", icon_url=ctx.author.avatar_url)\n await ctx.send(embed=e)", "async def gen_banner(self, member):\n base = deepcopy(self.images[randint(0, len(self.images) - 1)])\n\n # Draw the username\n idraw = ImageDraw.Draw(base)\n idraw.text(self.banner_cfg[\"TextPos\"], member.name, fill=tuple(self.banner_cfg[\"Text_Color\"]), font=self.font)\n \n\n # Get user avatar\n avatar_url = member.avatar_url\n if(avatar_url==None):\n avatar_url = member.default_avatar_url\n # Wow, we can really just load it asynchronously from the API now? That's dope\n avatar = await avatar_url.read()\n # We need to save it as a file in memory to get the size so we can load it as an image.\n with io.BytesIO() as fb:\n fb.write(avatar)\n fb.seek(0, 0)\n avatar = Image.open(fb)\n avatar = avatar.resize(self.banner_cfg[\"AvatarSize\"])\n if (self.banner_cfg[\"Rounded\"][\"is_rounded\"]):\n avatar = self.round_corners(avatar, self.banner_cfg[\"Rounded\"][\"px\"])\n # Now that we have our avatar, we can slap it into our banner.\n final = Image.new(\"RGBA\", base.size)\n final.paste(avatar, self.banner_cfg[\"AvatarPos\"])\n if(self.banner_cfg[\"AvatarLayer\"]==\"front\"):\n final = Image.alpha_composite(base, final)\n if(self.banner_cfg[\"AvatarLayer\"]==\"back\"):\n final = Image.alpha_composite(final, base)\n \n # Lastly, let's package it as a file to be uploaded.\n with io.BytesIO() as fb:\n final.save(fb, format=\"png\")\n fb.seek(0, 0)\n \n return discord.File(fb, filename=\"Welcome.png\")", "async def show_card(self, ctx, card: dict):\r\n emb = discord.Embed(\r\n title=card['name'],\r\n colour=discord.Colour.dark_purple(),\r\n url='https://roll20.net/compendium/dnd5e/Deck%20of%20Many%20Things#content',\r\n description=card['desc']\r\n )\r\n emb.set_footer(text='Use [p]domt info for list of all cards.')\r\n emb.set_image(url=card['img'])\r\n await ctx.send(embed=emb)", "async def avatar(self, ctx:utils.Context, user:discord.User=None):\n\n if user is None:\n user = ctx.author\n with utils.Embed(use_random_colour=True) as embed:\n embed.set_image(url=user.avatar_url)\n await ctx.send(embed=embed)", "def display_player(self, pick, win):\n if pick == 'none':\n return False\n if pick == 'paper':\n player = pygame.image.load('paper.png')\n elif pick == 'scissor':\n player = pygame.image.load('scissor.png')\n else:\n player = pygame.image.load('rock.png')\n player = pygame.transform.scale(player, (100, 100))\n\n win.blit(player, (screen_width // 6, screen_height // 3))", "def create_avatar_embed(message, user):\n\n requestor = message.author\n name = user.name\n avatarImage = user.avatar_url\n os.system(f'curl -o .img.png {avatarImage}')\n color_thief = ColorThief('.img.png')\n dominant_color = color_thief.get_color(quality=1)\n os.system('rm .img.png')\n clr = '0x' + '%02X%02X%02X' % dominant_color\n clr = int(clr, base=16)\n embed = discord.Embed(title=f\"Avatar of {name}\", value=requestor, color=clr)\n embed.set_image(url=avatarImage)\n\n return embed", "def player():\r\n return render_template('deck.html')", "async def avatarurl(self, ctx: \"IceTeaContext\", target: discord.Member = None):\n target = target or ctx.author\n embed = discord.Embed(description=f\"{target} Profile Picture\")\n embed.set_image(url=str(target.avatar_url))\n await ctx.send(embed=embed)", "def show_me():\n # Scumbag thumbnail code\n try:\n from PIL import Image\n except ImportError:\n pass\n else:\n filename = os.path.join(app.static_folder, 'img', 
'badumtss.png')\n image = Image.open(filename)\n\n return render_template('show_me.html')", "def play_selected_card(_screen, player):\n card = Card(player.selected_card.card_id, 400, 350)\n card.image_of_card(_screen)", "def new_card(self, card_id):\n\n self.last_action_ts = pygame.time.get_ticks()\n # self.background.hidden=True\n self.background.image_view.image = ui.get_image(card_id, '/home/pi/music/images/')\n self.showing_splash = False # we no longer show the splash screen image\n # self.background.hidden=False\n self.progress_view.hidden = False # we play a song, so show progress bar\n self.show_buttons() # show play controll buttons", "def renderProfile(request, user, identities):\n sourcesResults = lifestream.models.Feed.objects.order_by('url').filter(user__username=user.username)\n sources = []\n for s in sourcesResults:\n if s.title:\n sources.append({'title': s.title, 'url': s.url})\n \n # avatar\n \n gravatarHash = hashlib.md5(user.email).hexdigest()\n avatar_url = \"http://www.gravatar.com/avatar/%s.jpg?d=monsterid&s=80\" % gravatarHash\n \n t = django.template.loader.select_template(('foo', 'lifestream/profile_blurb.html'))\n c = django.template.Context(\n {'avatar_src': avatar_url, 'avatar_width':'80', 'avatar_height':'80',\n 'user': user,\n 'username': user.username,\n 'preferences': json.loads(user.get_profile().properties),\n 'sources': sources,\n 'identities': identities})\n return t.render(c)", "async def avatar(self, ctx, user: discord.Member = None):\n\n if user is None:\n user = ctx.author\n\n avatar = user.avatar_url_as(static_format='png', size=1024)\n\n embed = discord.Embed(color=self.bot.embed_color)\n embed.set_author(name=f\"{user}'s avatar\", icon_url=avatar)\n embed.description = f'[[Download Avatar]]({avatar})'\n\n embed.set_image(url=avatar)\n\n await ctx.send(embed=embed)", "def player(user):\n if isinstance(user, str):\n return ''\n\n if isinstance(user, int) or isinstance(user, long):\n user = Player.objects.get(pk=user)\n\n link = reverse('wouso.interface.profile.views.user_profile', args=(user.id,))\n\n artif_html = artifact(user.level)\n return u'<a href=\"%s\">%s%s</a>' % (link, artif_html, user)", "async def thumbsup(self,ctx,user: discord.Member=None):\n if user == None or user.id == ctx.author.id:\n await ctx.send(\"{}\".format(ctx.author.mention))\n else:\n await ctx.send(\"{} {}\".format(ctx.author.mention, user.mention))\n img = random.choice(self.getreaction(\"thumbsup\", \"0\"))\n embed = discord.Embed(colour=ctx.guild.me.top_role.colour)\n embed.set_image(url=img)\n await ctx.send(embed=embed)", "def battle_screen_my_hand_card_display(screen,buttons, screen_status, button_status, card_database_filter, user):\n rect_position_x = 100\n rect_position_y = 610\n row_number = 1\n if screen_status.battle_screen_action_indicator == 'stage-0':\n pass\n else :\n\n if screen_status.battle_screen_my_hand_page_id <= 0:\n screen_status.battle_screen_my_hand_page_id = 1\n # Edge cases when len() = 6,12,18....\n if len(user.hand_list) % 7 == 0 and len(user.hand_list) != 0:\n if screen_status.battle_screen_my_hand_page_id >= (len(user.hand_list))//7 + 1:\n screen_status.battle_screen_my_hand_page_id = (len(user.hand_list))//7 + 0\n\n else:\n if screen_status.battle_screen_my_hand_page_id >= (len(user.hand_list))//7 + 2:\n screen_status.battle_screen_my_hand_page_id = (len(user.hand_list))//7 + 1\n # Algorithm to draw all cards in local_store_list, 6 card per page.\n for card in user.hand_list[7*(screen_status.battle_screen_my_hand_page_id - 1):7 * 
screen_status.battle_screen_my_hand_page_id]:\n if row_number <= 7:\n card.rect.x = rect_position_x\n card.rect.y = rect_position_y\n screen.blit(card.image, card.rect)\n rect_position_x += 145\n row_number += 1\n if row_number >= 8:\n row_number = 1", "def image_thumb_tag(self):\n u = self.user\n uf = self.app.url_for\n image = None\n if u.image is not None and u.image!=\"\":\n try:\n image = uf(\"asset\", asset_id = self.app.module_map.uploader.get(u.image).variants['userlist']._id)\n except AssetNotFound:\n pass\n except KeyError:\n pass\n if image is not None:\n return \"\"\"<img alt=\"%s\" class=\"profile-image-userlist\" src=\"%s\">\"\"\" %(u.fullname, image)\n return \"\"\"<div class=\"profile-image-userlist missing\"><i class=\"fa fa-user\"></i></div>\"\"\"", "def userProfile(userid):\n images = get_uploaded_images()\n record = UserProfile.query.filter_by(id=userid).first()\n return render_template('userProfile.html', images=images, record =record)", "def __init__(self, user, enabled=True):\n super().__init__()\n self.setObjectName(\"user-profile\")\n self.enabled = enabled\n self.setProperty(\"follow-mouse\", enabled)\n\n image, label = _get_visuals(user)\n\n grid = QGridLayout(self)\n i = QLabel()\n i.setPixmap(image)\n i.setAlignment(Qt.AlignCenter)\n\n text = label\n text.setAlignment(Qt.AlignCenter)\n\n grid.addWidget(i, 0, 0)\n grid.addWidget(text, 1, 0)", "def draw_card(dealer,player):\n # hidden_img = Image(img_path+\"back.png\")\n depth = 100\n x0,y0 = 100,100\n x1,y1 = 100,300\n ix = 30\n\n bj_board.clear()\n for card in dealer:\n if card.state:\n card.image.moveTo(x0, y0)\n card.image.setDepth(depth)\n bj_board.add(card.image)\n else:\n img = Image(img_path+\"Back.png\")\n img.moveTo(x0, y0)\n img.setDepth(depth)\n bj_board.add(img)\n x0 += ix\n \n for card in player:\n if card.state:\n card.image.moveTo(x1, y1)\n card.image.setDepth(depth)\n bj_board.add(card.image)\n else:\n img = Image(img_path+\"back.png\")\n img.moveTo(x1, y1)\n img.setDepth(depth)\n bj_board.add(img)\n x1 += ix", "def profile_image_src(self, size):\n if self.profile_image:\n return join_path(STATIC_IMAGE_URL, 'users', \"{}.{}.{}.png\".format(self.id, self.profile_image, size)).replace(\"\\\\\", '/')\n return join_path(STATIC_IMAGE_URL, \"users\", \"no_profile.jpg\").replace(\"\\\\\", '/')", "def avatar_preview(self):\r\n h = '<img src=\"%s\" alt=\"%s\"/>' % (self.image_avatar_url, self.title)\r\n return mark_safe(h)", "async def test_create_user_embed_uses_png_format_of_user_avatar_as_thumbnail(self):\n ctx = helpers.MockContext()\n\n user = helpers.MockMember(id=217, colour=0)\n user.created_at = user.joined_at = datetime.now(UTC)\n user.display_avatar.url = \"avatar url\"\n embed = await self.cog.create_user_embed(ctx, user, False)\n\n self.assertEqual(embed.thumbnail.url, \"avatar url\")", "async def avatar(self, ctx, user: discord.User = None):\n if user is None:\n user = ctx.author\n avatar = user.avatar_url\n embed = discord.Embed(\n title=user.name + \"'s Avatar:\",\n color=discord.Colour.purple()\n )\n embed.set_image(url=avatar)\n await ctx.send(\"\", embed=embed)", "async def avatar(self, ctx):\n e = discord.Embed(title=\"Here is a avatar image for you {}.\".format(ctx.author.name), color=discord.Color.magenta())\n e.set_image(url=nekos.img('avatar'))\n await ctx.send(embed=e)" ]
[ "0.62067837", "0.60931987", "0.60509694", "0.60508245", "0.57916325", "0.5754831", "0.56826967", "0.5638378", "0.55915296", "0.5540149", "0.5523639", "0.54864603", "0.54524475", "0.54398614", "0.5433686", "0.54308635", "0.54275155", "0.54209024", "0.54202336", "0.5419586", "0.5414409", "0.54103005", "0.53796166", "0.5367253", "0.5366802", "0.5361383", "0.5354042", "0.53471434", "0.5316594", "0.530764" ]
0.65504867
0
forkmap.map(..., n=nprocessors), same as map(...). n must be a keyword arg; default n is number of physical processors.
def map(f, *a, **kw): def writeobj(pipe, obj): try: s = marshal.dumps(obj) s = struct.pack('i', len(s)) + s except: try: s = cPickle.dumps(obj) except: print obj s = cPickle.dumps(obj) s = struct.pack('i', -len(s)) + s os.write(pipe, s) def readobj(pipe): n = struct.unpack('i', os.read(pipe, 4))[0] s = '' an = abs(n) while len(s) < an: s += os.read(pipe, min(65536, an-len(s))) if n > 0: return marshal.loads(s) else: return cPickle.loads(s) n = kw.get('n', nproc) if n == 1: return builtin_map(f, *a) if len(a) == 1: L = a[0] else: L = zip(*a) try: len(L) except TypeError: L = list(L) n = min(n, len(L)) ans = [None] * len(L) pipes = [os.pipe() for i in range(n-1)] for i in range(n): if i < n-1 and not os.fork(): # Child, and not last processor try: try: if len(a) == 1: obj = builtin_map(f, L[i*len(L)//n:(i+1)*len(L)//n]) else: obj = [f(*x) for x in L[i*len(L)//n:(i+1)*len(L)//n]] except Exception, obj: pass writeobj(pipes[i][1], obj) except: traceback.print_exc() finally: os._exit(0) elif i == n-1: # Parent fork, and last processor try: if len(a) == 1: ans[i*len(L)//n:] = builtin_map(f, L[i*len(L)//n:]) else: ans[i*len(L)//n:] = [f(*x) for x in L[i*len(L)//n:]] for k in range(n-1): obj = readobj(pipes[k][0]) if isinstance(obj, Exception): raise obj ans[k*len(L)//n:(k+1)*len(L)//n] = obj finally: for j in range(n-1): os.close(pipes[j][0]) os.close(pipes[j][1]) os.wait() return ans
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pfmap(func, workers=8):\n return fmap(func)", "def map(func, iterable, chunksize=None, ncpu=0, limit=True, progress=False):\n if (ncpu == 0):\n if (not progress):\n return _map(func, iterable)\n else:\n r = []\n if isinstance(progress, str):\n txt = progress\n else:\n txt = func.__name__\n for k in _PBar(desc=txt).iterover(iterable):\n r.append(func(k))\n return r\n elif progress:\n _n = _mp.cpu_count()\n if (ncpu <= 0):\n # use all available cpus\n p = _mp.Pool(_n)\n elif (ncpu > _n) & (limit is True):\n p = _mp.Pool(_n)\n else:\n p = _mp.Pool(ncpu)\n\n if not hasattr(iterable, '__len__'):\n iterable = list(iterable)\n ntasks = len(iterable)\n\n if isinstance(progress, str):\n txt = progress\n else:\n txt = func.__name__\n\n with _PBar(ntasks, desc=txt) as pb:\n # get the pool working asynchronously\n if islambda(func):\n amap = p.map_async(PicklableLambda(func), iterable, chunksize)\n else:\n amap = p.map_async(func, iterable, chunksize)\n left = 1\n while left > 0:\n _time.sleep(0.1)\n left = amap._number_left\n pb.update(ntasks - left)\n return amap.get()\n else:\n return map_async(func, iterable, chunksize, ncpu=ncpu, limit=limit).get()", "def thread_map(f, args_list, n_threads=None):\n if n_threads is None:\n n_threads = int(multiprocessing.cpu_count() / 2)\n pool = multiprocessing.pool.ThreadPool(processes=n_threads)\n return pool.map(f, args_list)", "def multiprocess_map(func, iterable, *worker_args, n_cores=None, mode=\"map\", **pool_kwargs):\n results = []\n\n with mp.Manager() as manager:\n shared_args_proxy = None\n if worker_args is not None:\n shared_args_proxy = manager.list(worker_args)\n\n with mp.Pool(processes=n_cores, initializer=init_worker,\n initargs=shared_args_proxy, **pool_kwargs) as pool:\n if mode == \"map\":\n results = pool.map(func, iterable)\n elif mode == \"starmap\":\n results = pool.starmap(func, iterable)\n elif mode == \"imap\":\n for result in pool.imap(func, iterable):\n results.append(result)\n\n return results", "def run_map(self):\n # Split input into chunks for processing\n files = self.split_list()\n # Make processing pool\n pool = Pool(processes=self.args.ncore)\n # Map processing to _run function\n self.output = pool.map(_run, files)\n # Close and join pool\n pool.close()\n pool.join()", "def multiprocess_map(function, arguments, n_processes=1):\n from multiprocessing import Queue, Process\n\n # Initialize queues\n queue_in = Queue(1)\n queue_out = Queue()\n\n # Initialize processes and link to input and output queues\n processes = [Process(target=spawn(function), args=(queue_in, queue_out))\n for i in range(n_processes)]\n for p in processes:\n p.daemon = True\n p.start()\n\n # Construct input queue, including 'None' signals to terminate\n input = [queue_in.put((i, argument)) for i, argument in\n enumerate(arguments)]\n for i in range(n_processes):\n queue_in.put((None, None))\n\n # Retrieve output queue\n output = [queue_out.get() for i in range(len(input))]\n\n # Rejoin processes and return results\n for p in processes:\n p.join()\n return [x for i, x in sorted(output)]", "def _doMap(self, func, iterable):\n name = \"Mapper\"\n sys.stderr.write(\"Master[%s phase]: starting\\n\" % name)\n pipes = [mp.Pipe() for _ in range(self.num_workers)]\n proc = [mp.Process(target=spawn_mapper(func), name=name, args=(q,)) for q in pipes]\n for p in proc:\n p.daemon = True\n p.start()\n for output_p, input_p in pipes:\n input_p.close() # we don't need to read from the pipes\n qi = 0\n for item in iterable:\n pipes[qi][0].send(item)\n qi = (qi+1) % 
self.num_workers\n for q,_ in pipes:\n q.send(None) # add termination tokens\n q.close()\n for p in proc:\n p.join()\n sys.stderr.write(\"Master[%s phase]: ended..\\n\" % name)", "def pmap(func, seq,\n chunksize=1, nworkers=mp.cpu_count(),\n fargs=None, parallel=True):\n if fargs:\n nworkers = len(fargs)\n else:\n fargs = [None] * nworkers\n\n the_end = random_string()\n create_que = mp.Queue if parallel else Queue\n create_worker = mp.Process if parallel else th.Thread\n\n # Opening multiple ques sounds dumb in a way\n # but this is a easier way to implement the ordered version of\n # parrallel map. It's just that there is a limit in the number of\n # ques in the OS. Of course you wouldn't make more than 20 processes.\n que1s = [create_que(1) for _ in range(nworkers)]\n que2s = [create_que(1) for _ in range(nworkers)]\n\n def insert1(seq, que1s):\n for chunks in grouper(grouper(seq, chunksize, the_end),\n nworkers, the_end):\n for que1, chunk in zip(que1s, chunks):\n que1.put(chunk)\n for que1 in que1s:\n que1.put(the_end)\n\n w0 = create_worker(target=insert1, args=(seq, que1s))\n w0.daemon = True\n w0.start()\n\n def insert2(func, que1, que2):\n while True:\n chunk = que1.get()\n if chunk == the_end:\n que2.put(the_end)\n return\n else:\n result = []\n for x in chunk:\n if x != the_end:\n try:\n result.append(func(x))\n except Exception as error:\n que2.put(the_end)\n str_x = str(x)\n if len(str_x) > 100:\n str_x = str_x[:80] + ' ... ' + str_x[-20:]\n print('child worker error: ' + repr(error), str_x)\n return\n que2.put(result)\n\n for farg, que1, que2 in zip(fargs, que1s, que2s):\n if farg:\n # passing lexical closure\n # you can just do 'lambda x: func(farg, x)' for parallel version\n # because Python just copies args for each process\n # but it wouldn't work for thread version\n newfunc = (lambda farg: lambda x: func(farg, x))(farg)\n else:\n newfunc = func\n # don't replace the above with something like:\n # newfunc = A if test else B\n # causes a \"can't pickle\" error, I have no idea why.\n w = create_worker(target=insert2, args=(newfunc, que1, que2))\n w.daemon = True\n w.start()\n\n while True:\n for que2 in que2s:\n result = que2.get()\n if result == the_end:\n return\n else:\n yield from result\n\n # all the processes and threads are set to daemon\n # hence no need to terminate them manually\n # I might be wrong in the long run though.", "def map_async(func, iterable, chunksize=None, callback=None, ncpu=0, limit=True, **kwargs):\n _n = _mp.cpu_count()\n if (ncpu <= 0):\n # use all available cpus\n p = _mp.Pool(_n)\n elif (ncpu > _n) & (limit is True):\n p = _mp.Pool(_n)\n else:\n p = _mp.Pool(ncpu)\n\n if islambda(func):\n return p.map_async(PicklableLambda(func), iterable, chunksize, callback)\n else:\n return p.map_async(func, iterable, chunksize, callback)", "def task_mapper(task_function, task_iterable, parallel_procs=None):\n\n num_procs = get_num_processors(parallel_procs)\n\n if num_procs == 0:\n LOG.debug('Using serial task processor...')\n return serial_pc(task_function, task_iterable)\n else:\n LOG.debug('Using %d-parallel task processors...', num_procs)\n return parallel_pc(task_function, task_iterable, num_procs)", "def map(\n f: typing.Callable,\n stage: Stage = pypeln_utils.UNDEFINED,\n workers: int = None,\n maxsize: int = None,\n timeout: float = 0,\n on_start: typing.Callable = None,\n on_done: typing.Callable = None,\n) -> Stage:\n\n if pypeln_utils.is_undefined(stage):\n return pypeln_utils.Partial(\n lambda stage: map(\n f,\n stage=stage,\n 
workers=workers,\n maxsize=maxsize,\n timeout=timeout,\n on_start=on_start,\n on_done=on_done,\n )\n )\n\n stage = to_stage(stage)\n\n return Map(\n f=f, on_start=on_start, on_done=on_done, timeout=timeout, dependencies=[stage],\n )", "def parmap(f, X):\n pipe = [Pipe() for x in X]\n proc = [Process(target=spawn(f), args=(c, x))\n for x, (p, c) in zip(X, pipe)]\n [p.start() for p in proc]\n [p.join() for p in proc]\n return [p.recv() for (p, c) in pipe]", "def make_parallel(self, n):\n return super().make_parallel(n, True)", "def map(\n f, stage=pypeln_utils.UNDEFINED, workers=1, maxsize=0, on_start=None, on_done=None\n):\n\n if utils.is_undefined(stage):\n return utils.Partial(\n lambda stage: map(\n f,\n stage,\n workers=workers,\n maxsize=maxsize,\n on_start=on_start,\n on_done=on_done,\n )\n )\n\n stage = _to_stage(stage)\n\n return _Stage(\n worker_constructor=WORKER,\n workers=workers,\n maxsize=maxsize,\n on_start=on_start,\n on_done=on_done,\n target=_map,\n args=(f,),\n dependencies=[stage],\n )", "def thread_map(target, iterable, thread_count=None, *args, **kwargs):\n try:\n jobsize = len(iterable)\n except TypeError:\n iterable = list(iterable)\n jobsize = len(iterable)\n def array_targ(function, it, retvals, arglist, kwarglist, start, size):\n for i in range(start, start + size):\n retvals[i] = function(*(arglist + (it[i],)), **kwarglist)\n retvals = [None] * jobsize\n thread_job(jobsize, thread_count, array_targ,\n target, iterable, retvals, args, kwargs)\n return retvals", "def map(cls, args,\n pool_size=None, stop_on_failure=False, **kwargs):\n kw = dict(\n stdin=sys.stdin, stderr=PIPE,\n stdout=PIPE\n )\n kw.update(kwargs)\n if pool_size is None:\n import multiprocessing\n pool_size = multiprocessing.cpu_count()\n results = [None] * len(args)\n processes = []\n index = 0\n out_index = 0\n while args or processes:\n if args and len(processes) < pool_size:\n a = args.pop(0)\n if not isinstance(a, list):\n a = [a]\n cmd = cls(*a)\n a = cmd.command_line(cmd.kwargs.get('shell', False))\n processes.append((index, cmd, Popen(a, **kw)))\n index += 1\n for i, cmd, p in processes:\n result = p.poll()\n if result is not None:\n output = Stdout(p.stdout.read())\n output.stderr = p.stderr.read()\n output.returncodes = [result]\n output.failed = bool(result)\n output.succeeded = not output.failed\n results[i] = output\n processes.remove((i, cmd, p))\n if out_index == i:\n out_index += 1\n yield results[i]\n if result > 0 and stop_on_failure:\n args = None\n for index, cmd, p in processes:\n if p.poll() is None: # pragma: no cover\n p.kill()\n cmd._raise(output=output)\n time.sleep(.1)\n if out_index < len(results): # pragma: no cover\n yield results[out_index]\n out_index += 1", "def run_in_parallel(n_proc, target, all_args):\n curr_item = Counter()\n def worker():\n index = curr_item.return_and_increment()\n while index < len(all_args):\n args = all_args[index]\n target(*args)\n index = curr_item.return_and_increment()\n return\n\n fork_and_wait(n_proc, worker)", "def iterate_mproc_map(wrap_func, iterate_vals, nb_workers=CPU_COUNT, desc='', ordered=True):\n iterate_vals = list(iterate_vals)\n nb_workers = 1 if not nb_workers else int(nb_workers)\n nb_workers = CPU_COUNT if nb_workers < 0 else nb_workers\n\n if desc is not None:\n pbar = tqdm.tqdm(total=len(iterate_vals), desc=str('%r @%i-threads' % (desc, nb_workers)))\n else:\n pbar = None\n\n if nb_workers > 1:\n logging.debug('perform parallel in %i threads', nb_workers)\n # Standard mproc.Pool created a demon processes which can be 
called\n # inside its children, cascade or multiprocessing\n # https://stackoverflow.com/questions/6974695/python-process-pool-non-daemonic\n\n # pool = mproc.Pool(nb_workers)\n # pool = NonDaemonPool(nb_workers)\n pool = ProcessPool(nb_workers)\n # pool = Pool(nb_workers)\n mapping = pool.imap if ordered else pool.uimap\n else:\n logging.debug('perform sequential')\n pool = None\n mapping = map\n\n for out in mapping(wrap_func, iterate_vals):\n pbar.update() if pbar else None\n yield out\n\n if pool:\n pool.close()\n pool.join()\n pool.clear()\n\n pbar.close() if pbar else None", "def map(self, target, *iterable: iter):\n for args in zip(*iterable):\n self.submit(target=target, args=args)", "def fastMap(mapper, data):\n i = 0\n ans = []\n while i < len(data):\n with Pool(MAX_POOL_SIZE) as pool:\n ans.extend(pool.map(mapper, data[i:i+MAX_POOL_SIZE]))\n i += MAX_POOL_SIZE\n\n return ans", "def mpi_fork(n):\n if n<=1: \n return \"child\"\n if os.getenv(\"IN_MPI\") is None:\n env = os.environ.copy()\n env.update(\n MKL_NUM_THREADS=\"1\",\n OMP_NUM_THREADS=\"1\",\n IN_MPI=\"1\"\n )\n subprocess.check_call([\"mpirun\", \"-np\", str(n), sys.executable] + sys.argv, env=env)\n return \"parent\"\n else:\n return \"child\"", "def tmap(f, seq_args, num_workers=20, worker_queue=None, wait=True, stop_on_error=True):\n\n if worker_queue:\n wq = worker_queue\n else:\n # see if we have a global queue to work with.\n if _wq:\n wq = _wq\n else:\n if num_workers == 0:\n return map(f, seq_args)\n\n wq = WorkerQueue(num_workers)\n\n # we short cut it here if the number of workers is 0.\n # normal map should be faster in this case.\n if len(wq.pool) == 0:\n return map(f, seq_args)\n\n # print(\"queue size:%s\" % wq.queue.qsize())\n\n # TODO: divide the data (seq_args) into even chunks and\n # then pass each thread a map(f, equal_part(seq_args))\n # That way there should be less locking, and overhead.\n\n results = []\n for sa in seq_args:\n results.append(FuncResult(f))\n wq.do(results[-1], sa)\n\n # wq.stop()\n\n if wait:\n # print(\"wait\")\n wq.wait()\n # print(\"after wait\")\n # print(\"queue size:%s\" % wq.queue.qsize())\n if wq.queue.qsize():\n raise RuntimeError(\"buggy threadmap\")\n # if we created a worker queue, we need to stop it.\n if not worker_queue and not _wq:\n # print(\"stopping\")\n wq.stop()\n if wq.queue.qsize():\n um = wq.queue.get()\n if not um is STOP:\n raise RuntimeError(\"buggy threadmap\")\n\n # see if there were any errors. If so raise the first one. This matches map behaviour.\n # TODO: the traceback doesn't show up nicely.\n # NOTE: TODO: we might want to return the results anyway? 
This should be an option.\n if stop_on_error:\n error_ones = list(filter(lambda x: x.exception, results))\n if error_ones:\n raise error_ones[0].exception\n\n return map(lambda x: x.result, results)\n return [wq, results]", "def parallel_map(work_func, *sequences, **kwargs):\n # kwargs\n cores = kwargs.get('cores', None)\n ordered = kwargs.get('ordered', False)\n buffer_factor = kwargs.get('buffer_factor', 2.0)\n use_multiprocessing = kwargs.get('use_multiprocessing', False)\n heart_beat = kwargs.get('heart_beat', 0.001)\n fill_activate = 'fill_void' in kwargs\n fill_value = kwargs.get('fill_void', None)\n name = kwargs.get('name', None)\n\n if name:\n log = logging.getLogger(__name__ + '[%s]' % name)\n else:\n log = logging.getLogger(__name__)\n\n if heart_beat <= 0:\n raise ValueError(\"heart_beat must be >0.\")\n\n if cores is None or cores <= 0:\n cores = multiprocessing.cpu_count()\n log.debug(\"Using all cores (%d)\", cores)\n else:\n log.debug(\"Only using %d cores\", cores)\n\n # Choose parallel types\n if use_multiprocessing:\n queue_t = multiprocessing.Queue\n worker_t = _WorkerProcess\n else:\n queue_t = queue.Queue\n worker_t = _WorkerThread\n\n queue_work = queue_t(int(cores * buffer_factor))\n queue_results = queue_t(int(cores * buffer_factor))\n\n log.log(1, \"Constructing worker processes\")\n workers = [worker_t(name, i, work_func, queue_work, queue_results,\n heart_beat)\n for i in range(cores)]\n\n log.log(1, \"Constructing feeder thread\")\n feeder_thread = _FeedQueueThread(name, sequences, queue_work,\n len(workers), heart_beat, fill_activate,\n fill_value)\n\n return ParallelResultsIterator(name, ordered, use_multiprocessing,\n heart_beat, queue_work,\n queue_results, feeder_thread, workers)", "def compute(args, fun, max_workers=6):\n print(\"\\nProcessing symbols in parallel\")\n ex = futures.ThreadPoolExecutor(max_workers=max_workers)\n ex.map(fun, args)", "def init_processes(rank, size, fn, backend='gloo'):\r\n os.environ['MASTER_ADDR'] = '127.0.0.1'\r\n os.environ['MASTER_PORT'] = '29500'\r\n dist.init_process_group(backend, rank=rank, world_size=size)\r\n fn(rank, size)", "def mpi_fork(n, bind_to_core=False):\n if n<=1:\n return \"child\"\n if os.getenv(\"IN_MPI\") is None:\n env = os.environ.copy()\n env.update(\n MKL_NUM_THREADS=\"1\",\n OMP_NUM_THREADS=\"1\",\n IN_MPI=\"1\"\n )\n args = [\"mpirun\", \"-np\", str(n)]\n if bind_to_core:\n args += [\"-bind-to\", \"core\"]\n args += [sys.executable] + sys.argv\n subprocess.check_call(args, env=env)\n return \"parent\"\n else:\n return \"child\"", "def init_processes(fn, local_rank, backend='nccl'):\n dist.init_process_group(backend)\n fn(dist.get_rank(), dist.get_world_size(), local_rank)", "def parallelize_process(self, data, func, n_cores=4):\n data_split = np.array_split(data, n_cores)\n pool = Pool(n_cores)\n data_return = pool.map(func, data_split)\n pool.close()\n pool.join()\n return data_return", "def map(_, params):\n import numpy as np\n from itertools import product\n from random import shuffle\n\n if 'param_set' in params:\n parameter_sets = params['param_set']\n else:\n alphas = params['alphas']\n Vs = params['Vs']\n gammas = params['gammas']\n parameter_sets = [item for item in product(alphas, gammas, Vs)]\n shuffle(parameter_sets)\n\n ## discretize the parameter configurations and equitably distribute\n ## them for the next map instance to deal with.\n chunk_length = len(parameter_sets)/params['nprocs']\n leftover = len(parameter_sets) % params['nprocs']\n for n in xrange(params['nprocs']):\n if n < 
leftover:\n left = n*(1+chunk_length)\n to_yield = parameter_sets[left:left+1+chunk_length]\n else:\n left = leftover*(1+chunk_length) + (n-leftover)*chunk_length\n to_yield = parameter_sets[left:left+chunk_length]\n #print n, to_yield, len(to_yield)\n yield (n, to_yield)", "def map_and_batch(map_func,\n batch_size,\n num_parallel_batches=None,\n drop_remainder=False,\n num_parallel_calls=None):\n\n if num_parallel_batches is None and num_parallel_calls is None:\n num_parallel_calls = batch_size\n elif num_parallel_batches is not None and num_parallel_calls is None:\n num_parallel_calls = batch_size * num_parallel_batches\n elif num_parallel_batches is not None and num_parallel_calls is not None:\n raise ValueError(\n \"`map_and_batch` allows only one of `num_parallel_batches` and \"\n \"`num_parallel_calls` to be set, but \"\n f\"`num_parallel_batches` was set to {num_parallel_batches} \"\n f\"and `num_parallel_calls` as set to {num_parallel_calls}.\")\n\n def _apply_fn(dataset):\n return _MapAndBatchDataset(dataset, map_func, batch_size,\n num_parallel_calls, drop_remainder)\n\n return _apply_fn" ]
[ "0.73466885", "0.7045566", "0.69555485", "0.6916925", "0.6867893", "0.6712211", "0.6629893", "0.6470534", "0.6378345", "0.6297177", "0.6100757", "0.6050541", "0.6016031", "0.601225", "0.5993307", "0.5915234", "0.5865009", "0.58327293", "0.58125037", "0.5781024", "0.5759589", "0.5751906", "0.57371116", "0.57355934", "0.5729691", "0.56994295", "0.568188", "0.5661602", "0.5654171", "0.56423956" ]
0.73086846
1
Check if the number is palindromic. O(1) timecomplexity O(1) spacecomplexity
def is_palindromic(n: int) -> bool: str_n = str(n) if str_n == str_n[::-1]: return True return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def isPalindromic(n: int):\n return str(n) == str(n)[::-1]", "def is_palindrome(n):\n d = digits(n)\n r = int(\"\".join([str(i) for i in d]))\n return n == r", "def is_palindrome(n):\n x, y = n, 0\n f = lambda: 10 * y + x % 10\n while x > 0:\n x, y = x // 10, f()\n return y == n", "def is_palindrome(n):\n x, y = n, 0\n f = lambda: y * 10 + x % 10\n while x > 0:\n x, y = x // 10, f()\n return y == n", "def is_palindrome(n):\n v = []\n while n > 0:\n v.append(n % 10)\n n //= 10\n for i in range(len(v)//2):\n if v[i] != v[len(v)-i-1]:\n return False\n return True", "def is_palindrome(n):\n return(n == reverse(n))", "def is_palindrome(n):\n # store locally\n temp = n\n rev = 0\n while n > 0:\n # get digit one by one\n digit = n % 10\n # find reverse number\n rev = rev * 10 + digit\n # divide the number\n n = n // 10\n return temp == rev", "def palindrome_check(num):\n num= str(num)\n len_num= len(num)\n for i in range(len_num/2):\n if num[i] == num[len_num-i-1]:\n ans= True\n else:\n ans= False\n break\n return ans", "def isPalindrome(Number):\r\n ListOfDigit=[int(d) for d in str(Number)]\r\n n=len(ListOfDigit)\r\n for i in range(n//2):\r\n if ListOfDigit[i]!=ListOfDigit[-(i+1)]:\r\n return(False)\r\n return(True)", "def is_pal(n):\r\n # Change the number into a string and then a list.\r\n as_list_of_chars = list(str(n))\r\n # Copy the list and reverse it.\r\n reversed_list_of_chars = list(as_list_of_chars)\r\n reversed_list_of_chars.reverse()\r\n # True if the list of chars is palindromic.\r\n return as_list_of_chars == reversed_list_of_chars", "def is_palindrome(n):\n ns = str(n)\n for i in range(0, len(ns) // 2):\n if ns[i] != ns[len(ns) - 1 - i]: return False\n return True", "def check_palindrome():", "def is_palindrome(x):\n \n # Assume negative number is not a palindromic number.\n if x < 0:\n return False\n\n ten_base = 1\n \n # Determine num size with a base of 10\n while x / ten_base >= 10:\n ten_base *= 10\n\n while x > 0:\n left_num, right_num = x / ten_base, x % 10\n if left_num != right_num:\n return False\n \n # Update and prep for next iteration.\n x = (x % ten_base) / 10\n ten_base /= 100\n\n return True", "def is_palindrome(num):\n digitList = int2list(num)\n \n i = 0\n while i <= round(len(digitList)/2):\n if digitList[i] != digitList[-(i+1)]:\n return False\n i += 1\n return True", "def is_palindrome(number_):\n temp = number_\n reverse = 0\n while number_ > 0:\n digit = number_ % 10\n reverse = reverse * 10 + digit\n number_ = number_ // 10\n if temp == reverse:\n return True\n else:\n return False", "def ispalind(num):\n num = list(str(num))\n if num == num[::-1]:\n return True", "def palindrom_permutation(string: str):\n string = re.sub(r'\\W+', '', string.lower())\n\n chars = dict()\n for c in string:\n chars[c] = chars[c] + 1 if c in chars else 1\n\n almost_not_okey = False\n for val in chars.values():\n if val % 2 == 1:\n if not almost_not_okey:\n almost_not_okey = True\n else:\n return False\n\n if almost_not_okey:\n return len(string) % 2 == 1\n return True", "def is_palindrome(n):\n num = list(str(n))\n h1 = num[:int(len(num)/2)] # first half of palindrome\n if len(num) % 2 == 0:\n h2 = num[int(len(num)/2):] # second half of palindrome\n else:\n h2 = num[int(len(num)/2) + 1:]\n return h1 == list(reversed(h2))", "def is_palindromic(lst):\n return all( lst[i] == lst[-(i+1)] for i in range(len(lst)) )", "def isLychrel(Number):\r\n Number+=reverse(Number)\r\n n=0\r\n while n<50:\r\n if isPalindrome(Number):\r\n return False\r\n else:\r\n Number+=reverse(Number)\r\n 
n+=1\r\n return True", "def is_antipalindrome(n):\n v = []\n while n > 0:\n v.append(n % 10)\n n //= 10\n for i in range(len(v)//2):\n if v[i] == v[len(v)-i-1]:\n return False\n return True", "def check_pal(num):\r\n \r\n num = str(num) #Convert number to string.\r\n \r\n #If a number is a palindrome, rreturn True \r\n if num[0] == num[len(num)-1] and len(num) <= 3:\r\n return True\r\n \r\n #If the first and last digits of a number are equal when its length is > 3,\r\n #strip the end digits away analyse the resulting number.\r\n elif num[0] == num[len(num)-1]:\r\n return check_pal(num[1:len(num)-1])\r\n \r\n #If a number is not a palindrome, return False\r\n else:\r\n return False", "def is_number_palindrome(n):\n digits = list() # list of all digits in n (reversed order)\n while n > 0:\n last_digit = n % 10\n digits.append(last_digit)\n n = n // 10\n if is_list_palindrome(digits):\n return True\n else:\n return False", "def is_palindrome(number):\r\n str_input = str(number)\r\n return str_input == reversed(str_input)", "def is_palindrome(a):\n\tmax = a\n\tmin = 0\n\twhile max > 0:\n\t\tmin = (min * 10 + max % 10)\n\t\tmax /= 10\n\treturn min == a", "def isPalendrome(number):\n\t\n\tnum = str(number)\n\ti \t= 0\n\tj \t= len(num) - 1\n\tmid = len(num) // 2\n\n\t#print(mid)\n\t\n\t# While i and j are not in the middle\n\twhile( i != mid):\n\t\t#print(i,j,sep=\"\\t\")\n\t\t#print(num[i],num[j], sep=\"\\t\")\n\t\tif(num[i] != num[j]):\n\t\t\treturn(False)\n\t\telse:\n\t\t\ti = i + 1\n\t\t\tj = j - 1\n\n\treturn(True)", "def isReversible(n): \n if n % 10 == 0:\n return False\n s = n + reverseNum(n)\n while s > 0:\n digit = s % 10\n if not digit in [1,3,5,7,9]:\n return False\n s //= 10\n return True", "def is_palindrome(num):\n str_num = str(num)\n\n if len(str_num) == 1:\n return True\n elif len(str_num) == 2:\n return str_num[0] == str_num[1]\n\n if str_num[0] == str_num[len(str_num)-1]:\n return is_palindrome(str_num[1:len(str_num)-1])\n else:\n return False", "def is_palindrome(num):\n\treversed_num = str(num)[::-1]\n\tif reversed_num == str(num): return True\n\telse: return False", "def is_Lychrel(n):\n count = 1\n current = n\n while count < 50:\n current = reverse_and_add(current)\n if is_palindrome(current):\n return False\n count += 1\n return True" ]
[ "0.8517692", "0.80143285", "0.7946466", "0.792349", "0.7887479", "0.7825143", "0.7789734", "0.7784462", "0.7754578", "0.77515846", "0.7744035", "0.7723485", "0.770385", "0.76945305", "0.7589223", "0.7582267", "0.75784665", "0.7555034", "0.7526849", "0.7496103", "0.74658847", "0.746254", "0.7434272", "0.74262965", "0.74127555", "0.73178023", "0.73097974", "0.7289185", "0.7277357", "0.7263042" ]
0.82342494
1
Calculate the sum of the digits in the number.
def sum_of_digits_in_number(n: int) -> int: return sum(int(digit) for digit in str(n))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sumDigit():", "def calculate_digits_sum(number: int) -> int:\n\n # Return value\n ret = 0\n\n while number != 0:\n # Extract the last digit number and add it to ret\n ret += number % 10\n\n # Delete the last digit of the number\n number //= 10\n\n return ret", "def get_sum_of_digits(number):\n return sum(int(digit) for digit in str(number))", "def sum_of_digits(number):\n \n if number < 0:\n number = -number\n \n digit_sum = 0\n while True:\n if number == 0:\n break\n digit = number % 10 # Get the digit\n digit_sum = digit_sum + digit # Accumulate it into the sum\n number = number // 10 # Get ready for the next digit\n \n return digit_sum", "def digit_sum(n):\n\treturn sum(int(c) for c in str(n))", "def sum_of_digits(n):\n return sum(int(c) for c in str(n))", "def sum_digits(n):\n sum = 0\n while n > 0:\n num = n % 10\n sum += num\n n //= 10\n return sum", "def digitSum ( n ) :\n return sum ( map ( int , str ( n ) ) )", "def sum_of_digits(number):\n # Students: While you are welcome to try to understand this\n # function definition, all you have to do is trust\n # that the green doc-string is correct (it is!).\n if number < 0:\n number = -number\n\n digit_sum = 0\n while True:\n if number == 0:\n break\n digit = number % 10 # Get the digit\n digit_sum = digit_sum + digit # Accumulate it into the sum\n number = number // 10 # Get ready for the next digit\n\n return digit_sum", "def sum_digits(n):\n \"*** YOUR CODE HERE ***\"\n count=0\n length=len(str(n))\n last=0\n sum=0\n while count<length:\n last=n%10\n n//=10\n sum+=last\n count+=1\n return sum", "def sum_digits(n):\n digits = [int(i) for i in str(n)]\n return sum(digits)", "def sum_digits(n):\n num = n\n incTen = 1\n summy = 0\n if num > 10:\n while incTen * 10 < num:\n incTen = incTen * 10\n while incTen >= 10:\n summy += num // incTen\n num = num % incTen\n incTen = incTen // 10\n summy += num\n return summy\n elif num == 10:\n return 1\n else:\n return num", "def digit_sum(n):\n s = 0\n while n:\n s += n % 10\n n //= 10\n return s", "def sum_of_digits(n):\n rest_of_num, last_num = split(n)\n if rest_of_num < 10:\n \treturn last_num + rest_of_num\n return last_num + sum_of_digits(rest_of_num)", "def sum_digits(n):\n if (n < 10):\n return n\n else:\n all_but_last, last = split(n)\n return sum_digits(all_but_last) + last", "def digit_sum(n):\n sum_of_digits = 0\n for c in str(n):\n sum_of_digits += int(c)\n return sum_of_digits", "def digit_sum(x):\n s = 0\n while x>0:\n s = s+(x%10)\n x = x//10\n\n return s", "def square_digit_sum(number):\n return sum(precomputed_digit_squares[digit] for digit in str(number))", "def add_digits(n):\n return sum([int(d) for d in str(n)])", "def digital_sum(n):\n r = 0\n while n:\n r, n = r + n % 10, n // 10\n return r", "def count_digits(num):\n total = 0\n while num is not 0:\n total += num % 10\n num //= 10\n return total", "def factorial_digit_sum(n):\n sum = 0\n factStr = str(factorial(n))\n for digit in factStr:\n sum += int(digit)\n return sum", "def sumDigits(s):\n _sum = 0\n for char in s:\n try:\n _sum += int(char)\n except:\n pass\n return _sum", "def digit(number: int, n: int) -> int:\n return number // 10 ** n % 10", "def _number_of_digits(number: int) -> int:\n return int(log10(number)) + 1", "def calculate(x: int) -> int:\n\n digits = list(map(int, list(str(x))))\n return sum(list(map(lambda a: a**2, digits)))", "def sum_integer(n):\n return sum([int(elem) for elem in str(n)])", "def sumDigits(s):\n sum_digit = 0\n\n for e in s:\n try:\n sum_digit += int(e)\n except ValueError:\n 
continue\n return sum_digit", "def sum_squares(num):\n sum = 0\n while (num != 0):\n sum += math.pow((num % 10), 2)\n num = num/10\n return int(sum)", "def sum_of_digit_powers_in_number(n: int, power: int) -> int:\n return sum(int(digit) ** power for digit in str(n))" ]
[ "0.8373557", "0.8288675", "0.8287705", "0.8256345", "0.8218147", "0.8207546", "0.820222", "0.8192879", "0.81736803", "0.8165122", "0.8146696", "0.805993", "0.8045805", "0.8027341", "0.80214125", "0.7909749", "0.7819021", "0.7793516", "0.7512039", "0.75100523", "0.7226058", "0.7204784", "0.69266385", "0.69133985", "0.69002646", "0.6850546", "0.68479884", "0.6809976", "0.68014264", "0.6762275" ]
0.8439344
0
Multiply all elements in array. O(n) timecomplexity O(1) spacecomplexity
def product(array: Iterable): product = 1 for i in array: product *= i return product
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def multiply(numbers):\n prod = 1\n for i in numbers:\n prod = prod*i\n return prod", "def prod(arg):\n ret = 1\n for i in range(0, len(arg)):\n ret = ret * arg[i]\n return ret", "def prod(iterable):\n \n return reduce(operator.mul, iterable, 1)", "def __mul__(self, i):\n return asarray(multiply(self, i))", "def prod(iterable):\n return reduce(operator.mul, iterable, 1)", "def prod(iterable):\n return reduce(operator.mul, iterable, 1)", "def prod(iterable):\n return reduce(operator.mul, iterable, 1)", "def product_array(md_array):\n products = [1] * len(md_array)\n\n for list in md_array:\n list_index = md_array.index(list)\n for item in list:\n products[list_index] *= item\n\n return products", "def product(iterable):\n prod = 1\n for i in iterable:\n prod *= i\n return prod", "def prod(a, x):\n return [a[i]*x for i in range(2)]", "def product(it):\n prod = 1\n for x in it:\n prod *= x\n return prod", "def prod(self):\n r = 0\n for i in range(len(self)):\n r *= self[i]\n\n return r", "def reduce_by_multiplication(data):\n total = 1\n for num in data:\n total *= num\n return total", "def product( iterable ):\n p= 1\n for n in iterable:\n p *= n\n return p", "def product_from_rest(arr):\n\n product = 1\n res = []\n for num in arr:\n product = product * num\n\n for num in arr:\n res.append(product//num)\n \n return res\n \n\n # Naive approach (without division) - Time complexity = O(n^2)\n #\n # res = []\n # for i_num, _ in enumerate(arr):\n # res.append(1)\n # for i_x, x in enumerate(arr):\n # if i_x != i_num:\n # res[i_num] = res[i_num] * x\n # return res", "def product(*nums):\n\treturn reduce((lambda x, y: x * y), nums)", "def brute_multiply(numbers):\n result = []\n for a in range(len(numbers)):\n product = 1\n for index, b in enumerate(numbers):\n if index != a:\n product *= b\n result.append(product)\n return result", "def prod(seq):\n p = 1\n for a in seq:\n p *= a\n return p", "def prod(L):\n res = 1\n for e in L:\n res *= e\n return res", "def _prod(seq):\n return reduce(lambda x, y: x*y, seq, 1)", "def scalar_mult(n, m):\n\ttemp = []\n\tfor i in range(len(m)):\n\t\te = []\n\t\tfor j in range(len(m[0])):\n\t\t\te.append(m[i][j]*n)\n\t\ttemp.append(e)\n\treturn temp", "def intprod(xs):\n out = 1\n for x in xs:\n out *= x\n return out", "def multiply(numbers):\n \n result = numbers[0]\n for n in numbers[1:]:\n result = n * result\n return result", "def product(numbers):\n p = 1\n for x in numbers:\n p *= x\n return p", "def product(numbers):\n p = 1\n for x in numbers:\n p *= x\n return p", "def prod(l):\n r = 1\n for x in l:\n r *= x\n return r", "def prod(n):\n product = S.One\n for i in n:\n product = product * i\n return product", "def multiplied(*values):\n values = [_normalize(v) for v in values]\n def _product(it):\n p = 1\n for n in it:\n p *= n\n return p\n for v in zip(*values):\n yield _product(v)", "def multiply(t):\n return mul(*t)", "def product(mylist):\r\n temp = 1\r\n for num in mylist:\r\n temp *= num\r\n return temp" ]
[ "0.68984216", "0.6817217", "0.68122405", "0.68080294", "0.6798158", "0.6798158", "0.6798158", "0.67805743", "0.67755836", "0.67672944", "0.6732547", "0.66867065", "0.6683021", "0.66712505", "0.66672236", "0.66325617", "0.6606041", "0.65941656", "0.6586031", "0.6581371", "0.6561844", "0.6558201", "0.6544153", "0.65026534", "0.65026534", "0.6500159", "0.648441", "0.648376", "0.6455575", "0.6429858" ]
0.7929102
0
Find all divisors of a number. O(sqrt(n)) timecomplexity O(n) spacecomplexity
def find_divisors(n: int) -> Set[int]: divisors = {1, n} for i in range(2, int(n ** 0.5) + 1): if n % i == 0: divisors.add(i) divisors.add(n // i) return divisors
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getDivisors(n):", "def find_divisors(n):\n\n\tpd = [1]\n\n\tsqrtN = int(math.sqrt(n))\n\n\tfor d in range(2, sqrtN+1):\n\t\tif n % d == 0:\n\t\t\tpd.append(d)\n\t\t\tpair = int(n/d)\n\t\t\tif not pair == d:\n\t\t\t\tpd.append(pair)\n\n\treturn pd", "def divisors(n):\n d = []\n for i in range(1, int(math.sqrt(n) + 1)):\n if n % i == 0:\n d.append(i)\n d.append(n / i)\n return set(d)", "def divisors(n):\n dvs = []\n for i in range(1, int(math.sqrt(n)) + 1):\n if n % i == 0:\n dvs.append(i)\n j = n / i\n if j != i:\n dvs.append(j)\n\n dvs.remove(n)\n return dvs", "def get_divisors(n):\n n = abs(n)\n divisors = []\n for i in range(1, int(n**0.5)+1):\n if n%i == 0:\n divisors.append(i)\n divisors.append(-i)\n if i*i != n:\n divisors.append(n//i)\n divisors.append(-n//i)\n return sorted(divisors, key=abs)", "def divisors(x):\n x = abs(x)\n result = []\n upper_bound = int(math.sqrt(x))\n for i in range(1, upper_bound + 1):\n if x % i == 0:\n if x / i == i:\n result.append(i)\n else:\n result.append(i)\n result.append(x//i)\n return sorted(distinct(result))", "def proper_divisors(n):\n divisors = set([1])\n for i in range(2, int(ceil(sqrt(n)))+1):\n if n % i == 0:\n divisors.add(i)\n divisors.add(n/i)\n return divisors", "def get_divisors(num):\n assert num != 0, \"Num is 0\"\n divisors = []\n sq_root = int(num**0.5)\n for i in range(1, sq_root + 1):\n if num % i == 0:\n divisors.extend([i, num // i])\n # if num has a perfect sq, that number will be added twice, then:\n if sq_root ** 2 == num:\n divisors.remove(sq_root)\n return divisors", "def find_divisors_2(number):\n divisors = [n for n in range(1, number) if number % n == 0]\n return divisors", "def find_divisors_1(number):\n divisors = []\n # Test all numbers from 1 to number-1.\n # Actually, we can be more efficient with range(1, (number//2)+1)\n for n in range(1, number): \n if number % n == 0:\n divisors.append(n)\n return divisors", "def findDivisor(num):\n divisors = [1]\n for i in range(2, int(sqrt(num)) + 1):\n if num % i == 0:\n divisors.append(i)\n temp = num / i\n if temp != i:\n divisors.append(temp)\n return divisors", "def find_proper_divisors(n: int) -> Set[int]:\n\n divisors = find_divisors(n)\n return divisors - {n} # without n", "def divisors(n):\r\n numbers = []\r\n for i in xrange(1, n+1):\r\n if n % i == 0:\r\n numbers.append(i)\r\n return numbers", "def divisors(number: int) -> Set[int]:\n\n if number == 0:\n return {0}\n divisor = 2\n while divisor * divisor <= number:\n if number % divisor == 0:\n smaller_result = divisors(number // divisor)\n multiplied_result = {d * divisor for d in smaller_result}\n\n return smaller_result | multiplied_result\n divisor = divisor + 1\n\n return {1, number}", "def proper_divisors(n: int) -> [int]:\n\n if n == 1:\n return []\n\n x = 2\n divisors = set([1])\n while x * x <= n and n > 1:\n if n % x == 0:\n divisors.add(x)\n divisors.add(n // x)\n\n x += 1\n\n s = sorted(divisors)\n return s", "def proper_divisors(n):\r\n numbers = []\r\n for i in xrange(1, n):\r\n if n % i == 0:\r\n numbers.append(i)\r\n \r\n return numbers", "def divisors(num: int) -> Iterable[int]:\n assert num > 0\n if num == 1:\n yield 1\n return\n\n for divisor in range(1, int(math.sqrt(num)) + 1):\n if num % divisor == 0:\n yield divisor\n divisor_2 = num // divisor\n if divisor_2 != divisor:\n yield divisor_2\n else:\n return", "def simple_get_divisors(num: int) -> list:\n all_divisors = []\n for possible_divisor in range(1, math.floor(num / 2) + 1):\n if num % possible_divisor == 0:\n 
all_divisors.append(possible_divisor)\n return all_divisors", "def d(n):\n divisors = []\n for i in range(1, n):\n if n % i == 0:\n divisors.append(i)\n return sum(divisors)", "def divisors(n):\n return [x for x in range(1, n) if n % x == 0]", "def divisors(n: int) -> list:\n # iterate through every number <= n/2 and check whether the number is a divisor\n # append to list if not in list\n # in the end, append the number\n divs = [n]\n for i in range(1, n//2 + 1):\n if n % i == 0:\n divs.append(i)\n return divs", "def proper_divisors(n):\n l = [1]\n if n == 1 or n == 2:\n return l\n else:\n limit = math.floor(n/2) + 1\n for i in range(2, limit):\n if n % i == 0:\n l.append(i)\n return l", "def count_divisors(n):\r\n if n == 1:\r\n return 0\r\n m = int(sqrt(n))\r\n c = 1\r\n if m * m == n:\r\n c += 1\r\n m -= 1\r\n for i in xrange(2, m+1):\r\n if n % i == 0:\r\n c += 2\r\n return c", "def getNumDivisors(n):\n\n n = abs(int(n))\n\n r = 1\n i = 2\n while i <= n:\n a = 0\n while n % i == 0:\n n = n / i\n a = a + 1\n r = r * (a + 1)\n i = i + 1\n\n return r", "def list_of_divisors_v1(n):\n \"\"\"\n This is a slow algorithm. But it is correct.\n \"\"\"\n if n == 1:\n return [1]\n if n == 2:\n return [1,2]\n L = {}\n if n > 0:\n L[1] = True\n if n > 1:\n L[n] = True\n for i in list_of_prime_factors(n):\n L[i] = True\n for j in list_of_divisors(n // i):\n L[j] = True\n return L.keys()", "def get_divisors_with_parity_check(num: int) -> list:\n all_divisors = []\n # if number is odd, increment by 2 because don't have to check evens\n increment = 2 if num % 2 == 1 else 1\n\n for possible_divisor in range(1, math.floor(num / 2) + 1, increment):\n if num % possible_divisor == 0:\n all_divisors.append(possible_divisor)\n return all_divisors", "def count_proper_divisors(n):\r\n if n == 1:\r\n return 0\r\n m = int(sqrt(n))\r\n c = 1\r\n if m * m == n:\r\n c += 1\r\n m -= 1\r\n for i in xrange(2, m+1):\r\n if n % i == 0:\r\n c += 2\r\n return c", "def divisori(n):\n div=set()\n for i in range(1,int(n**0.5+1)):\n if n%i==0:\n div.add(int(n/i))\n div.add(i)\n return sorted(div)", "def prime_divisors(n):\r\n\treturn list(set(factors(n)))", "def divisors(N):\n # Initialize the list of divisors\n divisor_list = [1]\n # Check division by d for d <= N/2\n for d in range(2,N // 2 + 1):\n if N % d == 0:\n divisor_list.append(d)\n divisor_list.append(N)\n return divisor_list" ]
[ "0.8378229", "0.82921267", "0.8258386", "0.824777", "0.8161784", "0.81405884", "0.81158715", "0.80825186", "0.80824065", "0.8021525", "0.79500633", "0.79354244", "0.7930292", "0.7918839", "0.7914931", "0.7914827", "0.7891488", "0.788354", "0.78337795", "0.7827369", "0.7817869", "0.77919513", "0.77014714", "0.7697298", "0.76914763", "0.7665921", "0.7620871", "0.7595224", "0.7575621", "0.7555262" ]
0.83815986
0
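The negative pool above is dominated by square-root divisor enumerations; a minimal sketch of that shared pattern, with an assumed helper name (not taken from the record), collects both members of each divisor pair up to sqrt(n):

import math

def divisors_sqrt(n: int) -> list:
    # Collect both members of each divisor pair (d, n // d); O(sqrt(n)) time.
    found = set()
    for d in range(1, math.isqrt(n) + 1):
        if n % d == 0:
            found.add(d)
            found.add(n // d)
    return sorted(found)

# e.g. divisors_sqrt(28) == [1, 2, 4, 7, 14, 28]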
Check if a number is divisible by all numbers in an array. O(n) time complexity, O(1) space complexity.
def check_divisible(n: int, divisors: Iterable) -> bool: for i in divisors: if n % i != 0: return False return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def div_by(n, list_of_num):\n for num in list_of_num:\n if not n % num:\n return True\n return False", "def isGoodArray(self, nums: List[int]) -> bool:\n return functools.reduce(math.gcd, nums)==1", "def is_divisible(inpt:int, i:int) -> bool:\n return inpt%i == 0", "def is_divisible(inpt:int, i:int) -> bool:\n return inpt%i == 0", "def divisible_by(array, divisor):\n return_list = list()\n for i in array:\n if i % divisor == 0:\n return_list.append(i)\n return return_list", "def is_divisible(num, n):\n return num % n == 0", "def verifica_element_divide_lista(numar, lista_divizori):\n for i in lista_divizori:\n if i == 0:\n return False\n if numar % i != 0:\n return False\n return True", "def is_primary_trivial_division(n):\n mod = int(math.sqrt(n))\n for _ in xrange(2, mod + 1):\n if n % _ == 0:\n return 0\n return n", "def divisible(a, b):\n return not a % b", "def divisible(x, y):\n return x % y == 0", "def divisible_by(n):\n return lambda x: x % n == 0", "def divisible_by(n):\n return lambda x: x % n == 0", "def is_multiple(n,m):\n return n % m == 0", "def is_perfect(n):\r\n if sum_proper_divisors(n) == n:\r\n return True\r\n else:\r\n return False", "def trial_div(n: int) -> bool:\n if n == 1:\n return False\n i = 2\n while i**2 <= n:\n if n % i == 0:\n return False\n i += 1\n return True", "def divides(i, j):\n\tif j is 0:\n\t\treturn False\n\telif i % j:\n\t\treturn False\n\telse:\n\t\treturn True", "def es_primo(n):\n \n for i in range(2, n):\n if n % i == 0:\n return False\n return True", "def snt(n):\r\n f = True\r\n for j in range(2, n):\r\n if n % j == 0:\r\n f = False\r\n break\r\n return f", "def is_perfect_number(x):\n return sum(proper_divisors(x)) == x", "def perfect_num(number):\n new_list=[]\n for indx in range(1,number):\n if number % indx==0:\n new_list.append(indx)\n total=sum(new_list)\n if total==number:\n return True\n else:\n return False", "def quick_test():\n if PERIOD < 2:\n return False\n if SIZE % PERIOD != 0:\n return False\n return True", "def check_modulus(n):\n # type: (int) -> List[RE]\n return [check_composite(n), check_modulus_size(n)]", "def perfectd(n: int) -> bool:\n if sum(divisors(n)) - n == n:\n return True\n else:\n return False", "def canPartition(self, nums):\n cache = {}\n\n def helper(nums, i, k):\n if (i, k) in cache:\n return False\n if i >= len(nums):\n return False\n if k == 0:\n return True\n include_curr = helper(nums, i + 1, k - nums[i])\n exclude_curr = helper(nums, i + 1, k)\n if include_curr:\n cache[(i, k)] = False\n return include_curr or exclude_curr\n if not nums:\n return True\n s = sum(nums)\n if s % 2 != 0:\n return False\n return helper(nums, 0, s/2)", "def sat(n: int, nums=[77410, 23223, 54187], lower_bound=2):\n return all(i % n == 0 for i in nums) and n >= lower_bound", "def proper_divisors(n):\r\n numbers = []\r\n for i in xrange(1, n):\r\n if n % i == 0:\r\n numbers.append(i)\r\n \r\n return numbers", "def is_abundant(check_number):\n if number < sum(proper_divisors(check_number)):\n return True\n else:\n return False", "def canMakeArithmeticProgression(arr): \n new_arr = sorted(arr)\n diff = new_arr[1] - new_arr[0]\n for idx, num in enumerate(new_arr):\n if idx == 0:\n pass\n elif num - new_arr[idx - 1] != diff:\n return False\n return True", "def even_quotient(nums: list) -> int:\n for i in range(len(nums[:-1])):\n for j in range(i + 1, len(nums)):\n if nums[i] % nums[j] == 0:\n return nums[i] // nums[j]\n elif nums[j] % nums[i] == 0:\n return nums[j] // nums[i]", "def checkPerfectNumber(self, num: int) -> bool:\n 
if num <= 0:\n return False\n s = 0\n for i in range(1, int(math.sqrt(num) + 1)):\n if i != num:\n res = num % i\n if res == 0:\n s += i\n divisor = num // i\n if divisor != num:\n s += divisor\n if s > num:\n return False\n return s == num" ]
[ "0.7376041", "0.71703494", "0.7145728", "0.7145728", "0.6929325", "0.68242365", "0.6741569", "0.6678536", "0.6652018", "0.6615477", "0.655402", "0.655402", "0.6532272", "0.6516223", "0.64999205", "0.641369", "0.63702595", "0.631113", "0.6293705", "0.6261746", "0.62533575", "0.62234426", "0.62126195", "0.61566275", "0.6125877", "0.6123124", "0.6120375", "0.6119408", "0.6113242", "0.61068517" ]
0.7214447
1
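A readable restatement of the retrieved document above, with the import it needs and a small usage check (the example values are chosen here, not taken from the dataset):

from typing import Iterable

def check_divisible(n: int, divisors: Iterable) -> bool:
    # One pass over the candidate divisors, constant extra space.
    for i in divisors:
        if n % i != 0:
            return False
    return True

assert check_divisible(60, [2, 3, 5]) is True   # 60 is divisible by 2, 3 and 5
assert check_divisible(10, [2, 3]) is False     # 10 is not divisible by 3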
Compute the length of a Collatz sequence. O(1) space complexity.
def collatz_sequence_len(n: int) -> int: result = 1 while n != 1: if n % 2 == 0: n //= 2 else: n = 3 * n + 1 result += 1 return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def collatz_length(val):\r\n assert val >= 1\r\n \r\n # Seed the dictionary with collatz_length(1) = 1.\r\n if val == 1:\r\n collatz_len_dict[1] = 1\r\n return collatz_len_dict[1]\r\n \r\n # Return the collatz length if it exists in the dictionary.\r\n if val in collatz_len_dict:\r\n return collatz_len_dict[val]\r\n \r\n # Make a recursive call to collatz_length() using mapped_val to find this\r\n # val's length.\r\n mapped_val = collatz(val)\r\n collatz_len_dict[val] = 1 + collatz_length(mapped_val)\r\n return collatz_len_dict[val]", "def collatz(n):\n iterCount = 0\n while(n != 1):\n if(n & 1):\n n = 3 * n + 1\n else:\n n //= 2\n iterCount += 1\n return iterCount", "def collatz(start):\n counter = 1\n n = start\n while n != 2:\n if n % 2 == 0:\n n /= 2\n else:\n n = (n * 3) + 1\n counter += 1\n\n counter += 1\n return counter", "def brute(limit):\n c_lengths = {s: collatz_length(s) for s in range(1, limit+1)}\n return max(c_lengths, key=lambda x: c_lengths[x])", "def sequence_length_3D(sequence: torch.Tensor) ->torch.Tensor:\n used = torch.sign(torch.amax(torch.abs(sequence), dim=2))\n length = torch.sum(used, 1)\n length = length.int()\n return length", "def lcs_len(x, y):\r\n \r\n if len(x) == 0 or len(y) == 0:\r\n return 0\r\n \r\n xx = x[:-1] # xx = sequence x without its last element \r\n yy = y[:-1]\r\n \r\n if x[-1] == y[-1]: # if last elements of x and y are equal\r\n return lcs_len(xx, yy) + 1\r\n else:\r\n return max(lcs_len(xx, y), lcs_len(x, yy))", "def collatz(start):\n n = start\n\n collatz_sequence = [n]\n\n while global.collatz_sequences.key().contains(n):\n if n % 2 == 0:\n n = n // 2\n else:\n n = 3 * n + 1\n\n collatz_sequence.append(n)\n\n global.collatz_sequences[]\n\n return collatz_sequence", "def _get_run_length_ac(self):\n self._run_length_ac = []\n for block in self.data:\n self._run_length_ac.extend(\n encode_run_length(tuple(iter_zig_zag(block))[1:])\n )", "def GetSequenceLength(num_nodes: int) -> int:\n return num_nodes * (3 + (num_nodes - 1) * 2)", "def collatz(n):\n if n%2==0: return n/2\n else: return 3*n+1", "def count_runlength_per_character(sequence):\n character_counts = defaultdict(list)\n current_character = None\n\n for character in sequence:\n if character != current_character:\n character_counts[character].append(1)\n else:\n character_counts[character][-1] += 1\n\n current_character = character\n\n return character_counts", "def vector_length(self, x: float, y: float, z: float) -> float:\n A = 2.0 * (x * y * self.aga + x * z * self.bbe + y * z * self.cal)\n return sqrt(x ** 2 * self.asq + y ** 2 * self.bsq + z ** 2 * self.csq + A)", "def lcs_length(s1, s2):\n n = len(s1)\n m = len(s2)\n matrix = [([0]*(m+1)) for i in xrange(n+1)]\n for i in xrange(1, n+1):\n for j in xrange(1, m+1):\n if(s1[i-1]==s2[j-1]):\n matrix[i][j] = matrix[i-1][j-1] + 1\n else:\n matrix[i][j] = max(matrix[i-1][j], matrix[i][j-1])\n return matrix[n][m]", "def __len__(self):\n return len(self.cumulative_length)", "def number_bits_in_cardinality(self,card):\n return 32 - self.count_lead_zs(card)", "def get_length(board):\n length = 0\n for i in range(n):\n for j in range(n):\n length += len(board[i][j])\n return length", "def collatz(n):\n if n==1:\n return [n]\n \n if n>1:\n seq = [n]\n while n>1:\n n = collatz_step(n)\n seq.append(n)\n\n if seq[-1]==1:\n return seq", "def bit_smarter(limit):\n c_lengths = {}\n\n for s in range(1, limit+1):\n c_lengths[s] = s_collatz_length(s, c_lengths)\n\n return max(c_lengths, key=lambda x: c_lengths[x])", "def _lcs_len(a, b):\n dp = 
_lcs_dp(a, b)\n return dp[-1][-1]", "def length(xyz, along=False):\n xyz = np.asarray(xyz)\n if xyz.shape[0] < 2:\n if along:\n return np.array([0])\n return 0\n dists = np.sqrt((np.diff(xyz, axis=0) ** 2).sum(axis=1))\n if along:\n return np.cumsum(dists)\n return np.sum(dists)", "def CDSlen(self):\n return sum(self.exonLengths)", "def vec_len(x):\r\n \r\n length = math.sqrt(x[0]**2 + x[1]**2)\r\n return length", "def cable_length(self):\n skel = self.physical_space(copy=False)\n\n v1 = skel.vertices[skel.edges[:,0]]\n v2 = skel.vertices[skel.edges[:,1]]\n\n delta = (v2 - v1)\n delta *= delta\n dist = np.sum(delta, axis=1)\n dist = np.sqrt(dist)\n\n return np.sum(dist)", "def _len_lcs(x, y):\n table = _lcs(x, y)\n n, m = len(x), len(y)\n return table[n, m]", "def _len_lcs(x, y):\n table = _lcs(x, y)\n n, m = len(x), len(y)\n return table[n, m]", "def collatz(n):\n sequence = []\n\n while n != 1:\n if n > 1:\n sequence = sequence + [n]\n n = collatz_step(n)\n elif n < 1:\n n = collatz_step(n)\n sequence = sequence + [n]\n break\n if n == 1:\n sequence = sequence + [n]\n return sequence\n print sequence", "def largest_cc_size(ugraph):\n ccomp = cc_visited(ugraph)\n if len(ccomp) == 0:\n return 0\n \n return max([len(s) for s in ccomp])", "def collatz_cycle(n):\n cycles = 1\n assert n > 0\n while(n > 1):\n\tif n < cache_size and cycle_table[n] != 0:\n\t cycles = cycles + cycle_table[n] - 1\n\t break\n\tif n % 2 == 0:\n\t n = n / 2\n\t cycles+= 1\n\telse:\n\t n = n + (n >> 1) + 1\n\t cycles+=2\n assert cycles > 0\n return cycles", "def _len_lcs(x, y):\n table = _lcs(x, y)\n n, m = len(x), len(y)\n return table[n, m]", "def n_cs(self):\n return np.size(self._cs, 0)" ]
[ "0.726585", "0.70003325", "0.6974426", "0.6440774", "0.64402974", "0.63847375", "0.6323994", "0.6320161", "0.6246694", "0.62032574", "0.6161653", "0.61555225", "0.61552596", "0.61382324", "0.61251944", "0.6114598", "0.6080297", "0.60793746", "0.6055634", "0.6054138", "0.60317475", "0.60266685", "0.6021647", "0.6008521", "0.6008521", "0.5998803", "0.597823", "0.597637", "0.5975393", "0.5962252" ]
0.80468637
0
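The same retrieved function in multi-line form, with a hand-checked usage example appended:

def collatz_sequence_len(n: int) -> int:
    # Counts the terms of the Collatz sequence starting at n, including n and the final 1.
    result = 1
    while n != 1:
        if n % 2 == 0:
            n //= 2
        else:
            n = 3 * n + 1
        result += 1
    return result

# 6 -> 3 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1 is nine terms long:
assert collatz_sequence_len(6) == 9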
Fibonacci number generator that yields the position of the number and the number itself.
def fibonacci_generator(): fib_prev = 0 # prev fib number fib_cur = 1 # next fib number i = 1 # number position while True: yield i, fib_cur i += 1 fib_prev, fib_cur = fib_cur, fib_prev + fib_cur
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def yieldFibonacci():\n yield 1\n a = 1\n b = 2\n while True:\n yield b\n a, b = b, a + b", "def fibonacci_gen(n=1):\n a, b = 0, 1\n while True:\n yield a\n a, b = b, (a + b) * n", "def fib(number: int) -> int:\n return next(islice(generator(number), number, number + 1))", "def fibonacci():\n\ta, b = 0, 1\n\tyield 0\n\twhile True:\n\t\ta, b = b, a + b\n\t\tyield a", "def fibonacci(n):\n a, b = 1, 1\n count = 0\n while count < n:\n yield a\n count += 1\n a, b = b, a+b", "def fibonacci():\n yield 0\n element = yield 1\n previous = element\n while element < 1e100:\n current = yield element\n element = previous + current\n if current > 1:\n previous = current\n\n return element", "def fibo_generator(count):\n try:\n if count <= 0:\n return\n a = 0\n b = 1\n yield a\n if count == 1:\n return\n yield b\n if count == 2:\n return\n for i in range(count - 2):\n c = a + b\n yield c\n a, b = b, c\n except TypeError:\n raise TypeError(\"Only integers allowed\")", "def fibonacci(n):\n a = 0\n b = 1\n counter = n\n placeholder = 1\n\n while(counter > 0):\n placeholder = a + b\n yield \"fib({})\".format(counter)\n a = b\n b = placeholder\n counter = counter - 1\n yield a\n return", "def t_fibonnaci():\n a = 1\n b = 1\n c = a + b\n while True:\n yield c\n a = b + c\n b = c + a \n c = a + b", "def fibonacci(a=1, b=2):\n while True:\n yield a\n a, b = b, b+a", "def fibonacci_iter(n):\n f = []\n for x in range(n + 1):\n if x == 0:\n f.append(x)\n elif x == 1:\n f.append(x)\n else:\n f.append(f[-1] + f[-2])\n return f[-1]", "def fibonacci() -> Iterator[int]:\n a, b = 0, 1\n while True:\n yield a\n a, b = b, a + b", "def fibonacci(num):\n sequence = [1, 1]\n for x in range(num-2):\n sequence.append(sequence[x] + sequence[x+1])\n return sequence", "def fibonacci(n: int):\n a, b, counter = 0, 1, 0\n while True:\n if (counter > n):\n return\n yield a\n a, b = b, a + b\n counter += 1", "def fibonacci(n):", "def fibonacci(n):\n a, b, counter = 0, 1, 0\n while True:\n if counter > n:\n return\n yield a\n a, b = b, a + b\n counter += 1", "def fibonacci(n):\n a = 0\n b = 1\n counter = 0\n while True:\n if (counter > n): return\n yield a\n a = b\n b = a + b\n counter += 1", "def fib(num, fib_0_1=(0, 1)):\n (fib_prev_prev, fib_prev) = fib_0_1 # fib(0), fib(1)\n\n for _ in range(num):\n fib_next, fib_prev = fib_prev, fib_prev_prev + fib_prev\n\n return fib_next", "def fibi(n):\n a, b = 0, 1\n for i in range(n):\n # fibonacci series is next no. is sum of previous two number.\n temp = a\n a = b\n # now nth fibonacci no. 
is sum of previous two number.\n b = temp+b\n # returning a because a changing each places\n return a", "def fibo(n):\n first = 0\n second = 1\n for i in range (1,n+1):\n if (i<=1): \n #begins sequence (terms 0 and 1 do not have two prior terms)\n newVal = i\n else:\n #continues sequence by adding the previous two numbers in the\n #sequence, and updating the variables\n newVal = first + second\n first = second\n second = newVal\n print(i,newVal)", "def fibonacci_inner_generator() -> Iterator[int]:\n yield 0\n yield 1\n fib1 = fibonacci_inner_generator()\n next(iter(fib1))\n yield from (f2 + f1 for f2, f1 in zip(fibonacci_inner_generator(), fib1))", "def fibonacci(number: int) -> int:\n fibs = [0] * (number + 2)\n fibs[0] = 0\n fibs[1] = 1\n for i in range(2, number + 1):\n fibs[i] = fibs[i - 1] + fibs[i - 2]\n return fibs[number]", "def fib(limit):\n a, b = 0, 1\n while a <= limit:\n yield a\n a, b = b, a + b", "def fibonacci(n):\n sequence = [0, 1]\n for i in range(n + 1):\n value = add(sequence[-2], sequence[-1])\n sequence.append(value)\n return sequence[n]", "def fibonacci(self=None):\n \"\"\"\"\"\"\n a, b = 0, 1\n while a < 10:\n print(a)\n a, b = b, a + b", "def get_fib(position):\n\n # Base Case: Positions greater thatn 0 or 1, since Fibonacci for 0 is 0 and\n # 1 is 1.\n if position == 0 or position == 1:\n return position\n\n return get_fib(position - 1) + get_fib(position - 2)", "def next_fib(f):\n for f in fib:\n i = fib.index(f)\n return f+fib[i-1]", "def fibonacci(a):\n fib = [1,1]\n x = 0\n i = 1\n while x < a:\n x = fib [i] + fib[i-1]\n i += 1\n fib.append(x)\n return i, fib", "def fib():\n x, y = 0, 1\n while True:\n yield x\n x, y = y, x + y", "def fibonacci(num):\n counter = 0\n\n # Start fibonacci\n sequence = [0, 1]\n while len(sequence) < num:\n n1 = sequence[counter]\n n2 = sequence[counter + 1]\n sequence.append(n1+n2)\n\n counter += 1\n\n return sequence" ]
[ "0.7914288", "0.78763527", "0.77993584", "0.7797588", "0.7692012", "0.76505595", "0.764296", "0.76354194", "0.76327753", "0.76297504", "0.75923735", "0.7549603", "0.75330853", "0.7509857", "0.7501625", "0.7491575", "0.7451226", "0.73421276", "0.73374224", "0.73136836", "0.73024064", "0.7301367", "0.72907495", "0.72627276", "0.72493106", "0.7238345", "0.7228261", "0.7222131", "0.72180474", "0.7197899" ]
0.8405769
0
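Restating the retrieved generator and pulling its first few pairs with itertools.islice (a usage pattern assumed here, not given in the record):

from itertools import islice

def fibonacci_generator():
    fib_prev, fib_cur = 0, 1   # previous and current Fibonacci numbers
    i = 1                      # 1-based position of fib_cur
    while True:
        yield i, fib_cur
        i += 1
        fib_prev, fib_cur = fib_cur, fib_prev + fib_cur

assert list(islice(fibonacci_generator(), 5)) == [(1, 1), (2, 1), (3, 2), (4, 3), (5, 5)]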
Prime number generator that yields the position of the number and the number itself.
def prime_generator(): i = 0 # prime numbers counter num = 0 # current number while True: num += 1 if is_prime(num): i += 1 yield i, num
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gen_primes():\n\tyield 2\n\tyield 3\n\tprime_list = [2, 3]\n\twhile 1:\n\t\tnext = prime_list[-1] + 2\n\t\ti = 0\n\t\twhile i < len(prime_list):\n\t\t\tif next%prime_list[i] == 0:\n\t\t\t\tnext+=2\n\t\t\t\ti=0\n\t\t\telse:\n\t\t\t\ti+=1\n\t\tprime_list.append(next)\n\t\tyield next", "def gen_primes():\n\n n = 1\n while True:\n while not isPrime(n):\n n += 1\n\n yield n\n n += 1", "def get_primes(self, startnum=2):\n i = startnum\n while True:\n if self.is_prime(i):\n yield i\n i += 1", "def prime_generator() -> Iterator[int]:\n\n num = 2\n while True:\n if is_prime(num):\n yield num\n num += 1", "def generate():\n j = [2]\n i = 3\n while i:\n if is_prime(i):\n j.append(i)\n yield [j, j[-1]]\n i += 2", "def next_prime(n):\n i = 1\n known_prime = []\n while i < n:\n if is_prime(i, known_prime):\n known_prime.append(i)\n yield i\n i += 1", "def primes():\n yield 2\n candidate = 3\n while True:\n for i in range(3, int(sqrt(candidate)) + 1, 2):\n if (candidate % i) == 0:\n break\n else:\n yield candidate\n candidate += 2", "def primes():\n yield 2\n candidate = 3\n while True:\n for i in range(3, int(sqrt(candidate)) + 1, 2):\n if (candidate % i) == 0:\n break\n else:\n yield candidate\n candidate += 2", "def primes():\n yield 2\n found_primes = [2]\n a = 3\n while True:\n for p in found_primes:\n if p**2 > a:\n found_primes.append(a)\n yield a\n a += 2\n break\n elif a % p == 0:\n a += 2\n break", "def prime_gen():\n for i in memo_primes: yield i\n x = memo_primes[-1] + 1\n \n while True:\n if prime_with(x, memo_primes):\n yield x\n memo_primes.append(x)\n x += 1", "def nextPrime(self):\n\t\tnum = self.cur + 1\n\t\twhile not self.isPrime(num):\n\t\t\tnum += 1\n\t\tself.cur = num\n\t\tself.prev.append(num)\n\t\t# print num\n\t\treturn num", "def prime_generator() -> int:\n \n #Start with the first prime.\n counter = count(2)\n candidate = next(counter)\n cache: list = [candidate]\n yield candidate\n \n # Set a flag.\n divisible = False\n while True:\n candidate = next(counter)\n # Check if the candidate is prime.\n for number in cache:\n # If number is greater than the squareroot of candidate, we are done.\n if number * number > candidate:\n break\n # If number divides candidate, candidate is not prime.\n if candidate % number == 0:\n divisible = True\n break\n # If is is prime, add it to the list.\n if not divisible:\n cache.append(candidate)\n yield candidate\n # Reset the flag.\n divisible = False", "def prime_generator():\r\n for i in itertools.count(start=1):\r\n for j in ((6 * i) - 1, (6 * i) + 1):\r\n if is_prime(j): yield(j)", "def Primes():\n candidate = 1\n _primes_so_far = [2] # first prime, only even prime\n yield _primes_so_far[-1]\n while True:\n candidate += 2 # check odds only from now on\n for prev in _primes_so_far:\n if prev**2 > candidate:\n yield candidate\n _primes_so_far.append(candidate)\n break\n if not divmod(candidate, prev)[1]: # no remainder!\n break # done looping", "def primes():\n yield 2\n found = []\n for i in itertools.count(start=3, step=2):\n for p in found:\n if i % p == 0:\n break\n else:\n yield i\n found.append(i)", "def prime_numbers(upto):\n sieve = BitArray(upto + 1, 1)\n for number in xrange(2, upto + 1):\n if not sieve[number]:\n continue\n yield number\n for multiple in xrange(number ** 2, upto + 1, number):\n sieve[multiple] = 0\n return", "def print_next_prime(number):\n index = number\n while True:\n index += 1\n if is_prime(index):\n print(index)", "def prime_generator(num):\n prime_list = [i for i in range(1,num+1,2) if 
prime_checker(i)]\n\n if num > 1:\n prime_list.insert(0,2)\n\n return prime_list", "def get_primes_in(self, grange):\n for n in grange:\n if self.is_prime(n):\n yield n", "def next_p(a, b, primes):\n if b == 0:\n return primes\n elif is_prime(a):\n primes.append(a)\n return next_p(a + 2, b - 1, primes)\n else:\n return next_p(a + 2, b, primes)", "def next(self):\n nextPrime = None\n i = self.lastPrime+2\n while nextPrime is None:\n sqrt_i = math.sqrt(i)\n isPrime = True\n for p in self.primes:\n if i%p == 0:\n isPrime = False\n i += 2\n break\n if p > sqrt_i:\n break\n if isPrime:\n nextPrime = i\n self.primes.append(nextPrime)\n self.lastPrime = nextPrime\n return nextPrime", "def primes():\n yield 1\n primes = []\n for n in itertools.count(2):\n if not any(n % p == 0 for p in primes):\n # No divisor found among previous primes\n yield n\n primes.append(n)", "def test_prime_2(self):\n\t self.assertTrue(prime_generator(2), [2])", "def test_prime_10(self):\n\t self.assertTrue(prime_generator(10), [2, 3, 5, 7])", "def get_primes_over(limit):\n candidate = 1000000\n count = 0\n while count < limit:\n if is_prime(candidate):\n yield candidate\n count += 1\n candidate += 1\n else:\n candidate += 1", "def prime():\n prime_set = {2} # Set of prime numbers that have been found\n yield 2 # First prime\n for x in itertools.count(3, 2): # Check odd numbers, starting with 3\n primes_below_sqrt = {i for i in prime_set if i <= sqrt(x)} \n for prime in primes_below_sqrt:\n if x % prime == 0:\n break # x is divisible by a prime factor, so it is not prime\n else:\n prime_set.add(x) # x has been shown to be prime\n yield x", "def nth_prime(n):\n # Tweaked version of the itertools nth recipe\n return next(islice(generate_primes(), n-1, None), None)", "def get_primes(lower: int, upper: int) -> typing.Generator[int, None, None]:\r\n for num in range(lower, upper + 1):\r\n if num > 1:\r\n for i in range(2, int(math.sqrt(num)) + 1):\r\n if num % i == 0:\r\n break\r\n else:\r\n yield num", "def iter_coords(start, num):\n yield start\n odd = num % 2 == 1\n bound = (num + 2) // 2\n for i in range(1, bound):\n yield (start + i) % (num + 1)\n yield (start - i) % (num + 1)\n if odd:\n yield (start + bound) % (num + 1)", "def primes(n):\n\tsieve = [True] * n\n\tyield 2\n\tfor i in xrange(3,int(n**0.5)+1,2):\n\t\tif sieve[i]:\n\t\t\tyield i\n\t\t\tsieve[i*i::2*i] = [False]*((n-i*i-1)/(2*i)+1)\n\tfor i in xrange(i+2,n,2):\n\t\tif sieve[i]: yield i" ]
[ "0.7023231", "0.70171297", "0.69697934", "0.6942526", "0.6920672", "0.6914686", "0.67672896", "0.67672896", "0.67569935", "0.66785425", "0.6669284", "0.66367215", "0.6618157", "0.65434796", "0.64823705", "0.6432703", "0.6387829", "0.6363819", "0.6362022", "0.63597316", "0.630817", "0.63076204", "0.63040173", "0.6289798", "0.62868434", "0.628431", "0.6279324", "0.6275577", "0.62522244", "0.62464625" ]
0.7427608
0
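The retrieved generator relies on an is_prime helper that the record does not show; the sketch below assumes a plain trial-division version of it so the example runs end to end:

from itertools import islice

def is_prime(n: int) -> bool:
    # Assumed helper: trial division up to sqrt(n).
    if n < 2:
        return False
    d = 2
    while d * d <= n:
        if n % d == 0:
            return False
        d += 1
    return True

def prime_generator():
    i = 0    # how many primes have been yielded so far
    num = 0  # current candidate
    while True:
        num += 1
        if is_prime(num):
            i += 1
            yield i, num

assert list(islice(prime_generator(), 4)) == [(1, 2), (2, 3), (3, 5), (4, 7)]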
Sets the Group the Member is in. Needs to be set so tab order works properly.
def set_group(self, group): self._group = group
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_group(self, group: str) -> None:\n self.group = group", "def group(self, group):\n self._group = group", "def set_group(self, address, group):\n self.groups[address] = group", "def setGroup(self, group):\n\t\tself.config.GROUP = group", "def group(self, val):\n self.set_property(\"Group\", val)", "def group(self, group):\n\n self._group = group", "def group(self, group):\n\n self._group = group", "def group(self, group):\n\n self._group = group", "def set_group(self, id_: str, player: str, group: list):\n self._groups[id_] = {\n 'player': player,\n 'group': group\n }", "def grp(self, grpNode):\n\t\tself._grp = grpNode", "def with_group(self, group):\n\t\tself.variables['group'] = group\n\t\treturn self", "def set_group_name(self, name):\n self.groupname = name", "def _set_group_name(self):\n self._scene_gen.group_name = self._group_name_le.text()\n self._refresh_view()", "def set_group(self, group):\n # Implemented from template for osid.resource.ResourceForm.set_group_template\n if self.get_group_metadata().is_read_only():\n raise errors.NoAccess()\n if not self._is_valid_boolean(group):\n raise errors.InvalidArgument()\n self._my_map['group'] = group", "def setGatingGroup(self, channel, group, unitCode=0):\n resp = self.XAPCommand('GRPSEL', channel, group, unitCode=unitCode)\n return resp", "def set_group(self, bot, update, args):\n username = str(update.message.from_user['username'])\n chat_id = str(update.message.from_user['id'])\n\n try:\n group_name = self.format_group(str(args[0]))\n\n if self.is_group(group_name):\n self.user_db.add_new_user(username, group_name, chat_id)\n bot.send_message(update.message.chat_id,\n 'Расписание для группы *{}* успешно установлено!\\n'\n '/today\\n'\n '/tomorrow\\n'\n '/week\\n'\n '/nextweek\\n'\n '/full\\n'\n '/timetable\\n'\n '/keyboard\\n'.format(group_name),\n parse_mode='Markdown')\n else:\n raise Exception(\"Group is not exists.\")\n except (Exception, IndexError):\n bot.send_message(update.message.chat_id,\n 'Группы с таким именем не существует, проверьте корректность введенного имени.',\n parse_mode='Markdown')", "def group(self, group):\n self.proxy_group = group\n return self", "def set_group(self, name, members):\n if name in self._groups:\n return self._groups[name].add_members(members)\n self._groups[name] = BUIaclGroup(name, members)\n return self._groups[name].members", "def set_group_name(self, name):\n params = [('groupname', name, 'cdata')]\n\n self.get(COMMAND_UIC, 'SetGroupName', params)", "def member(self, member):\r\n return GroupMember(self, member)", "def set_group(group_name):\n group_config = env.groups[group_name]\n set_role_defs(\n web=group_config['servers'][WEB_ROLE],\n db=group_config['servers'][DB_ROLE],\n )\n env.branch = group_config['branch']\n env.subdomain = group_config.get('subdomain', 'www')", "def instance_group(self, instance_group):\n if instance_group is None:\n raise ValueError(\"Invalid value for `instance_group`, must not be `None`\")\n\n self._instance_group = instance_group", "def group_id(self, group_id):\n\n self._group_id = group_id", "def group_id(self, group_id):\n\n self._group_id = group_id", "def group_id(self, group_id):\n\n self._group_id = group_id", "def group_id(self, group_id):\n\n self._group_id = group_id", "def group_id(self, group_id):\n\n self._group_id = group_id", "def group_id(self, group_id):\n\n self._group_id = group_id", "def setNetGroup(addr): #status: Done, not tested\r\n pass", "def set(self, name_group, key, value):\n self.psettings.beginGroup(name_group)\n 
self.psettings.setValue(key, value)\n self.closeGroup()" ]
[ "0.7711096", "0.7326087", "0.72447264", "0.71994495", "0.709708", "0.7083411", "0.7083411", "0.7083411", "0.6929075", "0.68464977", "0.68303144", "0.67630804", "0.6596073", "0.65727514", "0.65307826", "0.65190583", "0.65161115", "0.65015715", "0.6412089", "0.6363025", "0.63585263", "0.6335413", "0.6300008", "0.6300008", "0.6300008", "0.6300008", "0.6300008", "0.6300008", "0.6280312", "0.6254653" ]
0.76793545
1
Gets the Group this Member is in. Returns the Group this Member is in.
def get_group(self): return self._group
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getGroup(self):\n\t\treturn self.Group", "def get_group(self, group_name):\n\n return self._group[group_name]", "def group(self):\n return self._group", "def group(self):\n return self._group", "def group(self):\n return self._group", "def get_group(self) -> Optional[str]:\n return self.group", "def getGroup(self, group_id: int) -> 'Group':\n return self.sObj.getGroup(group_id)", "def _get_group(self):\n if self.resource.group is not None:\n try:\n return grp.getgrnam(self.resource.group).gr_gid\n except KeyError:\n raise error.InvalidGroup()", "def get_group(self):\n\t\treturn self.variables.get('group')", "def get_group(self, group_id):\n\t\treturn Group(group_id, self.user_id, self.site_id)", "def get_group(self, wanted_group):\n if self.group_file:\n return self._get_group_from_file(wanted_group)\n return self._get_group_from_host(wanted_group)", "def get_group(self, group_id):\n return self.root.get(group_id)", "def group(self):\n return self.properties.get('Group', None)", "def get_group(self, uuid):\n return Group.deserialize(self._get_single('groups', {'uuid': uuid}))", "def get_group(self, id: utils.Intable) -> Group | None:\n steam_id = SteamID(id=id, type=Type.Chat)\n return self._connection.get_group(steam_id.id)", "def group_id(self):\n return self._group_id", "def group_id(self):\n return self._group_id", "def group_id(self):\n return self._group_id", "def get_group_name(self):\n return self.groupname", "def getGroupByName(self, name):\n for group in self.groups:\n if name == group.name:\n return group\n\n return None", "def get(self):\n self._group = self._client.get(\n url=self._client.get_full_url(\n self.get_path(\n 'single', realm=self._realm_name, group_id=self._group_id\n )\n )\n )\n self._group_id = self._group[\"id\"]\n return self._group", "def group(self) -> str:\n return pulumi.get(self, \"group\")", "def get_current_grp():\n return get_group_grp(os.getgid())", "def group(self) -> Optional[str]:\n return pulumi.get(self, \"group\")", "def get_group(username: str) -> Group:\n return grp.getgrnam(username)", "def GroupId(self):\n\t\treturn self._get_attribute('groupId')", "def group(self):\n if iswin32:\n raise NotImplementedError(\"XXX win32\")\n import grp\n\n entry = error.checked_call(grp.getgrgid, self.gid) # type:ignore[attr-defined]\n return entry[0]", "def getGroup(self, *args):\n return _libsbml.GroupsModelPlugin_getGroup(self, *args)", "def get_group(name, parent):\n result = parent.findGroup(name)\n if result is None:\n result = parent.addGroup(name)\n return result", "def get_group(self, group_id: str) -> dict:\n group = self.ms_client.http_request(method='GET', url_suffix=f'groups/{group_id}')\n return group" ]
[ "0.7794998", "0.7636299", "0.7622602", "0.7622602", "0.7622602", "0.75684416", "0.74724185", "0.73655677", "0.7326051", "0.71922463", "0.71530706", "0.7129704", "0.7088261", "0.70684534", "0.70625037", "0.70310026", "0.70310026", "0.70310026", "0.70285153", "0.7016255", "0.6974442", "0.69635016", "0.69531995", "0.6946419", "0.69429946", "0.69335407", "0.6925994", "0.6918707", "0.6918668", "0.69144475" ]
0.8379241
0
Sets the nickname of the Member
def set_nickname(self, nickname): if len(nickname) > globals.MAX_NICKNAME_LENGTH: nick = nickname[0:globals.MAX_NICKNAME_LENGTH-3]+"..." else: nick = nickname self._nickname.set_message(nick)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def nickname(self, new_nickname):\r\n self.set({\"nickname\": new_nickname})", "async def nickname(self, ctx, *, nickname=\"\"):\n # [p]set nickname <nickname>\n\n nickname = nickname.strip()\n if nickname == \"\":\n nickname = None\n try:\n await self.bot.change_nickname(ctx.message.server.me, nickname)\n await self.bot.say(\"Done.\")\n except discord.Forbidden:\n await self.bot.say(\"I cannot do that, I lack the \"\n \"\\\"Change Nickname\\\" permission.\")", "def nickname(self, nickname):\n if nickname is None:\n raise ValueError(\"Invalid value for `nickname`, must not be `None`\") # noqa: E501\n\n self._nickname = nickname", "def set_nick(self, nick):\n raise NotImplementedError", "async def nick(self, ctx, *, nickname):\n if len(nickname) > 32:\n await ctx.send(\"Nickname must be 32 characters or fewer\")\n return\n await ctx.me.edit(nick=nickname)\n await ctx.send(f\"Nickname changed to {nickname}\")", "async def nick(\n self, context: Context, user: discord.User, *, nickname: str = None\n ) -> None:\n member = context.guild.get_member(user.id) or await context.guild.fetch_member(\n user.id\n )\n try:\n await member.edit(nick=nickname)\n embed = discord.Embed(\n description=f\"**{member}'s** new nickname is **{nickname}**!\",\n color=0x9C84EF,\n )\n await context.send(embed=embed)\n except:\n embed = discord.Embed(\n description=\"An error occurred while trying to change the nickname of the user. Make sure my role is above the role of the user you want to change the nickname.\",\n color=0xE02B2B,\n )\n await context.send(embed=embed)", "def setName(self, newName):\n self.__username = newName", "def set_nickname(self, nickname):\n self.nickname = nickname\n self.tweets_list = TweetsLinkedList(nickname)\n self.tweets_list.create_linked()", "async def nick(self, context: SlashContext, user: discord.User, nickname: str = None):\n author = await context.guild.fetch_member(context.author_id)\n if not author.guild_permissions.manage_nicknames:\n embed = discord.Embed(\n title=\"Error!\",\n description=\"You don't have enough permissions to change the nickname of this user.\",\n color=0xE02B2B\n )\n return await context.send(embed=embed)\n member = await context.guild.fetch_member(user.id)\n try:\n await member.edit(nick=nickname)\n embed = discord.Embed(\n title=\"Changed Nickname!\",\n description=f\"**{member}'s** new nickname is **{nickname}**!\",\n color=0x42F56C\n )\n await context.send(embed=embed)\n except:\n embed = discord.Embed(\n title=\"Error!\",\n description=\"An error occurred while trying to change the nickname of the user. 
Make sure my role is above the role of the user you want to change the nickname.\",\n color=0xE02B2B\n )\n await context.message.channel.send(embed=embed)", "def add_nickname(self, nickname):\n if 'Nicknames' not in self.properties:\n self.properties['Nicknames'] = []\n if (len(self.properties['Nicknames']) == 1 and self.properties['Nicknames'][0].startswith('Temp')):\n self.properties['Nicknames'][0] = nickname.title()\n else:\n self.properties['Nicknames'].append(nickname.title())", "async def update_nickname(guild_id):\n user_id = await token_check()\n await guild_check(user_id, guild_id)\n\n j = validate(await request.get_json(), {\n 'nick': {'type': 'nickname'}\n })\n\n nick = j['nick'] or None\n\n await app.db.execute(\"\"\"\n UPDATE members\n SET nickname = $1\n WHERE user_id = $2 AND guild_id = $3\n \"\"\", nick, user_id, guild_id)\n\n member = await app.storage.get_member_data_one(guild_id, user_id)\n member.pop('joined_at')\n\n # call pres_update for nick changes, etc.\n await app.dispatcher.dispatch(\n 'lazy_guild', guild_id, 'pres_update', user_id, {\n 'nick': j['nick']\n })\n\n await app.dispatcher.dispatch_guild(guild_id, 'GUILD_MEMBER_UPDATE', {**{\n 'guild_id': str(guild_id)\n }, **member})\n\n return j['nick']", "async def _nick(self, nick: str) -> str:\n\n logger.debug(f\"Setting nick to {nick!r}\")\n\n self._target_nick = nick\n\n reply = await self._connection.send(\"nick\", {\"name\": nick})\n data = self._extract_data(reply)\n\n new_nick = data[\"to\"]\n self._target_nick = new_nick\n\n if self._session is not None:\n self._session = self._session.with_nick(new_nick)\n\n logger.debug(f\"Set nick to {new_nick!r}\")\n\n return new_nick", "def change_username(self, name):\n self.username = name", "def update_nick(self, nick):\n if self.nick == nick:\n return\n\n # Update the nick hashmap\n if self.nick:\n self.users.nick_hashmap[self.nick].remove(self)\n self.users.nick_hashmap[nick].append(self)\n\n LOG.info(\"Updating user nick: {} -> {}\".format(self.nick, nick))\n\n self.nick = nick\n\n self.users.modified_callback()", "def set_username(self, value):\n self.username = value", "async def change_nick(self, member: discord.Member, nick_chosen: str):\n try:\n await self.bot.change_nickname(member, nick_chosen)\n await self.bot.say(\"{0} nickname successfully changed to '{1}'\".format(member, nick_chosen))\n except discord.HTTPException:\n await self.bot.say(\"[ERROR:HTTPException] {0.name} has not enough permissions.\".format(self.bot.user))", "async def set_nick(\n client,\n event,\n user: ('user', 'Who\\'s?'),\n nick: P(str, 'Their new nick', min_length = 1, max_length = 32) = None,\n):\n yield\n await client.user_guild_profile_edit(event.guild, user, nick=nick)\n yield f'{user:f}\\'s nick has been updated'", "def _switch_nick(self):\n self.nickname = self.firstnick + str(random.randint(1000, 9999))\n self._log(self.botlog, 'Switching to nick %s' % self.nickname)\n self._send('NICK %s' % self.nickname)", "def add_nickname(self, name):\n if not(name in self.nicknames):\n self.nicknames.append(name)", "def set_username(self, value):\n raise NotImplementedError('set_username')", "def change_nick(self, before, after):\n userdata = self.users[irc.strings.lower(before)]\n self.del_user(before)\n self.add_user(after, userdata)", "def nickname(self):\r\n if \"nickname\" in self.data:\r\n return self.data[\"nickname\"]\r\n return None", "def rename(self,newName):\n self.userName = newName", "async def name(self, ctx, *, name):\n # [p]set name <name>\n\n name = name.strip()\n if 
name != \"\":\n try:\n await self.bot.edit_profile(username=name)\n except:\n await self.bot.say(\"Failed to change name. Remember that you\"\n \" can only do it up to 2 times an hour.\"\n \"Use nicknames if you need frequent \"\n \"changes. {}set nickname\".format(ctx.prefix))\n else:\n await self.bot.say(\"Done.\")\n else:\n await send_command_help(ctx)", "async def assign_clan(self, ctx, user : discord.Member, *, clanname=\"\"):\r\n nickname = '[{}] {}'.format(clanname.strip(), user.name)\r\n if clanname == \"\":\r\n nickname = None\r\n try:\r\n await self.bot.change_nickname(user, nickname)\r\n await self.bot.say(\"Done.\")\r\n except discord.Forbidden:\r\n await self.bot.say(\"I cannot do that, I lack the \"\r\n \"\\\"Manage Nicknames\\\" permission.\")", "def set_name(net_id, name):\n connection = get_connection()\n cursor = connection.cursor()\n sql_string = \"UPDATE Member SET name='\"+name+\"' WHERE netID='\"+net_id+\"'\"\n cursor.execute(sql_string)\n connection.commit()", "async def massnick(ctx, nickname: str):\n server = ctx.message.server\n counter = 0\n for user in server.members:\n if user.nick is None:\n nickname = \"{} {}\".format(nickname, user.name)\n else:\n nickname = \"{} {}\".format(nickname, user.nick)\n try:\n await bot.change_nickname(user, nickname)\n except discord.HTTPException:\n counter += 1\n continue\n await bot.say(\"Finished nicknaming server. {} nicknames could not be completed.\".format(counter))", "def sendnick(self):\n self._send(\"NICK %s\" % (CONFIG[\"nick\"]))", "def change_nick_in_room(self, login, room, nick):\n pass", "def set_wife_name(self, w, line_number=0):\n self.wife_name = w\n self._wife_name_life = line_number" ]
[ "0.8081207", "0.793872", "0.77112406", "0.7681544", "0.74439347", "0.71882993", "0.7137291", "0.70793533", "0.7025866", "0.6879157", "0.6874425", "0.687341", "0.6866829", "0.67659074", "0.6725574", "0.6692538", "0.6663959", "0.66156805", "0.65894735", "0.6530449", "0.65269184", "0.6456158", "0.64387673", "0.6427786", "0.64232296", "0.6415986", "0.6415528", "0.63537276", "0.6352672", "0.63262826" ]
0.81594807
0
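The only non-trivial part of the retrieved setter is its truncation rule; a standalone sketch of just that rule, with an assumed value for globals.MAX_NICKNAME_LENGTH (the record reads it from a globals module):

MAX_NICKNAME_LENGTH = 32  # assumed value for illustration

def truncate_nickname(nickname: str) -> str:
    # Same rule as the record: keep the first MAX-3 characters and append "...".
    if len(nickname) > MAX_NICKNAME_LENGTH:
        return nickname[:MAX_NICKNAME_LENGTH - 3] + "..."
    return nickname

assert truncate_nickname("short") == "short"
assert len(truncate_nickname("x" * 100)) == MAX_NICKNAME_LENGTH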
Gets the nickname of a Member
def get_nickname(self): return self._nick
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def nickname(self):\r\n if \"nickname\" in self.data:\r\n return self.data[\"nickname\"]\r\n return None", "def get_display_name(member):\n if member.nick is None:\n name = member.name\n else:\n name = member.nick\n if User.objects.get(id=member.id).is_ironman:\n name += ' (IM)'\n return name", "def find_member(message, nickname):\n for member in message.guild.members:\n if nickname in member.display_name:\n return member", "def get_nick(infraction_id: int, member_id: int) -> str:\n rng = random.Random(str(infraction_id) + str(member_id))\n return rng.choice(STAR_NAMES)", "def get_black_player_nickname(self, obj):\n return obj.black_player.nickname", "def get_nickname_for_user(cls, user):\n return cls.get_account_for_user(user).nickname", "def nickname(self):\n if (self.__email and self.__auth_domain and\n self.__email.endswith('@' + self.__auth_domain)):\n suffix_len = len(self.__auth_domain) + 1\n return self.__email[:-suffix_len]\n else:\n return self.__email", "def get_white_player_nickname(self, obj):\n return obj.white_player.nickname", "async def update_nickname(guild_id):\n user_id = await token_check()\n await guild_check(user_id, guild_id)\n\n j = validate(await request.get_json(), {\n 'nick': {'type': 'nickname'}\n })\n\n nick = j['nick'] or None\n\n await app.db.execute(\"\"\"\n UPDATE members\n SET nickname = $1\n WHERE user_id = $2 AND guild_id = $3\n \"\"\", nick, user_id, guild_id)\n\n member = await app.storage.get_member_data_one(guild_id, user_id)\n member.pop('joined_at')\n\n # call pres_update for nick changes, etc.\n await app.dispatcher.dispatch(\n 'lazy_guild', guild_id, 'pres_update', user_id, {\n 'nick': j['nick']\n })\n\n await app.dispatcher.dispatch_guild(guild_id, 'GUILD_MEMBER_UPDATE', {**{\n 'guild_id': str(guild_id)\n }, **member})\n\n return j['nick']", "def get_member(self, name):\n members = self.wls_board.get_members()\n for member in members:\n if name in member.full_name:\n return member\n return 'None'", "def get_current_nick(self, login, room):\n pass", "def get_nickname_for_email(cls, email, default=None):\n account = cls.get_account_for_email(email)\n if account is not None and account.nickname:\n return account.nickname\n if default is not None:\n return default\n return email.replace('@', '_')", "def get_full_name(self):\n return self.username", "def get_full_name(self):\n return self.username", "def get_author_nickname(self, attribute_name, default=None):\n return getattr(self, '%s__author_nickname' % attribute_name, default)", "def name(self) -> str:\n try:\n return self.stats[\"Player name\"]\n except KeyError as ke:\n logger.debug(ke, exc_info=True)\n logger.warn(\"unable to get player name\")\n return \"\"", "def getMember(unique_name):", "def getMember(unique_name):", "def getUserName(self):\n user = User.by_id(self.user_id)\n return user.name", "def mail_nickname(self):\n if \"mailNickname\" in self._prop_dict:\n return self._prop_dict[\"mailNickname\"]\n else:\n return None", "def mail_nickname(self):\n if \"mailNickname\" in self._prop_dict:\n return self._prop_dict[\"mailNickname\"]\n else:\n return None", "def get_user(self, username: str) -> Optional[discord.Member]:\n for m in self.guild.members:\n if utils.istrcmp(m.display_name, username):\n return m\n return None", "def getName(self):\n return self.__username", "def __str__(self):\n return self.nickname", "def group_nickname(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"group_nickname\")", "def get_username(self):\n full_name = '%s %s' % 
(self.user.first_name.strip(), self.user.last_name.strip()[0:1])\n if len(full_name.strip()) == 0:\n full_name = self.user.username\n return full_name.strip()", "def nickname():\n return jsonify(name=getRandomLine(nickNamesFile))", "def get_name(self):\n return self.user.username if self.user.username else self.user.email", "def getMemberFromName(self, name):\n for member in self.playersAndRoles:\n if name in member.user.name:\n return member", "def get_nickname(image):\r\n extension = len(image.split('/')[-1:][0].split('.')[-1:][0])\r\n return image.split('/')[-1:][0][:-extension-1]" ]
[ "0.7942136", "0.7683267", "0.7238209", "0.7235817", "0.7205748", "0.71453834", "0.6900149", "0.68039113", "0.6789344", "0.6717712", "0.6629654", "0.6436516", "0.64070326", "0.64070326", "0.63944316", "0.6376947", "0.63725275", "0.63725275", "0.6366259", "0.6364886", "0.6364886", "0.63646626", "0.6315764", "0.63099045", "0.6298766", "0.62912667", "0.62567776", "0.62459826", "0.6242281", "0.62369937" ]
0.8068357
0
Sets the status of the Member (offline, online, away, busy). offline (red): Unable to talk to Member. online (green): Able to talk to Member. away (grey): Able to send messages to Member, but don't expect a response. busy (yellow): Member is there, but will not likely respond.
def set_status(self, status): if status == "offline": self._status.set_message("N") self._status.set_foreground_color("red") elif status == "online": self._status.set_message("Y") self._status.set_foreground_color("Green") elif status == "away": self._status.set_message("A") self._status.set_foreground_color("Grey") elif status == "busy": self._status.set_message("B") self._status.set_foreground_color("Yellow")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def set_status(self, ctx, *, status: str = \"online\"):\n\n try:\n status = discord.Status[status.lower()]\n except KeyError:\n await ctx.error(\"Invalid Status\", \"Only `online`, `idle` or `dnd` statuses are available.\")\n else:\n await self.bot.change_presence(status=status, activity=ctx.me.activity)\n await ctx.success(f\"Status changed to {status}.\")", "async def status(self, ctx, *, status=None):\n # [p]set status <status>\n\n statuses = {\n \"online\": discord.Status.online,\n \"idle\": discord.Status.idle,\n \"dnd\": discord.Status.dnd,\n \"invisible\": discord.Status.invisible\n }\n\n server = ctx.message.server\n\n current_game = server.me.game if server is not None else None\n\n if status is None:\n await self.bot.change_presence(status=discord.Status.online,\n game=current_game)\n await self.bot.say(\"Status reset.\")\n else:\n status = statuses.get(status.lower(), None)\n if status:\n await self.bot.change_presence(status=status,\n game=current_game)\n await self.bot.say(\"Status changed.\")\n else:\n await send_command_help(ctx)", "async def status(self, ctx:utils.Context, status:str):\n\n status_o = getattr(discord.Status, status.lower())\n await self.bot.change_presence(activity=self.bot.guilds[0].me.activity, status=status_o)", "def SetStatus(self, status):\r\n self.status = status", "def set_status(self, status):\n # TODO log to db\n self.status = status", "def set_status(self, status):\n self.status = status", "def set_status(self, status):\n self.status = status", "def set_status(self, status):\n self.status = status", "def updateBuddy(self,username,online,evilness,signontime,idletime,userclass,away):\n print \"status changed for\",username", "def set_online_status(self, status):\r\n name = \"label\"\r\n label = self.label\r\n label_sign = self.lbl_online\r\n text = [\"ONLINE\", \"OFFLINE\"]\r\n if status:\r\n label.setStyleSheet(\"#label{color: green;}\")\r\n label.setText(text[0])\r\n pixmap = QPixmap(os.path.abspath(os.path.join(self.app.path, \"PySkeletonViewer\", \"images\", \"green_dot.png\")))\r\n else:\r\n label.setStyleSheet(\"#\"+name+\"{color: red;}\")\r\n label.setText(text[1])\r\n pixmap = QPixmap(os.path.abspath(os.path.join(self.app.path, \"PySkeletonViewer\", \"images\", \"red_dot.png\")))\r\n image = pixmap.scaled(QSize(30, 30))\r\n label_sign.setPixmap(image)", "def set_status(self, status: str) -> None:\n\n try:\n self.status = Buddy.status_map[status.lower()]\n except KeyError:\n self.status = status", "def UpdateStatus(self, status):\r\n self.status.update(status)", "def change_status(self):\n if self.status == \"Still Loaned\":\n self.status = \"Given Back\"\n else:\n self.status = \"Still Loaned\"", "def _update(self):\n path = \"/members/%s\" % self._dict['member_id']\n data = self.extract()\n if self._dict['member_status_id'] in (\n MemberStatus.Active, MemberStatus.Error, MemberStatus.OptOut):\n data['status_to'] = self._dict['member_status_id']\n if not self.account.adapter.put(path, data):\n raise ex.MemberUpdateError()", "async def change_status():\n await client.change_presence(activity=discord.Game(next(appearance.status)))", "def updateStatus(self, status):\n pass", "def _set_status(self, status):\n with self.status_lock:\n if (status in _ENDING_STATUSES) or (not self.status in _ENDING_STATUSES):\n self.status = status", "async def status(self, ctx, *, status_type: str.lower):\n if status_type == \"clear\":\n self.bot.config.remove(\"status\")\n await self.bot.config.update()\n await self.set_presence()\n embed = 
Embed(title=\"Status Removed\", color=self.bot.main_color)\n return await ctx.send(embed=embed)\n status_type = status_type.replace(\" \", \"_\")\n\n status, msg = (\n await self.set_presence(status_identifier=status_type, status_by_key=True)\n )[\"status\"]\n if status is None:\n raise commands.MissingRequiredArgument(SimpleNamespace(name=\"status\"))\n\n self.bot.config[\"status\"] = status.value\n await self.bot.config.update()\n\n embed = Embed(\n title=\"Status Changed\", description=msg, color=self.bot.main_color\n )\n return await ctx.send(embed=embed)", "def setstatus(self, status):\n with self.lock:\n self.status = status", "def status(self, status):\n self._status = status", "def status(self, status):\n self._status = status", "def status(self, status):\n self._status = status", "def status(self, status):\n self._status = status", "def status(self, status):\n self._status = status", "def status(self, status):\n self._status = status", "def status(self, status):\n self._status = status", "def set_status(self, pokemon, status, setter):\n assert not pokemon.is_fainted()\n assert setter is None or isinstance(setter, BattlePokemon)\n\n if status is Status.SLP and any(teammate.status is Status.SLP and not teammate.is_resting\n for teammate in pokemon.side.team\n if teammate is not pokemon):\n if __debug__: log.i('Sleep Clause Mod!')\n return FAIL\n\n if pokemon.status is not None or pokemon.is_immune_to(status):\n if __debug__: log.i('Failed to set status %s: ' % status +\n ('%%s is already statused (%s)' % pokemon.status\n if pokemon.status is not None else\n '%s is immune') % pokemon)\n return FAIL\n\n for effector in (pokemon, pokemon.side, self.battlefield):\n if effector.activate_effect('on_set_status',\n status, pokemon, setter, self, failfast=True) is FAIL:\n return FAIL\n\n pokemon.status = status\n pokemon.set_effect(STATUS_EFFECTS[status](pokemon))\n pokemon.activate_effect('on_after_set_status', status, pokemon, setter, self)", "def set_status(self, status):\n if status == 'qw':\n status = 'Waiting'\n elif status == 'hqw':\n status = 'Held'\n elif status == 'Eqw':\n status = 'Error'\n else:\n sys.exit(20)\n self.status = status\n return", "def update_status(self):\n num_nbrs = len(self.neighbors)\n if not 2 <= num_nbrs <= 3:\n self.status = 0\n elif num_nbrs == 3:\n self.status = 1", "def change_status():\n if self.on:\n connect.SOCKET.sendall(bytes(\"OFF\\n\", \"utf-8\"))\n self.on = False\n else:\n connect.SOCKET.sendall(bytes(\"ON\\n\", \"utf-8\"))\n self.on = True" ]
[ "0.7392773", "0.735482", "0.68975866", "0.6805247", "0.66294175", "0.6618874", "0.6618874", "0.6618874", "0.6602507", "0.66011685", "0.6540653", "0.64994997", "0.6472553", "0.64587176", "0.6445178", "0.6426996", "0.63954735", "0.6344724", "0.6343002", "0.63307667", "0.63307667", "0.63307667", "0.63307667", "0.63307667", "0.63307667", "0.63307667", "0.6285622", "0.627905", "0.6271458", "0.6268006" ]
0.7881072
0
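The retrieved setter is a four-way mapping from status name to label text and colour; the sketch below restates that mapping as a table, assuming a label-like object that provides the set_message/set_foreground_color methods used in the record:

STATUS_DISPLAY = {
    "offline": ("N", "red"),
    "online":  ("Y", "Green"),
    "away":    ("A", "Grey"),
    "busy":    ("B", "Yellow"),
}

def set_status(label, status: str) -> None:
    # Unknown statuses are silently ignored, matching the if/elif chain in the record.
    if status in STATUS_DISPLAY:
        message, color = STATUS_DISPLAY[status]
        label.set_message(message)
        label.set_foreground_color(color)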
Sets the width of the border.
def set_border_width(self, width): self._border.set_border_width(width) self._border_width = width
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fl_set_border_width(borderwidth):\n _fl_set_border_width = library.cfuncproto(\n library.load_so_libforms(), \"fl_set_border_width\",\\\n None, [cty.c_int],\\\n \"\"\"void fl_set_border_width(int bw)\"\"\")\n i_borderwidth = library.convert_to_intc(borderwidth)\n library.keep_elem_refs(borderwidth, i_borderwidth)\n _fl_set_border_width(i_borderwidth)", "def change_width(self, value):\n self.layer.edge_width = value\n self.widthSpinBox.clearFocus()\n self.setFocus()", "def set_width(self, width):\n self.width = width", "def set_border(self, color: tuple = (0, 0, 0, 255), width: int = 1):\n self.border_color = color\n self.border = width", "def set_width(self, width):\n self.__width = width", "def SetWidth(self, w):\r\n\r\n self._width = w", "def setWidth(self, width):\n if not self._width:\n self._width = int(width)", "def setWidth(self, width):\n self._reconfig(\"width\", width)", "def setWidth(*args):", "def setWidth(*args):", "def setWidth(*args):", "def setWidth(*args):", "def setWidth(*args):", "def setWidth(*args):", "def setWidth(*args):", "def setWidth(*args):", "def setWidth(*args):", "def setWidth(*args):", "def setWidth(*args):", "def border_width(self):\n if self.has_border:\n return self._border_actor.GetProperty().GetLineWidth()\n return 0", "def SetWidth(self, width):\r\n\r\n self._width = width\r\n return self", "def set_width( self, width ):\n # label seems to be the controlling thing\n self.label_widget.configure( width = width )", "def width(self, width):\n\n self._width = width", "def width(self, width):\n\n self._width = width", "def width(self, width):\n\n self._width = width", "def set_line_width(self, val):\n self.lwidth = val", "def set_left_border(self, val):\n self.lborder = val", "def width(self, value: int):\n self.tk_ref.geometry(f'{value}x{self.height}')", "def set_central_border(self, width):\n self._center_widget.setFrameShape(QFrame.Box)\n self._center_widget.setLineWidth(width)", "def width(self, value):\n self.validate_input(width=value)\n self.__width = value" ]
[ "0.79277027", "0.74946475", "0.74399734", "0.73794276", "0.7376792", "0.7335927", "0.7126565", "0.70414203", "0.69472903", "0.69472903", "0.69472903", "0.69472903", "0.69472903", "0.69472903", "0.69472903", "0.69472903", "0.69472903", "0.69472903", "0.69472903", "0.69172925", "0.68967795", "0.68906826", "0.6834647", "0.6834647", "0.6834647", "0.68096", "0.6803082", "0.6792365", "0.6724652", "0.6694705" ]
0.8855588
0
operator < (overloaded for sorting). True/False: first based on status, then on nickname (alphabetical descending).
def __lt__(self, other): status = self.get_status() Ostatus = other.get_status() if status == Ostatus: return self.get_nickname() < other.get_nickname() if status == "online": return True elif status == "away" and Ostatus != "online": return True elif status == "busy" and Ostatus not in ["online", "away"]: return True else: return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __lt__(self, other):\n return self.first_name < other.first_name", "def __lt__(self, value):\n return self.name < value.name", "def __lt__(self, other):\r\n print 'eaating shit from Person'\r\n if self.lastName == other.lastName :\r\n return self.name < other.name\r\n return self.lastName < other.lastName", "def __lt__(self,other):\r\n\t\treturn self.rank() < other.rank()", "def __lt__(self, other):\r\n if self.lastName == other.lastName:\r\n return self.name < other.name\r\n return self.lastName < other.lastName", "def __lt__(self, other):\n if self.last_name == other.last_name:\n return self.name < other.name\n return self.last_name < other.last_name", "def __lt__(self, other):\n if self.lastName == other.lastName:\n return self.name < other.name\n return self.lastName < other.lastName", "def __lt__(self, other):\n if self.lastName == other.lastName:\n return self.name < other.name\n return self.lastName < other.lastName", "def __lt__(self, other):\n if self.lastName == other.lastName:\n return self.name < other.name\n return self.lastName < other.lastName", "def __lt__(self, other):\n if self.lastName == other.lastName:\n return self.name < other.name\n return self.lastName < other.lastName", "def __lt__(self, other):\n if self.lastName == other.lastName:\n return self.name < other.name\n return self.lastName < other.lastName", "def __lt__(self, other):\n # If total amount is strictly less than, sort by it\n if self.total_donations() < other.total_donations():\n return True\n elif self.total_donations() == other.total_donations(): # Otherwise, sort by last name\n return self.name.split()[-1] < other.name.split()[-1]\n else:\n return False", "def __lt__(self, rs):\n Number.comparisons += 1\n result = self.data < rs.data\n return result", "def __lt__(self, other):\n return self.name < other.name", "def __lt__(self, other):\n return int(self.rank) < int(other.rank)", "def __lt__(self, rhs):\n return self.balance < rhs.balance", "def __lt__(self, other):\n return (self.timestamp < other.timestamp or\n self.timestamp_desc < other.timestamp_desc)", "def __lt__(self, other) -> bool:\r\n if SCHEDULING_TYPE == 'SJF':\r\n return self.job_size < other.job_size\r\n elif SCHEDULING_TYPE == 'Priority':\r\n return self.priority < other.priority\r\n elif SCHEDULING_TYPE == 'FIFO':\r\n return self.job_id < other.job_id", "def __lt__(self, other):\n if (self.name < other.name):\n return \"Less Than\"\n else:\n return \"Not less than\"", "def __lt__(self,other):\n if self. time2run < other.time2run:\n return True;\n if self.prio < other.prio:\n if self. 
time2run == other.time2run:\n return True\n return False", "def __lt__(self: \"Status\", other: \"Status\") -> bool:\n self_type = type(self)\n other_type = type(other)\n both_not_in_progress = not self.in_progress and not other.in_progress\n\n if both_not_in_progress and self_type is other_type:\n return False\n elif self_type is Failed:\n return True\n elif self_type is NotStarted and other_type in (InProgress, Succeeded):\n return True\n elif self_type is InProgress and other_type is InProgress:\n return self.progress < other.progress # type: ignore\n elif self_type is InProgress and other_type is Succeeded:\n return True\n else:\n return False", "def __lt__(self, other):\n return( (self.last_name, self.first_name)\n < (other.last_name, other.first_name) )", "def __lt__(self, other):\n\n return self._ordinals < other.ordinal()", "def __lt__(self, other):\n return self._d[\"priority\"] < other[\"priority\"]", "def __lt__(self, other):\n return self.name.lower() < other.name.lower()", "def __lt__(self, other):\n return self.name.lower() < other.name.lower()", "def compare_to(self, other) -> int:\n if self.id == other.id:\n return 0\n if self.status != other.status:\n return -1 if self.status < other.status else 1\n if self.last_played != other.last_played:\n return -1 if self.last_played < other.last_played else 1\n return -1 if self.id < other.id else 1", "def __lt__(self, other):\n return self.f() < other.f()", "def __lt__(self, other):\n\n return (self.name) < (other.name)", "def __lt__(self, other):\n return self.priority < other.priority" ]
[ "0.6422751", "0.6379365", "0.6377821", "0.63655895", "0.6361564", "0.633996", "0.6331967", "0.6331967", "0.6331967", "0.6331967", "0.6331967", "0.6277708", "0.6256252", "0.62428635", "0.6210801", "0.61500895", "0.61423683", "0.6133925", "0.6117654", "0.611592", "0.60967314", "0.6086229", "0.6049023", "0.6021743", "0.5988053", "0.5988053", "0.5974995", "0.59738904", "0.59363014", "0.592724" ]
0.749902
0
operator = (overloaded for sorting) True/False Whether the status and nickname are exactly equal.
def __eq__(self, other): return (self.get_status() == other.get_status() and self.get_nickname() == other.get_nickname() and self._email == other._email)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __eq__(a,b): #Todo: this needs major testing of reading, writing\n if not a.assigner.title() == b.assigner.title():\n return False\n if not a.description == b.description:\n return False\n if not a.starttime == b.starttime:\n return False\n if not a.followups == b.followups:\n return False\n if not a.id == b.id:\n return False\n if not a._ticket_id == b._ticket_id:\n return False\n if not a.iscompleted == b.iscompleted:\n return False\n if not a.name == b.name:\n return False\n if not a.priority == b.priority:\n return False\n if not a.whose == b.whose:\n return False\n if not a.submitter_email == b.submitter_email:\n return False\n return True", "def __eq__(self, other) -> bool:\n return self.Firstname == other.Firstname and self.LastName == other.LastName", "def __eq__(self, other):\n return self.last_name == other.last_name and self.first_name == other.first_name", "def __eq__(self, other):\n if not isinstance(other, ResultStatus):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, rs):\n Number.comparisons += 1\n result = self.data == rs.data\n return result", "def __lt__(self, other):\n status = self.get_status()\n Ostatus = other.get_status()\n \n if status == Ostatus:\n return self.get_nickname() < other.get_nickname()\n \n if status == \"online\":\n return True\n elif status == \"away\" and Ostatus != \"online\":\n return True\n elif status == \"busy\" and Ostatus not in [\"online\", \"away\"]:\n return True\n else:\n return False", "def cmp(cls, first, second):\n return first == second", "def __eq__(self, other):\n if not isinstance(other, UserOperatorsDataForSearch):\n return False\n\n return self.to_dict() == other.to_dict()", "def cmp_status(self, cur_status, prev_status, prev_prev_status, field_name_tuple):\n ignore_keys = ['update_date']\n # consider status as new if last record was at least one day ago\n header_field_name, update_time_field_name, activity_list_field_name = field_name_tuple\n time_diff = dateutil.parser.parse(cur_status[header_field_name][update_time_field_name]) - dateutil.parser.parse(prev_prev_status[header_field_name][update_time_field_name])\n if time_diff >= timedelta(days=1):\n return False\n\n # if last record is more recent, consider status as new only if any non-ignored field is different\n cur_status = {k:v for k,v in cur_status[activity_list_field_name][0].items() if k not in ignore_keys}\n prev_status = {k:v for k,v in prev_status[activity_list_field_name][0].items() if k not in ignore_keys}\n return cur_status == prev_status", "def __eq__(self, other):\n for ls, lo in zip(self.leaderboard_names, other.leaderboard_names):\n if ls != lo:\n return False\n for ls, lo in zip(self.leaderboard_groups, other.leaderboard_groups):\n if ls != lo:\n return False\n if self.top_left != other.top_left:\n return False\n if self.bottom_right != other.bottom_right:\n return False\n return True", "def __eq__(self, other):\n if (self.name == other.name):\n return \"Equal\"\n else:\n return \"Not Equal\"", "def _equal_to_op(spec):", "def is_equal(self, a, b):\n return a == b", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n 
pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass" ]
[ "0.6392048", "0.6031803", "0.5996846", "0.5926732", "0.5911945", "0.5871551", "0.58615893", "0.5776352", "0.57724035", "0.5771185", "0.5749959", "0.5705553", "0.56784385", "0.5667999", "0.5667999", "0.5667999", "0.5667999", "0.5667999", "0.5667999", "0.5667999", "0.5667999", "0.5667999", "0.5667999", "0.5667999", "0.5667999", "0.5667999", "0.5667999", "0.5667999", "0.5667999", "0.5667999" ]
0.6560987
0
Select this Member, and deselect all other members.
def clicked(self, event): for member in Member.focus: member.select(False) self.select()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def deselect_me(self):\r\n\t\tself.active = False", "def deselectall(self):\n if self.selection:\n for node in self.selection[:]: node.deselect()", "def Unselect(self):\r\n\r\n if self._current:\r\n self._current.SetHilight(False)\r\n self.RefreshLine(self._current)\r\n\r\n self._current = None\r\n self._select_me = None", "def unselectAll(self):\n\t\tself.tree.UnselectAll()", "def toggle_selected(self):\n\n self._selected = not self._selected", "def __editDeselectAll(self):\n self.activeWindow().selectAll(False)", "def deSelect(self):\n for i in range(len(self.__controlsChecks)):\n self.__controlsChecks[i].setChecked(False)", "def deselect_me(self):\r\n\t\tself.active = False\t\t\r\n\t\t#print('Frame active')\r", "def deselect_all(self):\n if not self.is_multiple:\n raise NotImplementedError(\"You may only deselect all options of a multi-select\")\n\n for opt in self.browser.execute_script(self.SELECTED_OPTIONS, self.browser.element(self)):\n self.browser.raw_click(opt)", "def StopSelection( self ):\n\n # Return if the marquee is not running\n if not self.nodePicker.marquee.IsRunning():\n return\n\n # Stop the marquee\n self.nodePicker.StopSelection()\n\n # Set the colour of the selected objects\n for i in self.nodePicker.selection:\n i.setColorScale( Vec4(1, 0, 0, 1) )\n\n # Attach the selection to the gizmo manager\n self.gizmoMgr.AttachNodePaths( self.nodePicker.selection )\n\n # Get the active gizmo\n activeGizmo = self.gizmoMgr.GetActiveGizmo()\n if activeGizmo is not None:\n\n # Refresh the active gizmo so it appears in the right place\n activeGizmo.Refresh()", "def UnselectAll(self):\r\n\r\n rootItem = self.GetRootItem()\r\n\r\n # the tree might not have the root item at all\r\n if rootItem:\r\n self.UnselectAllChildren(rootItem)\r\n\r\n self.Unselect()", "def clearSelection(self):\n selectionGroup = self.getSelectionGroup()\n if selectionGroup is not None:\n selectionGroup.clear()\n selectionGroup = Field() # NULL\n scene = self._sceneviewer.getScene()\n scene.setSelectionField(selectionGroup)", "def deselectAll(inObjMngr=False):\n if inObjMngr is True:\n c4d.CallCommand(100004767) # deselect all (Object Manager)\n else:\n c4d.CallCommand(12113) # deselect all", "def select(self, select=None):\n if select is not None:\n self.selected = select\n else:\n self.selected = not self.selected", "def deselect(self):\n if self._selected:\n \tself._selected = False\n\t\tself.log(\"device {} is now deselected\".format(self._secondary_address))", "def invert_selection(self):\n pass", "def clearMouseSelection(self):\n pass", "def toggle_select(self):\r\n if not len(self.items):\r\n return\r\n item = self.items[self.item_sel]\r\n if item in self.selected:\r\n self.selected.remove(item)\r\n else:\r\n self.selected.append(item)\r\n self.do_paint()", "def action_unselect_all(self):\n for statement in self:\n statement_lines = statement.credit_move_line_ids + statement.debit_move_line_ids\n statement_lines.write({'cleared_bank_account': False})\n return True", "def select_toggle(self):\n self.selection_toggle(*self.get_children())", "def Reset_Selection(self):\r\n #if previous selection\r\n if( self.selected != 0 ):\r\n self.canvas_one.delete( self.selected ) #remove bounding rectangle\r\n #return chosen node to branch_color\r\n self.canvas_one.itemconfig( self.selected_ls.line_handle , fill = self.branch_color )\r\n self.system.Set_Selected_Node(0)\r\n self.selected = 0\r\n self.selected_ls = 0", "def deselectOfSample(self, names, current_tax_level):\n index_list = 
list(self.sample[self.sample['masked'] == False].index)\n for name in names:\n idx = self.sample[self.sample[current_tax_level]==name].index\n self.sample.at[idx, 'masked'] = True", "def unselect_all_midi_events(self):\n self.select_all_midi_events(select=False)", "def clear_sel(self):\n run(['ipmitool', 'sel', 'clear'], stdout=DEVNULL, stderr=DEVNULL)", "def unassign_members(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"unassign_members\"), kwargs)", "def clearAllCanSelectFlags(self):\n for key in self.canSelectFlags.keys():\n self.canSelectFlags[key] = 0", "def doDeselectItems(self, silent: bool = False) -> None:\n for item in self.getSelectedItems():\n item.setSelected(False)\n if not silent:\n self.onItemsDeselected()", "def unmask_all(self):\n self.Active.mask = False\n self.Confirmed.mask = False\n self.Deaths.mask = False\n self.NewDeaths.mask = False\n self.NewCases.mask = False", "def clear_members(self):\r\n self._members = []\r\n self._length = 4", "def select(self):\r\n pass" ]
[ "0.6832077", "0.6709428", "0.62388176", "0.6035515", "0.6020763", "0.5963187", "0.59248453", "0.59019464", "0.58477366", "0.57764304", "0.5769103", "0.56828064", "0.5629517", "0.56136143", "0.55669916", "0.5542779", "0.55427396", "0.5530634", "0.53993815", "0.5393986", "0.5387687", "0.53833413", "0.5371398", "0.53501034", "0.5339485", "0.53042793", "0.53001434", "0.52652144", "0.5263862", "0.5256104" ]
0.6710304
1
Save directional fit plots (per BX) to PDF files
def plotPerDirectionBx(options): name = options['scan'] + '_'+ options['name'] + options['fitted'] if 'method' in options: name += '_' + options['method'] name += '_collected' f = openRootFileR(name) for bx in options['crossings']: plotname = plotName(name+'_bx'+str(bx), timestamp=False) filename = plotName(name+'_bx'+str(bx), timestamp=True) filepath = plotPath(name+'_bx'+str(bx), timestamp=True) print '<<< Save plot:', filepath graphs = f.Get(plotname) residuals = f.Get(plotname+'_residuals') if 'final' in options: graphs.SetTitle('') residuals.SetTitle('') gStyle.SetOptFit(options['optfit']) canvas = TCanvas(plotname+'_canvas', '', 700, 600) canvas.cd() canvas.SetMargin(0.13, 0.03, 0.33, 0.05) graphs.Draw('AP') gPad.Update() text = TLatex() text.SetNDC() for j, graph in enumerate(graphs.GetListOfGraphs()): graph.SetMarkerStyle(21) graph.SetMarkerColor(2+2*j) graph.GetFunction(options['fit']).SetLineColor(2+2*j) stats = graph.GetListOfFunctions().FindObject('stats') stats.SetTextColor(2+2*j) stats.SetBorderSize(0) stats.SetTextSize(0.04) inverted = graph.GetFunction(options['fit']).GetParameter('p1')<0.0 text.SetTextFont(42) text.SetTextSize(0.04) text.SetTextColor(2+2*j) if inverted and j==0: text.DrawLatex(0.18,0.54,options['scan']+' scan forward') stats.SetX1NDC(0.16) stats.SetX2NDC(0.53) stats.SetY1NDC(0.38) stats.SetY2NDC(0.53) elif inverted and j==1: text.DrawLatex(0.61,0.9,options['scan']+' scan backward') stats.SetX1NDC(0.59) stats.SetX2NDC(0.96) stats.SetY1NDC(0.74) stats.SetY2NDC(0.89) elif j==0: text.DrawLatex(0.18,0.9,options['scan']+' scan forward') stats.SetX1NDC(0.16) stats.SetX2NDC(0.53) stats.SetY1NDC(0.74) stats.SetY2NDC(0.89) else: text.DrawLatex(0.61,0.54,options['scan']+' scan backward') stats.SetX1NDC(0.59) stats.SetX2NDC(0.96) stats.SetY1NDC(0.38) stats.SetY2NDC(0.53) graphs.GetXaxis().SetTitle('Nominal Position [#mum]') graphs.GetYaxis().SetTitle(options['ytitle']) graphs.GetYaxis().SetTitleOffset(1.3) if('final' in options): text.SetTextColor(1) text.SetTextFont(42) text.SetTextSize(0.04) text.SetTextAlign(31) text.DrawLatex(0.97,0.96,O['plotsig']) text.SetTextAlign(11) if options['final'] == 'wip': text.SetTextFont(52) text.SetTextSize(0.04) text.DrawLatex(0.13,0.96,'Work in Progress') else: text.SetTextFont(62) text.SetTextSize(0.05) text.DrawLatex(0.13,0.96,'CMS') text.SetTextFont(52) text.SetTextSize(0.04) text.DrawLatex(0.22,0.96,'Preliminary') for axis in [graphs.GetYaxis(), graphs.GetXaxis()]: axis.SetTitleSize(0.05) axis.SetLabelSize(0.04) axis.SetLabelOffset(0.01) axis.CenterTitle() pad = TPad('pad', 'pad', 0, 0, 1, 0.2) pad.Draw() pad.cd() pad.SetMargin(0.13, 0.03, 0.01, 0.01) for j, residual in enumerate(residuals.GetListOfGraphs()): residual.SetMarkerStyle(21) residual.SetMarkerColor(2+2*j) residuals.Draw("AP") residuals.GetXaxis().SetTitle('') residuals.GetXaxis().SetLabelSize(0.0) residuals.GetXaxis().SetTickSize(0.151) residuals.GetYaxis().SetNdivisions(305) residuals.GetYaxis().SetTickSize(0.019) residuals.GetYaxis().SetLabelSize(0.2) residuals.GetYaxis().SetLabelOffset(0.01) pad.Update() line = TLine(pad.GetUxmin(), 0.0, pad.GetUxmax(), 0.0) line.SetLineColor(14) line.SetLineStyle(3) line.Draw() canvas.cd() text.SetTextFont(42) text.SetTextSize(0.05) text.SetTextAngle(90.0) text.DrawLatex(0.035,0.0,'Resid. '+options['restitle']) if not 'final' in options: drawSignature(filename) # canvas.Modified() # canvas.Update() canvas.Print(filepath) canvas.Close() closeRootFile(f, name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_to_pdf(pdf_fname, cmts_directory, misfit_windows_collection, iterations_list, snr_threshold, event_depth):\n rep_key = sorted(misfit_windows_collection.keys())[0]\n all_events = sorted(misfit_windows_collection[rep_key].keys())\n with PdfPages(pdf_fname) as pdf:\n for each_event in tqdm.tqdm(all_events):\n # we should plot the beachball and plot the source parameter table here\n plot_source_parameters(\n each_event, pdf, cmts_directory, iterations_list)\n # prepare information to plot\n each_misfit_windows_collection = {}\n for each_iteration in iterations_list:\n each_misfit_windows_collection[each_iteration] = (\n misfit_windows_collection[each_iteration][each_event])\n event_depth_dict = event_depth[each_event]\n data_collection, category_phases, category_list = get_plotting_data(\n each_misfit_windows_collection, iterations_list, snr_threshold, event_depth_dict)\n for each_category, phase_list_for_each_category in zip(category_list, category_phases):\n # one page for each category\n figs = plt.figure(figsize=(50, 50))\n collecction_all = {}\n if (each_category != \"surface\"):\n collecction_all[\"deltat\"] = [np.array([], dtype=np.float)\n for i in range(len(iterations_list))]\n collecction_all[\"similarity\"] = [np.array([], dtype=np.float)\n for i in range(len(iterations_list))]\n collecction_all[\"cc\"] = [np.array([], dtype=np.float)\n for i in range(len(iterations_list))]\n # we plot for each phases\n for row_index, each_phase in enumerate(phase_list_for_each_category):\n # we plot for deltat,similarity,cc\n for column_index, plot_type in enumerate([\"deltat\", \"similarity\", \"cc\"]):\n # num must be 1 <= num <= num_max, not 0\n # keep different category's figsize the same\n ax = figs.add_subplot(\n 8, 3, row_index * 3 + column_index+1)\n\n for interation_index, each_iteration in enumerate(iterations_list):\n sns.distplot(data_collection[each_iteration][each_category][row_index]\n [plot_type], ax=ax, hist=False, label=f\"before iteration {each_iteration}\",\n kde_kws={\"linewidth\": 6})\n # collect to the category summary\n if(each_category != \"surface\"):\n if (column_index == 0):\n collecction_all[\"deltat\"][interation_index] = np.concatenate(\n (collecction_all[\"deltat\"][interation_index], data_collection[each_iteration][each_category][row_index]\n [plot_type]))\n elif (column_index == 1):\n collecction_all[\"similarity\"][interation_index] = np.concatenate(\n (collecction_all[\"similarity\"][interation_index], data_collection[each_iteration][each_category][row_index]\n [plot_type]))\n elif (column_index == 2):\n collecction_all[\"cc\"][interation_index] = np.concatenate(\n (collecction_all[\"cc\"][interation_index], data_collection[each_iteration][each_category][row_index]\n [plot_type]))\n if (plot_type == \"deltat\"):\n ax.set_xlim((-10, 10))\n elif(plot_type == \"similarity\"):\n ax.set_xlim((0, 1))\n elif(plot_type == \"cc\"):\n ax.set_xlim((0, 1))\n # ax.legend()\n if (column_index == 0):\n ax.get_yaxis().set_ticklabels([])\n ax.set_ylabel(each_phase, fontsize=50, rotation=90)\n else:\n ax.get_yaxis().set_ticklabels([])\n ax.tick_params(axis=\"x\", labelsize=30)\n if(plot_type != \"similarity\"):\n ax.set_xlabel(plot_type, fontsize=30)\n else:\n ax.set_xlabel(\"zero-lag cc\", fontsize=30)\n if (row_index == 0 and column_index == 1):\n ax.set_title(\n f\"gcmtid: {each_event}\\ncategory: {each_category}\", fontsize=50)\n if (each_category != \"surface\"):\n for column_index, plot_type in enumerate([\"deltat\", \"similarity\", \"cc\"]):\n ax = 
figs.add_subplot(\n 8, 3, (row_index+1) * 3 + column_index+1) # pylint: disable=undefined-loop-variable\n for interation_index, each_iteration in enumerate(iterations_list):\n sns.distplot(collecction_all[plot_type][interation_index], ax=ax, hist=False, label=f\"before iteration {each_iteration}\",\n kde_kws={\"linewidth\": 6})\n if (plot_type == \"deltat\"):\n ax.set_xlim((-10, 10))\n elif(plot_type == \"similarity\"):\n ax.set_xlim((0, 1))\n elif(plot_type == \"cc\"):\n ax.set_xlim((0, 1))\n if (column_index == 0):\n ax.get_yaxis().set_ticklabels([])\n ax.set_ylabel(\n \"all phases\", fontsize=50, rotation=90)\n else:\n ax.get_yaxis().set_ticklabels([])\n ax.tick_params(axis=\"x\", labelsize=30)\n if(plot_type != \"similarity\"):\n ax.set_xlabel(plot_type, fontsize=30)\n else:\n ax.set_xlabel(\"zero-lag cc\", fontsize=30)\n\n figs.tight_layout()\n pdf.savefig(figs)\n plt.close(fig=figs)", "def plot_pdfs(meta):\n f = plt.figure(figsize=(5,5))\n sps = f.add_subplot(1,1,1)\n sps.set_title(meta.name+r' PDFs')\n plotstep(sps,meta.binends,meta.intPz,c=c_int,l=l_int+r'$P(z)$',s=s_int,w=w_int,d=d_int,a=a_int)\n dummy_x,dummy_y = np.array([-1,-2,-3]),np.array([-1,-2,-3])\n plotstep(sps,dummy_x,dummy_y,c=c_exp,s=s_map,w=w_exp,l=r' MLE $z$',d=d_map,a=a_map)\n sps.legend(loc='upper right',fontsize='x-small')\n np.random.seed(seed=meta.ngals)\n randos = random.sample(xrange(meta.ngals),len(meta.colors))\n for r in lrange(randos):\n plotstep(sps,meta.binends,meta.pdfs[randos[r]],c=meta.colors[r%len(meta.colors)],s=s_smp,w=w_smp,d=d_smp,a=a_smp)\n sps.vlines(meta.mleZs[randos[r]],0.,max(meta.pdfs[randos[r]]),color=meta.colors[r],linestyle=s_map,linewidth=w_map,dashes=d_map,alpha=a_map)\n sps.set_ylabel(r'$p(z|\\vec{d})$')\n sps.set_xlabel(r'$z$')\n sps.set_xlim(meta.binlos[0]-meta.bindif,meta.binhis[-1]+meta.bindif)\n sps.set_ylim(0.,1./meta.bindif)\n f.savefig(os.path.join(meta.topdir,'samplepzs.pdf'),bbox_inches='tight', pad_inches = 0)\n return", "def create_pdf():\n\n fig = plt.gcf()\n fig.set_size_inches(OutFileParameter.width, OutFileParameter.height)\n fig.savefig(OutFileParameter.name + '.' 
+ OutFileParameter.ext, dpi=OutFileParameter.dpi)\n\n return None", "def save_pdf(bfile: str, plots: list):\n\n title = infile + \"_qc_report.pdf\"\n\n with PdfPages(title) as pdf:\n for plot in plots:\n pdf.savefig(plot)", "def save_file(self):\n\n file_name, _ = QFileDialog.getSaveFileName(self, \"Save Experiment Output\", \"\", \"Text Files (*.txt);;CSV Files (*.csv)\")\n plot_name = file_name.split(\".\")[0] + \"_plot.pdf\"\n\n try:\n data_file = open(file_name, \"w\")\n data_file.write(self._fitter.fit_as_csv)\n data_file.close()\n\n plot_save = PdfPages(plot_name)\n fig, ax = self._fitter.plot()\n plot_save.savefig(fig)\n plot_save.close()\n except:\n pass", "def save_fit(filename, traces={}, params={}, destinationfolder='', figs=True):\n \n if not os.path.exists(destinationfolder):\n os.makedirs(destinationfolder)\n \n od = os.path.join(destinationfolder, filename) + '_fit'\n \n for i in np.arange(0,500,1):\n d = od + str(i+1)\n \n condition = os.path.exists(d+'.svg') or os.path.exists(d+'.png') or os.path.exists(d+'_params.csv') or os.path.exists(d+'_traces.csv') \n if condition:\n pass\n \n else:\n dict_to_csv(d,params)\n traces_to_csv(d,traces) \n if figs:\n plt.savefig(d+'.svg', format='svg')\n print('Saved ' + d + '.svg')\n plt.savefig(d+'.png', format='png')\n print('Saved ' + d + '.png')\n break", "def plot(self, **kwargs):\n if self.order != None:\n name = str(_constructModelName(self.teff, self.logg, \n self.metal, self.en, self.order, self.path))\n output = kwargs.get('output', str(name) + '.pdf')\n ylim = kwargs.get('yrange', [min(self.flux)-.2, max(self.flux)+.2])\n title = kwargs.get('title')\n save = kwargs.get('save', False)\n \n plt.figure(figsize=(16,6))\n plt.plot(self.wave, self.flux, color='k', \n alpha=.8, linewidth=1, label=name)\n plt.legend(loc='upper right', fontsize=12)\n plt.ylim(ylim) \n \n minor_locator = AutoMinorLocator(5)\n #ax.xaxis.set_minor_locator(minor_locator)\n # plt.grid(which='minor') \n \n plt.xlabel(r'$\\lambda$ [$\\mathring{A}$]', fontsize=18)\n plt.ylabel(r'$Flux$', fontsize=18)\n #plt.ylabel(r'$F_{\\lambda}$ [$erg/s \\cdot cm^{2}$]', fontsize=18)\n if title != None:\n plt.title(title, fontsize=20)\n plt.tight_layout()\n\n if save == True:\n plt.savefig(output)\n plt.show()\n plt.close()\n\n else:\n output = kwargs.get('output'+ '.pdf')\n ylim = kwargs.get('yrange', [min(self.flux)-.2, max(self.flux)+.2])\n title = kwargs.get('title')\n save = kwargs.get('save', False)\n \n plt.figure(figsize=(16,6))\n plt.plot(self.wave, self.flux, color='k', alpha=.8, linewidth=1)\n plt.legend(loc='upper right', fontsize=12)\n plt.ylim(ylim)\n \n minor_locator = AutoMinorLocator(5)\n #ax.xaxis.set_minor_locator(minor_locator)\n # plt.grid(which='minor') \n \n plt.xlabel(r'$\\lambda$ [$\\mathring{A}$]', fontsize=18)\n plt.ylabel(r'$Flux$', fontsize=18)\n #plt.ylabel(r'$F_{\\lambda}$ [$erg/s \\cdot cm^{2}$]', fontsize=18)\n if title != None:\n plt.title(title, fontsize=20)\n plt.tight_layout()\n\n if save == True:\n plt.savefig(output)\n plt.show()\n plt.close()", "def plotPerBxStep(options):\n name = options['scan'] + '_' + options['name'] + options['extra']\n if 'method' in options:\n name += '_' + options['method']\n f = openRootFileR(name)\n for bx in options['crossings']:\n for step in range(len(O['nominalPos'][options['scan']])):\n histname = plotName(name+'_bx'+str(bx)+'_step'+str(step), \\\n timestamp=False)\n filename = plotName(name+'_bx'+str(bx)+'_step'+str(step), \\\n timestamp=True)\n filepath = plotPath(name+'_bx'+str(bx)+'_step'+str(step), 
\\\n timestamp=True)\n print '<<< Save plot:', filepath\n hist = f.Get(histname)\n canvas = TCanvas()\n canvas.SetLogx(options['logx'])\n canvas.SetLogy(options['logy'])\n gStyle.SetOptStat(options['optstat'])\n gStyle.SetOptFit(options['optfit'])\n hist.Draw()\n gPad.Update()\n hist.GetXaxis().SetTitle(options['xtitle'])\n hist.GetXaxis().SetRangeUser(options['xmin'], options['xmax'])\n hist.GetYaxis().SetTitle(options['ytitle'])\n hist.GetYaxis().SetTitleOffset(1.2)\n for axis in [hist.GetXaxis(), hist.GetYaxis()]:\n axis.SetTitleFont(133)\n axis.SetTitleSize(16)\n axis.SetLabelFont(133)\n axis.SetLabelSize(12)\n axis.CenterTitle()\n stats = hist.FindObject('stats')\n stats.SetTextFont(133)\n stats.SetTextSize(16)\n drawSignature(filename)\n gPad.Modified()\n gPad.Update()\n if 'custom' in options:\n extragraphs = options['custom'](hist)\n canvas.Print(filepath)\n canvas.Close()\n closeRootFile(f, name)", "def make_pdf_reports(df, path):\n with PdfPages(path) as pdf:\n # settings for the file\n base = 10 # threshold for grouping points\n page_size = (11, 8.5)\n point_size = 1.5 # scatter plot point size\n\n df[\"color\"] = df.db.apply(rand_color) # adjacency color\n df[\"fuzzy_y\"] = df.y.apply(my_round) # horizontal group color\n df[\"y_color\"] = df.fuzzy_y.apply(rand_color)\n df[\"fuzzy_x\"] = df.x.apply(my_round) # vertical group color\n df[\"x_color\"] = df.fuzzy_x.apply(rand_color)\n\n # Add title and axis names\n plt.figure(figsize=page_size)\n plt.title('Horizontal Grouping Scatter Plot')\n plt.xlabel('x distance')\n plt.ylabel('y distance')\n plt.scatter(df.x, df.y, c=df.y_color, s=point_size)\n pdf.savefig() # saves the current figure into a pdf page\n plt.close()\n\n plt.figure(figsize=page_size)\n plt.title('Vertical Grouping Scatter Plot')\n plt.xlabel('x distance')\n plt.ylabel('y distance')\n plt.scatter(df.x, df.y, c=df.x_color, s=point_size)\n pdf.savefig() # saves the current figure into a pdf page\n plt.close()\n\n plt.figure(figsize=page_size)\n plt.title('Block Adjacency Grouping Scatter Plot')\n plt.xlabel('x distance')\n plt.ylabel('y distance')\n plt.scatter(df.x, df.y, c=df.color, s=point_size)\n pdf.savefig() # saves the current figure into a pdf page\n plt.close()\n\n data1 = df[[\"floor\", \"swing_drop\", \"name\"]]\n data = data1.groupby([\"floor\", \"swing_drop\"]).count()\n data = data.reset_index()\n data.head()\n data = data.fillna(0)\n pivot = data.pivot(index=\"floor\", columns=\"swing_drop\", values=\"name\")\n pivot = pivot.fillna(0)\n order = sorted(df.floor.unique(), reverse=True)\n pivot = pivot.reindex(order)\n plt.figure(figsize=page_size)\n ax = sns.heatmap(pivot, cmap=\"BuPu\")\n ax.set_title(\"Block Qty Heatmap\")\n pdf.savefig()\n plt.close()\n\n # bar chart\n plt.rcParams.update({'font.size': 5})\n plt.figure(figsize=page_size)\n plt.title('Block Style Bar Graph')\n plt.xlabel('Names')\n plt.xticks(rotation=90)\n plt.ylabel('Quantities')\n dd = df[['name', \"guid\"]].groupby(\"name\").count()\n dd = dd.reset_index()\n dd = dd.sort_values(\"guid\")\n plt.bar(dd.name, dd.guid)\n # plt.show()\n pdf.savefig()\n plt.close()\n\n # We can also set the file's metadata via the PdfPages object:\n d = pdf.infodict()\n d['Title'] = 'Multipage PDF Example'\n d['Author'] = 'Matthew Kreidler'\n d['Subject'] = 'How to create a multipage pdf file and set its metadata'\n d['Keywords'] = 'PdfPages multipage keywords author title subject'\n d['CreationDate'] = datetime.datetime.today()\n d['ModDate'] = datetime.datetime.today()\n\n print(\"Graphs and Charts 
finished!\")\n return path", "def plot_pdf(data,b,X,outfile):\t\r\n\tme = \"LE_Plot.plot_pdf: \"\r\n\tshowplot = False\r\n\tt0 = time.time()\r\n\t## Data\r\n\tx, y = data\r\n\txmax, ymax = np.abs(x).max(), np.abs(y).max()\r\n\t## Plot pdf\r\n\tfs = 25\r\n\t# counts, xedges, yedges, im = plt.hist2d(x,y, bins=100, range=[[-2*X,+2*X],blim(b,X)], normed=True)\r\n\tcounts, xedges, yedges, im = plt.hist2d(x,y, bins=100, range=[[-xmax,+xmax],[-ymax,ymax]], normed=True)\r\n\tplt.xlabel(\"$x$\",fontsize=fs);plt.ylabel(\"$\\eta$\",fontsize=fs)\r\n\tplt.suptitle(outfile)\r\n\ttry:\r\n\t\tplt.savefig(outfile+\".png\")\r\n\t\tprint me+\"Plot saved as\",outfile+\".png\"\r\n\texcept IOError:\r\n\t\tprint me+\"ERROR: ouput direcotry not found, could not save\",outfile+\".png\"\r\n\t## Output\r\n\tif showplot:\tplt.show()\r\n\tplt.close()\t\t\r\n\tprint me+\"Plotting PDF:\",round(time.time()-t0,1),\"seconds\"\r\n\treturn counts.T, xedges, yedges", "def save_plots(self):\n pdir = os.path.splitext(self.filename)[0] + '_plots'\n if not os.path.exists(pdir):\n os.mkdir(pdir)\n\n for ii in range(self.uv.n_ant):\n fig, ax = self.plot_single_baseline_dual_pol(ii+1, ii+1)\n print \"Saving ant %i\"%ii\n plt.savefig(os.path.join(pdir, 'ant-%i.png'%ii))\n plt.clf()", "def plot_pdf(pop_name, pop_val, pop_file, full_pop_file, outdir='.'):\n try:\n plt.style.use(\n \"https://gist.githubusercontent.com/avivajpeyi/4d9839b1ceb7d3651cbb469bc6b0d69b/raw/4ee4a870126653d542572372ff3eee4e89abcab0/publication.mplstyle\")\n except Exception:\n pass\n\n plt.close('all')\n all = pd.read_csv(full_pop_file, sep=\" \")\n all['cos_theta_1'] = all['cos_tilt_1']\n all = process_samples(all)\n sub = pd.read_csv(pop_file, sep=\" \")\n sub = process_samples(sub)\n sub['cos_theta_1'] = sub['cos_tilt_1']\n\n fig, axes = plt.subplots(1, 2, figsize=(10, 5))\n for ax, l in zip(axes, [\"cos_theta_1\", \"cos_theta_12\"]):\n ax.hist(all[l], density=True, histtype='step', color=\"tab:blue\", label=\"ALL\", lw=2, alpha=0.8)\n ax.scatter(all[l], [0 for _ in all[l]], color=\"tab:blue\",marker=\"+\")\n ax.hist(sub[l], density=True, histtype='step', color=\"tab:purple\", label=\"HIGH SNR\", lw=2, alpha=0.6)\n ax.scatter(sub[l], [0 for _ in sub[l]], color=\"tab:purple\", marker=\"+\")\n\n x = np.linspace(-1, 1, 100)\n y1 = TruncatedNormal(mu=1, sigma=pop_val[0], minimum=-1, maximum=1).prob(x)\n y2 = TruncatedNormal(mu=1, sigma=pop_val[1], minimum=-1, maximum=1).prob(x)\n axes[1].plot(x, y2, color='tab:gray', zorder=-10, lw=3, label=\"TRUE\")\n axes[0].plot(x, y1, color='tab:gray', zorder=-10, lw=3)\n\n for i in range(len(axes)):\n if (i == 0):\n axes[i].set_xlabel(r\"$\\cos\\ \\theta_1$\")\n axes[i].set_ylabel(\"PDF\")\n else:\n axes[i].set_xlabel(r\"$\\cos\\ \\theta_{12}$\")\n axes[i].set_yticklabels([])\n axes[i].legend()\n axes[i].grid(False)\n axes[i].set_xlim(-1, 1)\n\n plt.suptitle(f\"POP {pop_name}\")\n plt.tight_layout()\n plt.savefig(f\"{outdir}/pop_trues_{pop_name}.png\")", "def plot_orderfits(setup, model, ydata, xdata=None, xmodl=None, textplt=\"Slit\",\n maxp=4, desc=\"\", maskval=-999999.9, slit=None):\n\n plt.rcdefaults()\n plt.rcParams['font.family']= 'times new roman'\n\n # Outfil\n method = inspect.stack()[0][3]\n if 'Arc' in desc:\n method += '_Arc'\n elif 'Blaze' in desc:\n method += '_Blaze'\n else:\n msgs.bug(\"Unknown type of order fits. 
Currently prepared for Arc and Blaze\")\n outroot = qa.set_qa_filename(setup, method, slit=slit)\n #\n npix, nord = ydata.shape\n pages, npp = qa.get_dimen(nord, maxp=maxp)\n if xdata is None: xdata = np.arange(npix).reshape((npix, 1)).repeat(nord, axis=1)\n if xmodl is None: xmodl = np.arange(model.shape[0])\n # Loop through all pages and plot the results\n ndone = 0\n axesIdx = True\n for i in range(len(pages)):\n f, axes = plt.subplots(pages[i][1], pages[i][0])\n ipx, ipy = 0, 0\n for j in range(npp[i]):\n if pages[i][0] == 1 and pages[i][1] == 1: axesIdx = False\n elif pages[i][1] == 1: ind = (ipx,)\n elif pages[i][0] == 1: ind = (ipy,)\n else: ind = (ipy, ipx)\n if axesIdx:\n axes[ind].plot(xdata[:,ndone+j], ydata[:,ndone+j], 'bx', drawstyle='steps')\n axes[ind].plot(xmodl, model[:,ndone+j], 'r-')\n else:\n axes.plot(xdata[:,ndone+j], ydata[:,ndone+j], 'bx', drawstyle='steps')\n axes.plot(xmodl, model[:,ndone+j], 'r-')\n ytmp = ydata[:,ndone+j]\n gdy = ytmp != maskval\n ytmp = ytmp[gdy]\n if ytmp.size != 0:\n amn = min(np.min(ytmp), np.min(model[gdy,ndone+j]))\n else:\n amn = np.min(model[:,ndone+j])\n if ytmp.size != 0:\n amx = max(np.max(ytmp), np.max(model[gdy,ndone+j]))\n else: amx = np.max(model[:,ndone+j])\n # Restrict to good pixels\n xtmp = xdata[:,ndone+j]\n gdx = xtmp != maskval\n xtmp = xtmp[gdx]\n if xtmp.size == 0:\n xmn = np.min(xmodl)\n xmx = np.max(xmodl)\n else:\n xmn = np.min(xtmp)\n xmx = np.max(xtmp)\n #xmn = min(np.min(xtmp), np.min(xmodl))\n #xmx = max(np.max(xtmp), np.max(xmodl))\n if axesIdx:\n axes[ind].axis([xmn, xmx, amn-1, amx+1])\n axes[ind].set_title(\"{0:s} {1:d}\".format(textplt, ndone+j+1))\n else:\n axes.axis([xmn, xmx, amn, amx])\n axes.set_title(\"{0:s} {1:d}\".format(textplt, ndone+j+1))\n ipx += 1\n if ipx == pages[i][0]:\n ipx = 0\n ipy += 1\n # Delete the unnecessary axes\n if axesIdx:\n for j in range(npp[i], axes.size):\n if pages[i][1] == 1: ind = (ipx,)\n elif pages[i][0] == 1: ind = (ipy,)\n else: ind = (ipy, ipx)\n f.delaxes(axes[ind])\n if ipx == pages[i][0]:\n ipx = 0\n ipy += 1\n ndone += npp[i]\n # Save the figure\n if axesIdx: axsz = axes.size\n else: axsz = 1.0\n if pages[i][1] == 1 or pages[i][0] == 1: ypngsiz = 11.0/axsz\n else: ypngsiz = 11.0*axes.shape[0]/axes.shape[1]\n f.set_size_inches(11.0, ypngsiz)\n if desc != \"\":\n pgtxt = \"\"\n if len(pages) != 1:\n pgtxt = \", page {0:d}/{1:d}\".format(i+1, len(pages))\n f.suptitle(desc + pgtxt, y=1.02, size=16)\n f.tight_layout()\n outfile = outroot+'{:03d}.png'.format(i)\n plt.savefig(outfile, dpi=200)\n plt.close()\n f.clf()\n del f\n\n plt.rcdefaults()\n\n return", "def pltmulti(filename):\n pp = PdfPages(filename)\n\n for fig in figs:\n pp.savefig(fig)\n pp.close()\n for fig in figs:\n fig.clear()\n plt.close()", "def save_example_fit(fit):\n json_directory = os.path.join('examples', 'json')\n plot_directory = os.path.join('examples', 'plots')\n if not os.path.isdir(json_directory): os.makedirs(json_directory)\n if not os.path.isdir(plot_directory): os.makedirs(plot_directory)\n\n fit.to_json(os.path.join(json_directory, fit.name + '.json'), meta=fit.metadata)\n\n plot = Plot(fit)\n plot.save(os.path.join(plot_directory, fit.name + '.svg'))\n plot.close()", "def save(file_name):\n setup()\n plt.savefig(file_name)", "def save_reports(self, as_pdf=True, transparent=True, alg_sweep=False):\n for env in self._logger_dict.keys():\n for data_type in ['J', 'R', 'V', 'entropy']:\n if alg_sweep:\n env_dir = self._logger.get_path() / env\n for alg_dir in env_dir.iterdir():\n alg = 
alg_dir.name\n fig = self.get_report(env, data_type, alg)\n\n if fig is not None:\n self._logger.save_figure(fig, data_type, env + '/' + alg,\n as_pdf=as_pdf, transparent=transparent)\n plt.close(fig)\n else:\n fig = self.get_report(env, data_type)\n\n if fig is not None:\n self._logger.save_figure(fig, data_type, env, as_pdf=as_pdf, transparent=transparent)\n plt.close(fig)", "def plot_data(data, par, par_names, par_fixed, output_dir='./'):\n\n datasets = dict()\n\n for data_point in data:\n experiment_name = data_point.par['experiment_name']\n datasets.setdefault(experiment_name, list()).append(data_point)\n\n for experiment_name, dataset in datasets.items():\n\n # ##### Matplotlib ######\n\n name_pdf = ''.join([experiment_name, '.pdf'])\n name_pdf = os.path.join(output_dir, name_pdf)\n\n name_txt = ''.join([experiment_name, '.fit'])\n name_txt = os.path.join(output_dir, name_txt)\n\n print(\" * {} [.fit]\".format(name_pdf))\n\n # #######################\n\n data_grouped = group_data(dataset)\n profiles, r2_min, r2_max = compute_profiles(data_grouped)\n ymin, ymax = set_lim([r2_min, r2_max], 0.10)\n\n with PdfPages(name_pdf) as file_pdf, open(name_txt, 'w') as file_txt:\n\n for (_index, id_), profile in sorted(profiles.items()):\n write_profile(id_, profile, file_txt)\n\n ###### Matplotlib ######\n\n fig = plt.figure(1, frameon=True)\n ax = fig.add_subplot(111)\n\n ax.axhline(0, color='black', alpha=0.87)\n\n ########################\n\n frq, r2_cal, r2_exp, r2_erd, r2_eru = profile[0]\n\n ax.plot(\n frq,\n r2_cal,\n linestyle='-',\n color=red200,\n zorder=2,\n )\n\n ax.errorbar(\n frq,\n r2_exp,\n yerr=[r2_erd, r2_eru],\n fmt='o',\n color=red500,\n zorder=3,\n )\n\n xmin, xmax = set_lim(frq, 0.10)\n\n ax.set_xlim(xmin, xmax)\n ax.set_ylim(ymin, ymax)\n\n ax.xaxis.set_major_locator(MaxNLocator(6))\n ax.yaxis.set_major_locator(MaxNLocator(6))\n\n ax.set_xlabel(r'$\\mathregular{\\nu_{CPMG} \\ (Hz)}$')\n ax.set_ylabel(\n r'$\\mathregular{R_{2,eff} \\ (s^{-1})}$')\n\n ax.set_title('{:s}'.format(id_.upper()))\n\n fig.tight_layout()\n\n ########################\n\n file_pdf.savefig()\n plt.close()\n\n ########################\n\n return", "def gen_plots(uf_dict, f_dict, min_x, max_x, min_y, max_y, axes, name, histogram, total):\n with PdfPages(name) as pdf:\n total_xuf = []\n total_yuf = []\n total_xf = []\n total_yf = []\n for entry in uf_dict:\n print 'Making plot for ' + entry\n xuf, yuf = zip(*uf_dict[entry])\n fig = plt.figure()\n ax1 = fig.add_subplot(111)\n ax1.scatter(xuf, yuf, c='#ad4851', marker='o', label='initial structures')\n try:\n xf, yf = zip(*f_dict[entry])\n ax1.scatter(xf, yf, c='orange', marker='x', label='selected structures')\n except ValueError:\n xf = []\n yf = []\n plt.legend(loc='upper right')\n plt.title(entry, fontsize=30)\n plt.xlim(min_x, max_x)\n plt.ylim(min_y, max_y)\n plt.xlabel(axes[0], fontsize=20)\n plt.ylabel(axes[1], fontsize=20)\n pdf.savefig(fig)\n plt.close()\n\n if total:\n total_xuf.extend(xuf)\n total_yuf.extend(yuf)\n total_xf.extend(xf)\n total_yf.extend(yf)\n\n if histogram:\n bins = np.linspace(min_y, max_y, num=10)\n plt.hist(yuf, bins, alpha=0.5, color='b', label='initial structures')\n try:\n plt.hist(yf, bins, alpha=0.5, color='orange', label='selected structures')\n except ValueError:\n pass\n plt.legend(loc='upper right')\n plt.title(entry, fontsize=30)\n plt.xlabel(axes[1], fontsize=20)\n plt.ylabel('Frequency', fontsize=20)\n pdf.savefig()\n plt.close()\n\n if total:\n print 'Making composite plot'\n fig = plt.figure()\n ax1 = 
fig.add_subplot(111)\n ax1.scatter(total_xuf, total_yuf, c='#ad4851', marker='o', label='initial structures')\n ax1.scatter(total_xf, total_yf, c='orange', marker='x', label='selected structures')\n plt.legend(loc='upper right')\n plt.title('Composite Plot', fontsize=30)\n plt.xlim(min_x, max_x)\n plt.ylim(min_y, max_y)\n plt.xlabel(axes[0], fontsize=20)\n plt.ylabel(axes[1], fontsize=20)\n pdf.savefig(fig)\n plt.close()", "def make_F792_plots(data_obj, title_pages=False):\n\n print(\"Generating plots...\")\n\n # Create color maps\n cmap = plt.get_cmap('jet')\n cmap = plt.get_cmap('gray')\n\n # Call the\n plot_front_title(data_obj)\n\n # -----------------------------------------------------------------------#\n # Initialize the position variables for the text and graphs on the pdf. #\n # -----------------------------------------------------------------------#\n y0 = 0.9\n dy = [0.03, 0.025]\n\n ha = 'left'\n va = 'center'\n fs = 10\n dfs = 2\n\n # metric name value unc min\n xpos = [0.0, 0.4, 0.5, 0.75]\n yi = y0 - 0.1 # The position of the text on the y access, which is constantly updated as more text is added\n\n # -----------------------------------------------------------------------------------#\n # Plot the 'summary' page listing all the tests and the overall results - TEXT ONLY #\n # -----------------------------------------------------------------------------------#\n\n # Create the title of the page\n plot_overall_text(data_obj, yi, xpos, ha, va, fs)\n\n #Plot the overall results text of the first test, Steel Differentiation\n\n\n # Plot the overall results text of the second test, Penetration\n yi = yi - dy[0]\n plot_pen_text(data_obj, 2, yi, xpos, ha, va, fs, dfs)\n\n # Plot the overall results text of the third test, Organic Material Detection\n yi = yi - dy[0]\n plot_BSNR_text(data_obj, 3, yi, xpos, ha, va, fs, dfs)\n\n # Plot the overall results text of the fourth test, Spatial Resolution\n yi = yi - dy[0]\n plot_spatial_text(data_obj, 4, yi, yi - dy[1], xpos, ha, va, fs, dfs)\n yi = yi - dy[1] # Make sure the local yi is updated\n\n # Plot the overall results text of the fifth test, Dynamic Range\n yi = yi - dy[0]\n plot_dyn_text(data_obj, 5, yi, xpos, ha, va, fs, dfs)\n\n # Plot the overall results text of the sixth test, Noise\n yi = yi - dy[0]\n plot_noise_text(data_obj, 6, yi, dy, xpos, ha, va, fs, dfs)\n yi = yi - (dy[1] * 2) # Make sure to update yi, as it was only locally changed in 'plot_noise_text()'\n\n # --------------------------------------------------#\n # Plot the footnotes for the overall results page. 
#\n # --------------------------------------------------#\n plot_overall_footnotes(xpos, ha, va, fs, dfs, standard=\"ASTM F792\")\n\n\n #---------------------------------------------------------#\n # Plot the cropped and rotated images from the processing #\n #---------------------------------------------------------#\n plot_images(data_obj, fs) # Plot the images to the pdf\n\n plot_image_footnotes(data_obj, xpos, ha, va, fs, dfs) # Add in the footnotes to the pdf\n\n # NOTE: Above image plotting the same, with the same footnotes, for F792???\n\n #-----------------------------#\n # Steel differentiation plots #\n #-----------------------------#\n if title_pages:\n new_title_page(data_obj, \"Test 1: Steel Differentiation\")\n\n #Call the function to plot the Steel Differentiation results to the pdf\n\n\n #-------------------#\n # Penetration plots #\n #-------------------#\n if title_pages:\n new_title_page(data_obj, \"Test 2: Penetration\")\n\n # Call the function to plot the Steel Penetration results to the pdf\n #plot_steel_pen(data_obj, 2)\n\n #------------#\n # BSNR plots #\n #------------#\n if title_pages:\n new_title_page(data_obj, \"Test 3: Organic Material Detection\")\n\n # Call the function to plot the Organic Material Detection results to the pdf\n plot_BSNR(data_obj, 3, cmap)\n\n #--------------------#\n # Spatial Resolution #\n #--------------------#\n if title_pages:\n new_title_page(data_obj, \"Test 4: Spatial Resolution\")\n\n # Call the function to plot the Spatial Resolution results to the pdf\n plot_spatial_res(data_obj, 4)\n\n #---------------#\n # Dynamic Range #\n #---------------#\n if title_pages:\n new_title_page(data_obj, \"Test 5: Dynamic Range\")\n\n # Call the function to plot the Dynamic Range results to the pdf\n plot_dynamic_range(data_obj, 5)\n\n #-------#\n # Noise #\n #-------#\n if title_pages:\n new_title_page(data_obj, \"Test 6: Noise (NEQ)\")\n\n # Call the function to plot the Noise (NEQ) results to the pdf\n plot_noise(data_obj, 6)\n\n fig = new_pdf_page(data_obj.pdf_obj, open_fig=False)", "def save_to_file(ax, title: Optional[str] = None, folder: Optional[str] = None, **kwargs) -> None:\n dpi = kwargs.get(\"dpi\") or 140\n fig_size = kwargs.get(\"fig_size\") or (13, 10)\n if title is not None:\n postfix = \"_\" + str(CONFIG.STRATEGY) \\\n + \"_walks\" + str(CONFIG.WALKS_PER_NODE) \\\n + \"_pressure\" + str(CONFIG.PRESSURE) \\\n + \"_EMB\" + str(CONFIG.EMBD_SIZE) \\\n + \"_TFsteps\" + str(CONFIG.STEPS)\n if folder is None:\n plt.savefig(title + \"_for_\" + postfix + \".png\", bbox_inches=\"tight\", dpi=dpi,\n pad_inches=0.05)\n else:\n plt.savefig(folder + \"img/\" + title + \"_for_\" + postfix + \".png\", bbox_inches=\"tight\", dpi=dpi,\n pad_inches=0.05)", "def export_plot(plot, wight=600, height=400, path='./results/reports/', file_format='.svg'):\n name = ''.join(random.choice(ascii_lowercase) for _ in range(10)) + file_format\n pio.write_image(plot, path+name, width=wight, height=height)", "def save_as_fits(self, filename):", "def save(root, exts=['eps', 'pdf']):\n for ext in exts:\n filename = '%s.%s' % (root, ext)\n print 'Writing', filename\n pyplot.savefig(filename)", "def face_down_plot(model_file: str) -> None:\n drawn_model = ossssim.ModelFile(model_file)\n plot = RosePlot(drawn_model.epoch)\n plot.add_model(drawn_model, mc='k', ms=1, alpha=0.5, sample_size=5000)\n plt.savefig(f'{os.path.splitext(model_file)[0]}.pdf')", "def save_plot(self, ):\n pass", "def save_figs(self):\n if not self._fitted:\n self.fit()\n #self._message(\"Saving 
plots...\")\n # 1. Generate the required PNG plots\n # 1.1 Truncation plots\n for i,s in enumerate(self.samples):\n fig,ax=plt.subplots(1,2,figsize=(8,4))\n cyct=np.arange(self.nvalues)\n cycf=np.arange(self._cutoffidx[i])\n cycd=0.5*(cyct[1:]+cyct[:-1])\n ax[0].plot(cyct,self.samplesdata[:,i],'k.-',linewidth=0.5,label=\"Full series\")\n ax[0].plot(cycf,self.samplesdata[:self._cutoffidx[i],i],'r-',linewidth=1,label=\"Truncated\")\n ax[0].set_xlim([0,self.nvalues-1])\n ax[0].set_ylim([0,self.samplesdata.max()*1.1])\n ax[0].set_xlabel(\"Cycle\")\n ax[0].set_ylabel(\"Fluorescence (a.u.)\")\n ax[0].set_title(\"Detected fluorescence\")\n plt.legend(loc='upper left',frameon=False)\n # First derivative\n ax[1].plot(cycd,self.samplesdatadiff[:,i],'k.-',linewidth=0.5)\n ax[1].axvline(self._cutoffidx[i],color='r')\n ax[1].set_xlim([0,self.nvalues-1])\n ax[1].set_ylim([self.samplesdatadiff.min()*1.1,self.samplesdatadiff.max()*1.1])\n ax[1].set_xlabel(\"Cycle\")\n ax[1].set_ylabel(\"dF/dCycle (a.u.)\")\n ax[1].set_title(\"Fluorescence rate\")\n plt.tight_layout()\n fn=get_valid_fname(self.samples[i])\n figname=\"%s_%s_%s.svg\"%(self.ID,\"01truncation\",fn)\n self.info['samples'][s]['Data truncation for fitting']=figname\n plt.savefig('%s/%s'%(self.info['resultsdir'],figname))\n plt.close() \n # 1.2 Fitting plots\n for i,s in enumerate(self.samples):\n fig,ax=plt.subplots(1,3,figsize=(12,4))\n cyct=np.arange(self.nvalues)\n cycf=np.arange(self._cutoffidx[i])\n ax[0].plot(cyct,self.samplesdata[:,i],'k:',linewidth=0.5,label=\"Full series\")\n ax[0].plot(cycf,self.samplesdata[:self._cutoffidx[i],i],'r.-',linewidth=0.5,label=\"Truncated\")\n #ax[0].plot(cycf,self.mak3fpre[s],'y-',linewidth=1,label=\"prefit\")\n ax[0].plot(cycf,self.mak3fluorescence[s],'g-',linewidth=1,label=\"MAK3 fit\")\n ax[0].axvline(self._cutoffidx[i],color='k')\n ax[0].set_xlim([0,self.nvalues-1])\n ax[0].set_ylim([0,self.samplesdata.max()*1.1])\n ax[0].set_xlabel(\"Cycle\")\n ax[0].set_ylabel(\"Fluorescence (a.u.)\")\n ax[0].set_title(\"Detected fluorescence\")\n ax[0].legend(loc='upper left',frameon=False)\n # DNA levels\n ax[1].plot(cycf,self.mak3concentration[s],'g-',linewidth=1,label=\"MAK3\")\n ax[1].axvline(self._cutoffidx[i],color='k')\n ax[1].set_xlim([0,self.nvalues-1])\n ax[1].set_ylim([0,self.mak3concentration[s].max()*1.1])\n ax[1].set_xlabel(\"Cycle\")\n ax[1].set_ylabel(\"concentration (a.u.)\")\n ax[1].set_title(\"estimated cDNA levels\")\n # Efficiency\n ax[2].plot(cycf,self.mak3efficiency[s],'b-',linewidth=1,label=\"MAK3\")\n ax[2].axvline(self._cutoffidx[i],color='k')\n ax[2].set_xlim([0,self.nvalues-1])\n ax[2].set_ylim([0,1.1])\n ax[2].set_xlabel(\"Cycle\")\n ax[2].set_ylabel(\"Efficiency\")\n ax[2].set_title(\"Amplification efficiency\") \n plt.tight_layout()\n fn=get_valid_fname(self.samples[i])\n figname=\"%s_%s_%s.svg\"%(self.ID,\"02mak3\",fn)\n self.info['samples'][s]['MAK3 Fitting']=figname\n plt.savefig('%s/%s'%(self.info['resultsdir'],figname))\n plt.close()\n # 2 Initial concentrations\n figwdth=np.maximum(5,0.4*self.nsamples+1)\n fig,ax=plt.subplots(1,1,figsize=(figwdth,7))\n v=list(self.initialConcentration.values())\n k=list(self.initialConcentration.keys())\n ax.bar(0.75+np.arange(self.nsamples),v,facecolor='k',width=0.5)\n ax.set_xticks(1+np.arange(self.nsamples))\n ax.set_xticklabels(k,rotation=90)\n ax.set_xlim([0,self.nsamples+1])\n plt.tight_layout()\n figname=\"%s_%s_.svg\"%(self.ID,\"00initialConcentration\")\n self.info['figname_initialConcentration']=figname\n 
plt.savefig('%s/%s'%(self.info['resultsdir'],figname))\n plt.close()\n # 3 Fitting Error\n fig,ax=plt.subplots(1,1,figsize=(figwdth,7))\n v=list(self.fitting_error.values())\n k=list(self.fitting_error.keys())\n ax.bar(0.75+np.arange(self.nsamples),v,facecolor='k',width=0.5)\n ax.set_xticks(1+np.arange(self.nsamples))\n ax.set_xticklabels(k,rotation=90)\n ax.set_xlim([0,self.nsamples+1])\n ax.set_ylim([0,1e-2])\n plt.tight_layout()\n figname=\"%s_%s_.svg\"%(self.ID,\"00fittingError\")\n self.info['figname_fittingError']=figname\n plt.savefig('%s/%s'%(self.info['resultsdir'],figname))\n # 4 kinetic constant\n fig,ax=plt.subplots(1,1,figsize=(figwdth,7))\n v=list(self.k.values())\n k=list(self.k.keys())\n ax.bar(0.75+np.arange(self.nsamples),v,facecolor='k',width=0.5)\n ax.set_xticks(1+np.arange(self.nsamples))\n ax.set_xticklabels(k,rotation=90)\n ax.set_xlim([0,self.nsamples+1])\n plt.tight_layout()\n figname=\"%s_%s_.svg\"%(self.ID,\"00kineticConstant\")\n self.info['figname_k']=figname\n plt.savefig('%s/%s'%(self.info['resultsdir'],figname))\n # 5 background fluorescence\n fig,ax=plt.subplots(1,1,figsize=(figwdth,7))\n v=list(self.Fb.values())\n k=list(self.Fb.keys())\n ax.bar(0.75+np.arange(self.nsamples),v,facecolor='k',width=0.5)\n ax.set_xticks(1+np.arange(self.nsamples))\n ax.set_xticklabels(k,rotation=90)\n ax.set_xlim([0,self.nsamples+1])\n plt.tight_layout()\n figname=\"%s_%s_.svg\"%(self.ID,\"00bkgFluorescence\")\n self.info['figname_Fb']=figname\n plt.savefig('%s/%s'%(self.info['resultsdir'],figname))\n # 6 slope\n fig,ax=plt.subplots(1,1,figsize=(figwdth,7))\n v=list(self.slope.values())\n k=list(self.slope.keys())\n ax.bar(0.75+np.arange(self.nsamples),v,facecolor='k',width=0.5)\n ax.set_xticks(1+np.arange(self.nsamples))\n ax.set_xticklabels(k,rotation=90)\n ax.set_xlim([0,self.nsamples+1])\n ax.set_ylim([0,0.025])\n plt.tight_layout()\n figname=\"%s_%s_.svg\"%(self.ID,\"00fluorescenceSlope\")\n self.info['figname_slope']=figname\n plt.savefig('%s/%s'%(self.info['resultsdir'],figname))", "def getUnscaledPDFs(makePlots=False):\n from ROOT import TFile, TH1D, gROOT\n\n pLo, pHi, ppb = 0, 30, 0.03 # requires ppb=0.03, the fit parameters are optimized for it\n nB = int((pHi-pLo)/ppb)\n\n # output file\n rOut = \"%s/data/specPDFs-sf7.root\" % dsi.latSWDir\n tf = TFile(rOut,\"RECREATE\")\n td = gROOT.CurrentDirectory()\n\n # print(\"Generating unscaled PDFs, eLo %.1f eHi %.1f epb %.2f: %s\" % (eLo, eHi, epb, rOut))\n\n # === 1. axion flux\n\n # axion flux scale.\n # NOTE: to do the fit and set a new limit, we set g_ae=1.\n # To plot an expected flux, we would use a real value.\n # Redondo's note: I calculated the flux using gae = 0.511*10^-10\n # for other values of gae use: FLUX = Table*[gae/(0.511*10^-10)]^2\n gae = 1\n gRat = (gae / 5.11e-11)\n redondoScale = 1e19 * gRat**2 # convert table to [flux / (keV cm^2 d)]\n\n axData = []\n with open(\"%s/data/redondoFlux.txt\" % dsi.latSWDir) as f1: # 23577 entries\n lines = f1.readlines()[11:]\n for line in lines:\n data = line.split()\n axData.append([float(data[0]),float(data[1])])\n axData = np.array(axData)\n\n # === 2. ge photoelectric xs\n phoData = []\n with open(\"%s/data/ge76peXS.txt\" % dsi.latSWDir) as f2: # 2499 entries, 0.01 kev intervals\n lines = f2.readlines()\n for line in lines:\n data = line.split()\n phoData.append([float(data[0]),float(data[1])])\n phoData = np.array(phoData)\n\n # === 3. 
tritium\n tritData = []\n with open(\"%s/data/TritiumSpectrum.txt\" % dsi.latSWDir) as f3: # 20000 entries\n lines = f3.readlines()[1:]\n for line in lines:\n data = line.split()\n conv = float(data[2]) # raw spectrum convolved w/ ge cross section\n if conv < 0: conv = 0.\n tritData.append([float(data[1]),conv])\n tritData = np.array(tritData)\n\n # NOTE: check sandbox/th1.py for examples of manually filling TH1D's and verifying wl.GetHisto and wl.npTH1D.\n\n # ROOT output\n h1 = TH1D(\"h1\",\"photoelectric\",nB,pLo,pHi) # [cm^2 / kg]\n h2 = TH1D(\"h2\",\"axioelectric\",nB,pLo,pHi) # [cm^2 / kg]\n h3 = TH1D(\"h3\",\"axion flux, gae=1\",nB,pLo,pHi) # [cts / (keV cm^2 d)]\n h4 = TH1D(\"h4\",\"convolved flux\",nB,pLo,pHi) # [cts / (keV d kg)]\n h5 = TH1D(\"h5\",\"tritium\",nB,pLo,pHi) # [cts] (normalized to 1)\n\n # manually fill ROOT histos (don't normalize yet)\n for iB in range(nB+1):\n ctr = (iB + 0.5)*ppb + pLo\n bLo, bHi = ctr - ppb/2, ctr + ppb/2\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\",category=RuntimeWarning)\n\n # if ma>0, we ignore entries with E <= m.\n ma=0 # this used to be a parameter but it's deprecated.\n\n # photoelectric x-section [cm^2 / kg]\n idx = np.where((phoData[:,0] >= bLo) & (phoData[:,0] < bHi))\n pho = np.mean(phoData[idx][:,1]) * 1000\n if np.isnan(pho) or len(phoData[idx][:,1]) == 0: pho = 0.\n if phoData[idx][:,1].any() <= ma: pho = 0.\n h1.SetBinContent(iB+1,pho)\n\n # axioelectric x-section [cm^2 / kg]\n if ctr > ma: axio = pho * wl.sig_ae(ctr, ma)\n else: axio=0.\n h2.SetBinContent(iB+1,axio)\n\n # axion flux [flux / (cm^2 d keV)]\n idx = np.where((axData[:,0] >= bLo) & (axData[:,0] < bHi))\n flux = np.mean(axData[idx][:,1]) * redondoScale\n if np.isnan(flux): flux = 0.\n h3.SetBinContent(iB+1, flux)\n # YES, adding 1 here. keeps the 6.6 keV line in the proper place for all binnings.\n # it must have to do w/ the way i'm reading in the data from the text files ...\n\n # axion flux PDF [flux / (keV d kg)]\n axConv = axio * flux\n h4.SetBinContent(iB+1, axConv)\n\n # tritium\n idx = np.where((tritData[:,0] >= bLo) & (tritData[:,0] <= bHi))\n trit = np.mean(tritData[idx][:,1])\n if np.isnan(trit): trit = 0.\n h5.SetBinContent(iB+1, trit)\n\n # Pb210 (from separate file)\n tf2 = TFile(\"%s/data/Pb210PDFs.root\" % dsi.latSWDir)\n h6 = tf2.Get(\"hPb210TDL\") # with TDL\n h7 = tf2.Get(\"hPb210\") # without TDL\n h6.SetName(\"h6\")\n h7.SetName(\"h7\")\n\n if makePlots:\n\n # === 1. verify the numpy histogram and ROOT histogram give the same output. OK\n\n x, h210, xpb = wl.npTH1D(h7)\n iE = np.where((x > 45) & (x < 48))\n plt.plot(x[iE], h210[iE], ls='steps', lw=3, c='b')\n plt.xlabel(\"Energy (keV)\", ha='right', x=1)\n plt.tight_layout()\n plt.savefig(\"%s/plots/sf-pk210.pdf\" % dsi.latSWDir)\n\n from ROOT import TCanvas\n c = TCanvas()\n h7.GetXaxis().SetTitle(\"Energy (keV)\")\n h7.GetXaxis().SetRangeUser(45, 48)\n h7.Draw('hist')\n c.Print('%s/plots/sf-pb210th1d.pdf' % dsi.latSWDir)\n\n # === 2. 
print ROOT histos to match w/ numpy histos\n\n c.Clear(); h1.Draw(\"hist\"); c.Print(\"%s/plots/root-sigGe.pdf\" % dsi.latSWDir)\n c.Clear(); h2.Draw(\"hist\"); c.Print(\"%s/plots/root-sigAe.pdf\" % dsi.latSWDir)\n c.Clear(); h3.Draw(\"hist\"); c.Print(\"%s/plots/root-axFlux.pdf\" % dsi.latSWDir)\n c.Clear(); h4.Draw(\"hist\"); c.Print(\"%s/plots/root-axPDF.pdf\" % dsi.latSWDir)\n c.Clear(); h5.Draw(\"hist\"); c.Print(\"%s/plots/root-trit.pdf\" % dsi.latSWDir)\n c.Clear(); h6.Draw(\"hist\"); c.Print(\"%s/plots/root-pb210TDL.pdf\" % dsi.latSWDir)\n c.Clear(); h7.Draw(\"hist\"); c.Print(\"%s/plots/root-pb210.pdf\" % dsi.latSWDir)\n\n gROOT.cd(td.GetPath())\n h1.Write()\n h2.Write()\n h3.Write()\n h4.Write()\n h5.Write()\n h6.Write()\n h7.Write()\n tf.Close()", "def PDF(gal_index,**kwargs):\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n # PDF PLACEHOLDER\n lognHs = np.linspace(-5,8,200)\n total_PDF = np.zeros(len(lognHs))\n\n # READ CELL DATA\n gal_ob = gal.galaxy(gal_index)\n df = gal_ob.cell_data.get_dataframe()\n\n bins = 50\n\n # READ FIT PARAMS OF PDF\n if '_arepoPDF' in p.table_ext:\n fit_params_SFR = np.load(p.d_table+'fragment/PDFs%s_%ipc.npy' % (p.table_ext,p.res),allow_pickle=True).item()\n fit_params = fit_params_SFR['fit_params']\n\n # OPTIONAL : SELECT PART OF FITS\n # fit_params_SFR['SFR_bins'] = fit_params_SFR['SFR_bins'][0:-2]\n # fit_params = fit_params[:,0:-2,:]\n # fit_params_collapse = fit_params_collapse[:,0:-2,:]\n\n fit_lognH_bins = fit_params_SFR['n_vw_bins'] # log\n fit_nSFR_bins = fit_params_SFR['SFR_bins'] # log\n fit_lognH_bins_c = fit_lognH_bins[0:-1] + (fit_lognH_bins[-1]-fit_lognH_bins[-2])/2\n fit_nSFR_bins_c = fit_nSFR_bins[0:-1] + (fit_nSFR_bins[-1]-fit_nSFR_bins[-2])/2\n lognSFR_bins = fit_nSFR_bins#np.linspace(fit_nSFR_bins.min(),fit_nSFR_bins.max(),bins)\n print('log nH bins:')\n print(fit_lognH_bins_c)\n print('log SFR bins:')\n print(fit_nSFR_bins_c)\n if '_arepoPDF' not in p.table_ext:\n lognSFR_bins = np.linspace(-10,1,bins)\n\n # BIN CELL DATA TO REDUCE COMPUTATION TIME\n lognH_bins = np.linspace(-8,2,bins)\n lognH_bins_c = lognH_bins[0:-1] + (lognH_bins[1] - lognH_bins[0])/2\n lognSFR_bins_c = lognSFR_bins[0:-1] + (lognSFR_bins[1] - lognSFR_bins[0])/2\n\n # ADD THIS LOWER VALUE TO INCLUDE ALL CELLS (except density = 0)\n lognH_bins[0] = -30\n lognSFR_bins[0] = -30\n lognSFR_bins[-1] = 10\n\n df.SFR_density[df.SFR_density <= 10.**lognSFR_bins.min()] = 10.**(lognSFR_bins.min()+1)\n df.SFR_density[np.isnan(df.SFR_density)] = 10.**(lognSFR_bins.min()+1)\n\n if not p.add:\n fig = plt.figure(figsize=(15,6))\n ax = fig.add_subplot(1,2,1)\n\n print('Number of cells: ',len(df))\n if p.ow == False:\n try:\n PDF = pd.read_pickle(p.d_XL_data + 'data/cell_data/PDFs/%s%s_%s%s_%s' % (p.sim_name,p.sim_run,gal_ob.name,p.table_ext,p.res))\n total_PDF = PDF['total_PDF'].values\n lognHs = PDF['lognHs'].values\n except:\n p.ow = True\n if p.ow == True:\n print('Re-calculating PDF')\n i = 0\n poly1 = 0\n N_cells = 0\n \n for i_lognH in range(len(lognH_bins)-1):\n for i_lognSFR in range(len(lognSFR_bins)-1):\n \n df_cut = df[(df.nH >= 10**(lognH_bins[i_lognH])) & \\\n (df.nH < 10**(lognH_bins[i_lognH+1]))].reset_index(drop=True)\n if i_lognSFR > 0:\n # (for the first bin in nSFR, doesn't matter if cell has no nSFR)\n df_cut = df_cut[(df_cut.SFR_density >= 10**(lognSFR_bins[i_lognSFR])) & \\\n (df_cut.SFR_density < 10**(lognSFR_bins[i_lognSFR+1]))].reset_index(drop=True)\n N_cells += len(df_cut)\n lognH_mean, lognSFR = 
lognH_bins_c[i_lognH], lognSFR_bins_c[i_lognSFR]\n \n if '_arepoPDF' in p.table_ext:\n # print(lognH_mean,lognSFR,len(df_cut))\n if (lognH_bins[i_lognH] >= fit_lognH_bins[0]):\n print(lognH_bins[i_lognH],len(df_cut))\n i_fit_lognH_bins = np.argmin(np.abs(fit_lognH_bins_c - lognH_mean))\n i_fit_lognSFR_bins = np.argmin(np.abs(fit_nSFR_bins_c - lognSFR))\n fit_params_1 = fit_params[i_fit_lognH_bins,i_fit_lognSFR_bins,:]\n print(lognH_mean,lognSFR,fit_params_1)\n \n if np.sum(fit_params_1) != 0:\n PDF_integrated = 10.**aux.parametric_PDF(lognHs,lognH_mean,fit_params_1[1],fit_params_1[2])\n if fit_params_1[2] == -1.5:\n PDF_integrated = 10.**aux.parametric_PDF(lognHs,fit_params_1[0],fit_params_1[1],fit_params_1[2])\n poly1 += 1\n \n if np.sum(fit_params_1) == 0:\n print('uhoh',lognH_mean,lognSFR)\n PDF_integrated = aux.lognormal_PDF(10.**lognHs,10.**lognH_mean,Mach=1)\n \n if (lognH_mean < fit_lognH_bins[0]):\n PDF_integrated = aux.lognormal_PDF(10.**lognHs,10.**lognH_mean,Mach=10)\n PDF_integrated[np.isnan(PDF_integrated)] = 0\n if (lognH_mean < -4):\n PDF_integrated = aux.lognormal_PDF(10.**lognHs,10.**lognH_mean,Mach=1)\n PDF_integrated[np.isnan(PDF_integrated)] = 0\n \n if p.table_ext == '_M10':\n PDF_integrated = aux.lognormal_PDF(10.**lognHs,10.**lognH_mean,Mach=10)\n PDF_integrated[np.isnan(PDF_integrated)] = 0\n \n # Add to total PDF, weigthed by the mass of that cell\n total_PDF += PDF_integrated * np.sum(df_cut.m)/np.sum(df.m)\n if not p.add: ax.plot(10.**lognHs,PDF_integrated * np.sum(df_cut.m)/np.sum(df.m),color='grey',lw=1,alpha=0.3)\n if np.isnan(np.sum(total_PDF)):\n print(np.sum(df_cut.m)/np.sum(df.m),PDF_integrated)\n pdb.set_trace()\n i += 1\n # if i == 10: pdb.set_trace()\n \n print('Total number of cells processed: ',N_cells)\n print('Total number of bins: ',bins**2)\n print('Number of bins with parametric PDFs: %i' % (poly1))\n total_PDF = total_PDF / np.sum(total_PDF)\n PDF = pd.DataFrame({'lognHs':lognHs,'total_PDF':total_PDF})\n PDF.to_pickle(p.d_XL_data + 'data/cell_data/PDFs/%s%s_%s%s_%s' % (p.sim_name,p.sim_run,gal_ob.name,p.table_ext,p.res))\n\n print('TEST!!!')\n total_PDF = total_PDF[(lognHs >= -4) & (lognHs <= 7)]\n lognHs = lognHs[(lognHs >= -4) & (lognHs <= 7)]\n total_PDF = total_PDF / np.sum(total_PDF)\n if not p.add:\n # First figure: One panel of individual binned PDFs and one panel of total PDF\n ax.set_xscale('log')\n ax.set_yscale('log')\n ax.set_xlabel(getlabel('lnH'))\n ax.set_ylabel('dM/dlognH')\n ax.set_ylim([1e-12,1e-1])\n ax.set_xlim([1e-4,1e7])\n \n ax2 = fig.add_subplot(1,2,2)\n ax2.plot(10.**lognHs,total_PDF)\n ax2.set_xscale('log')\n ax2.set_yscale('log')\n ax2.set_xlabel(getlabel('lnH'))\n ax2.set_ylabel('dM/dlognH')\n ax2.set_ylim([1e-4,1e-1])\n ax2.set_xlim([1e-4,1e5])\n \n if not os.path.isdir(p.d_plot + 'cell_data/PDFs/'): os.mkdir(p.d_plot + 'cell_data/PDFs/') \n plt.savefig(p.d_plot + 'cell_data/PDFs/PDF_%s%s_%s.png' % (gal_ob.name,p.table_ext,p.res), format='png', dpi=250, facecolor='w')\n\n labels = {'_M10':'Mach = 10','_arepoPDF_M51':'AREPO-M51 parametrized PDF','_arepoPDF_CMZ':'AREPO-CMZ parametrized PDF'}\n\n # New figure: One panel of PDF and cumulative mass function (optional)\n if p.add:\n ax1 = p.ax#plt.gca()\n else:\n fig,ax1 = plt.subplots(figsize=(8,6))\n ax1.plot(lognHs,total_PDF,ls=p.ls,lw=2.5,color=p.color,label=labels[p.table_ext])\n ax1.set_yscale('log')\n if not p.add:\n ax1.set_xlabel('log nH [cm$^{-3}$]')\n ax1.set_ylabel('Mass fraction per bin')\n ax1.set_xlim([-4,7])\n ax1.set_ylim([1e-4,1e-1])\n 
ax1.grid(axis='x')\n #if p.add: ax1.legend()\n if not p.add:\n ax2 = ax1.twinx()\n ax2.plot(lognHs,np.cumsum(total_PDF),'--')\n ax2.grid(axis='y')\n ax2.set_ylim([0,1])\n ax2.set_ylabel('Cumulative mass fraction')\n ax2.text(0.4,0.1,'Mass fraction at nH > 1e3: %.1f %%' % (100*np.sum(total_PDF[lognHs >= 3])),\\\n transform=ax1.transAxes,fontsize=15,bbox=dict(facecolor='white', alpha=0.7))\n if not os.path.isdir(p.d_plot + 'cell_data/PDFs'): os.mkdir(p.d_plot + 'cell_data/PDFs') \n if not p.add: plt.savefig(p.d_plot + 'cell_data/PDFs/simple_PDF_%s%s_%s.png' % (gal_ob.name,p.table_ext,p.res), format='png', dpi=250, facecolor='w')\n\n # pdb.set_trace()", "def save_plot_for_figure(figure, file_name, path=None):\n file_extension = '.pdf'\n file_name += file_extension\n if path is not None:\n file_name = os.path.join(path, file_name)\n figure.savefig(file_name, dpi=300, bbox_inches='tight', transparent=True)" ]
[ "0.6579656", "0.64710206", "0.646248", "0.6411696", "0.624837", "0.6235697", "0.62115884", "0.6149735", "0.61176234", "0.6107436", "0.60888684", "0.59961104", "0.5978463", "0.59609264", "0.5945123", "0.5909598", "0.5829063", "0.5823828", "0.581717", "0.58098584", "0.580651", "0.57927084", "0.5772444", "0.5760726", "0.5722316", "0.5713716", "0.5688295", "0.56834435", "0.5682376", "0.5664562" ]
0.6693798
0
Save profile histograms per timestamp to PDF files
def plotPerTimeStamp(options): name = options['name'] + '_' + options['scan'] + '_perTime' if options['extra']: name += '_' + options['extra'] f = openRootFileR(options['name']+'_perTime') histname = plotName(name, timestamp=False) filename = plotName(name, timestamp=True) filepath = plotPath(name, timestamp=True) print '<<< Save plot:', filepath hist = f.Get(histname) hist.SetErrorOption(options['error']) if options['big']: canvas = TCanvas('c', '', 8000, 1200) else: canvas = TCanvas('c', '', 1400, 500) canvas.SetLogy(options['logy']) gStyle.SetOptStat(options['optstat']) hist.Draw() gPad.Update() hist.GetXaxis().SetTimeDisplay(1) hist.GetXaxis().SetTimeFormat('#splitline{%d.%m.%y}{%H:%M:%S}%F1969-12-31' \ +' 22:00:00') hist.GetXaxis().SetLabelOffset(0.03) hist.GetXaxis().SetTitle('') if 'xmin' in options and 'xmax' in options: hist.GetXaxis().SetRangeUser(options['xmin'], options['xmax']) hist.GetYaxis().SetTitle(options['ytitle']) hist.GetYaxis().SetTitleOffset(1.2) for axis in [hist.GetXaxis(), hist.GetYaxis()]: axis.SetTitleFont(133) axis.SetTitleSize(16) axis.SetLabelFont(133) axis.SetLabelSize(12) axis.CenterTitle() if options['big']: axis.SetTickLength(0.01) if options['big']: hist.GetYaxis().SetTitleOffset(0.25) drawSignature(filename) gPad.Modified() gPad.Update() if options['retrn']: return [canvas, hist, f] else: canvas.Print(filepath) canvas.Close() closeRootFile(f, options['name']+'_perTime')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def writeProfile(fname,prof):\n t = np.linspace(0,1,prof.shape[0],endpoint=False)\n fh = open(fname,'w')\n for x in range(prof.shape[0]):\n fh.write('%.7e %.7e\\n' % (t[x],prof[x]))\n fh.close()", "def save_histogram(\n self, filename: [str, Path, BinaryIO], bins: int = 10, **kwargs\n ) -> None:\n self.plot_histogram(bins, show=False)\n plt.savefig(filename, **kwargs)\n if not isinstance(filename, BytesIO):\n print(f\"Picket fence histogram saved to: {osp.abspath(filename)}\")", "def saveStatsFile(self):\n if not os.path.exists(\"stats\"):\n os.mkdir(\"stats\")\n now = datetime.datetime.now()\n parts = [now.year, now.month, now.day]\n parts = [\"%02d\"%x for x in parts]\n todaysFileName = \"-\".join(parts)+\".txt\" \n timeStamp = time.strftime(\"%y%m%d%H%M\", time.localtime())\n log = \",\".join(self.logLinesStats)\n fname = \"stats/\"+todaysFileName\n with open(fname, 'a') as f:\n f.write(timeStamp+\",\"+log+\"\\n\")\n self.log(\"wrote \"+fname)", "def save_histogram(self, step, tensors):\n\n # Save\n with self.summary_writer.as_default():\n for name, tensor in tensors.items():\n tf.summary.histogram(name, tensor, step)", "def file_histogram(self):\n def histogram():\n return {'count': 0, 'size': 0, 'date': None}\n _file_histogram = defaultdict(histogram)\n\n for s in self.subjects:\n for sa in s.samples:\n for blob in sa.blobs.values():\n time_created = str(blob['time_created'])\n date_created = datetime.fromisoformat(time_created).date().isoformat()\n _file_histogram[date_created]['count'] += 1\n _file_histogram[date_created]['date'] = date_created\n _file_histogram[date_created]['size'] += blob['size']\n return _file_histogram", "def output_files(self,positions, num_trials):\r\n output_text = open('results.txt', 'w')\r\n result = self.simulation(positions, num_trials)\r\n for pos in positions:\r\n position_value = 1000 / pos\r\n mean = np.mean(result[pos])\r\n std = np.std(result[pos])\r\n plt.hist(result[pos],100,range=[-1,1])\r\n plt.savefig(\"histogram_\"+str(pos).zfill(4)+\"_pos.pdf\")\r\n plt.close()\r\n output_text.write('For position : {0} with position Value: {1} '.format(pos,position_value))\r\n output_text.write(' The mean is: {0} The standard deviation: {1} \\n'.format(mean,std))\r\n output_text.close()", "def save_histogram(hist, name):\n plt.clf()\n plt.plot(hist, color='k')\n plt.savefig('output/' + name + '.png')", "def plotandsavepooledbargraph(p_peakf_file, roi):\n \n dictdata = genplotlib.gendict(p_peakf_file)\n dictmeans = genplotlib.genlist(dictdata)\n keylist = genplotlib.genkeylist(dictdata)\n genplotlib.plotdata(dictdata, dictmeans, keylist, 'b', ylabel='Hz', ftitle='Mean pumping ' + \n 'frequency '+roi)\n plt.savefig('pooled_dftf_freq_bar_'+roi)", "def save_hist(data, fname, title=''):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n im = ax.hist(data.flatten(),bins=100,range=[0,1])\n plt.savefig(fname, dpi=100)\n plt.close(fig)", "def histogram(histo,nbr_launch,file):\n with open(\"Results/Histogram_{}_{}.txt\".format(nbr_launch,file.strip(\".yaml\")),'w') as f:\n f.write(\"mgm results :\"+\"\\n\")\n for val,occur in histo[\"mgm\"].items():\n f.write(\"value \"+str(val)+\" : \"+str(occur[0])+\" \"+\"Initial costs : \"+str(occur[1]).strip(\"[\"+\"]\")+\"\\n\")\n f.write(\"\\n\")\n f.write(\"mcs_mgm results :\" + \"\\n\")\n for val, occur in histo[\"mcs_mgm\"].items():\n f.write(\"value \" + str(val) + \" : \" + str(occur[0])+\" \"+\"Initial costs : \"+str(occur[1]).strip(\"[\"+\"]\")+\"\\n\")\n f.write(\"\\n\")\n f.write(\"gca_mgm results :\" + \"\\n\")\n 
for val, occur in histo[\"gca_mgm\"].items():\n f.write(\"value \" + str(val) + \" : \" + str(occur[0])+\" \"+\"Initial costs : \"+str(occur[1]).strip(\"[\"+\"]\")+\"\\n\")", "def save_hists_to_file(hists, filen, year, trigger, top_dir, pt):\n logging.info('Saving histograms to \\'{}\\''.format(filen))\n pserver = PlotServer(filen, 'update')\n for frame in hists:\n for var in hists[frame]:\n for state in hists[frame][var]:\n pserver.store_hist(hists[frame][var][state], top_dir, year,\n trigger, pt, var, frame, state)", "def hist_save(self, d, bin1, name, no):\n\t\tfor i in range(0,no):\n\t\t\ts = d[:,i]\n\t\t\tplt.hist(s, bin1, normed=True, color='c')\t# Extracting the parameters from the histogram\n\t\t\tplt.title('Probability Distribution Fnction of %s' %name, fontsize=20)\n\t\t\tplt.xlabel(\"Filter tap values\", fontsize=20)\n\t\t\tplt.ylabel(\"Probability Distribution\", fontsize=20)\n#\t\t\tplt.xlim(0,0.10)\n\t\t\tplt.ylim(0,100)\n#\t\t\tplt.legend(fontsize = 'xx-large')\n\t\t\tplt.savefig('/home/abhishek/Results/comparison_all_sets/Curve fitting/test/set_1/hist_%s_index_%d' %(name,i))\n\t\t\tplt.close()", "def write_hist_img_file(lengths, labels):\n import matplotlib.pyplot as plt\n\n # Find the max and min values for plotting.\n max_length = max(max(i) for i in lengths)\n min_length = min(min(i) for i in lengths)\n bin_size = int(0.025*max_length)\n\n # Make histogram\n colors = ['r', 'g', 'b']\n plt.hist(\n lengths,\n bins=range(min_length, max_length+bin_size, bin_size),\n color=colors[:len(lengths)],\n label=[ntpath.basename(l) for l in labels]\n )\n plt.legend()\n plt.title('Gap Length Histogram')\n plt.xlabel('Gap Length (b)')\n plt.ylabel('Frequency')\n plt.savefig(os.getcwd() + '/gap_stats_hist.pdf')", "def plot_save_dat(counter, out_fname, img_name, xlabel, ylabel):\n with open(out_fname, 'w') as fid:\n for ele in counter.most_common():\n fid.writelines('%s %d\\n' % (ele[0], ele[1]))\n logging.info('Wrote to file: {}'.format(out_fname))\n plt.clf()\n # Histogram plot\n plt.hist(np.array(list(counter.values())), bins=100, normed=True)\n plt.xlabel(xlabel)\n plt.yscale('log')\n plt.ylabel(ylabel)\n plt.savefig(img_name)", "def WriteCurrentHists( filename='hist.root') :\n\n file = ROOT.TFile.Open( filename, 'RECREATE')\n\n for hist, samp in samples.samples.iteritems() :\n newhist = samp.hist.Clone(hist)\n file.cd()\n newhist.Write()\n\n file.Close()", "def save_hist(cls,\n history):\n\n new_hist = {}\n for key in list(history.history.keys()):\n if type(history.history[key]) == np.ndarray:\n new_hist[key] == history.history[key].tolist()\n elif type(history.history[key]) == list:\n if type(history.history[key][0]) == np.float64:\n new_hist[key] = list(map(float, history.history[key]))\n\n tmst = datetime.datetime.now().strftime('%d-%b-%Y-%H_%M_%S_%f')\n np.savetxt(fname='./saved_hist/history-' + tmst + '.csv',\n X=(np.array([new_hist[k] for k in new_hist.keys()])).transpose(),\n newline='\\n',\n delimiter=',',\n fmt='%0.8f',\n header=','.join(list(new_hist.keys())),\n comments='',\n encoding='utf-8')\n return", "def PlotProfile():\n (metadata, data) = Parse('/tmp/sdcard-scalability.txt')\n gp = Gnuplot.Gnuplot(persist=1)\n gp('set data style impulses')\n gp('set xtics 1')\n gp('set pointsize 2')\n gp.clear()\n gp.xlabel('writer process')\n gp.ylabel('duration in second')\n gp.title(metadata.AsTitle())\n\n dataset = data[0]\n x = numpy.array(dataset.time, dtype='int_')\n d = Gnuplot.Data(x, dataset.data,\n title=dataset.name,\n with_='linespoints')\n gp.replot(d)\n 
gp.hardcopy('/tmp/%s-%s-%f.png' %\n (metadata.name, metadata.kernel, metadata.duration),\n terminal='png')", "def saveHistogram(x,\n y1,\n y2=None,\n y3=None,\n color1=Constants.colorBluePlotly,\n color2=Constants.colorOrangePlotly,\n color3=Constants.colorGreenPlotly,\n name1=\"\",\n name2=\"\",\n name3=\"\",\n percent=False,\n xlabel=\"\",\n ylabel=\"\",\n typeyaxis=\"linear\",\n name=\"Graphe Sans Titre\",\n filename=\"untitledPlot\"):\n if x is None or y1 is None:\n print \"error : no data to draw\"\n return\n with open(filename+\".txt\", 'w') as openfile:\n openfile.write(\"name:\"+name+\"\\n\")\n openfile.write(\"xlabel:\"+xlabel+\"\\n\")\n openfile.write(\"ylabel:\"+ylabel+\"\\n\")\n openfile.write(\"typeyaxis:\"+typeyaxis+\"\\n\")\n Utils.drawArray(openfile, x, \"x\")\n Utils.drawArray(openfile, y1, \"y1\")\n openfile.write(\"name1:\"+name1+\"\\n\")\n openfile.write(\"percent:\"+str(percent)+\"\\n\")\n if y2 is not None:\n Utils.drawArray(openfile, y2, \"y2\")\n openfile.write(\"name2:\"+name2+\"\\n\")\n if y3 is not None:\n Utils.drawArray(openfile, y3, \"y3\")\n openfile.write(\"name3:\"+name3+\"\\n\")", "def fn_photonflux_hist(file_name,folder,mean_photons_per_sec):\n import numpy as np\n import matplotlib.pyplot as plt\n from scipy.stats import lognorm\n from pylab import text\n \n n_molecules=len(mean_photons_per_sec)\n \n #Plot photon flux\n figure_name=file_name+'_photonsPerSecond'\n ax = plt.subplot(111)\n num_bins = np.linspace(int(min(mean_photons_per_sec)), int(max(mean_photons_per_sec)), int(np.sqrt(len(mean_photons_per_sec))*4))\n ax.hist(mean_photons_per_sec, bins=num_bins, density=True, color='darkorange',edgecolor='black')\n \n #Fit curve\n sigma,loc,mean = lognorm.fit(mean_photons_per_sec, floc=0)\n pdf = lognorm.pdf(num_bins, sigma, loc, mean) #sigma=shape, mu=np.log(scale)\n ax.plot(num_bins, pdf, 'k',linestyle='--')\n \n #Edit plot\n plt.xlabel('Photon flux ($s^{-1}$)', fontname='Arial', fontsize=12)\n plt.ylabel('Probability density', fontname='Arial', fontsize=12)\n plt.xticks(fontname='Arial', fontsize=12)\n plt.yticks(fontname='Arial', fontsize=12)\n plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0))\n text(0.75, 0.95,'μ='+str(round(mean,2))+' photons $s^{-1}$',horizontalalignment='center', verticalalignment='center',transform = ax.transAxes,fontname='Arial', fontsize=12)\n text(0.40, 0.95,'N='+str(n_molecules),horizontalalignment='center', verticalalignment='center',transform = ax.transAxes,fontname='Arial', fontsize=12)\n plt.savefig(folder+'/Figures/PDFs'+ '/' + figure_name + '.pdf', dpi=500)\n plt.savefig(folder+'/Figures/PNGs'+ '/' + figure_name + '.png', dpi=500)\n \n return (plt.show())", "def fn_total_ontime_hist(file_name,folder,total_ontime):\n import numpy as np\n import matplotlib.pyplot as plt\n from scipy.stats import lognorm\n from pylab import text\n \n n_molecules=len(total_ontime)\n \n #Plot histogram\n figure_name=file_name+'_totalOntime'\n ax = plt.subplot(111)\n num_bins = np.linspace(int(min(total_ontime)), int(max(total_ontime)), int(np.sqrt(len(total_ontime))*3))\n ax.hist(total_ontime, bins=num_bins, density=True,color='firebrick',edgecolor='black')\n\n #Fit curve\n sigma,loc,mean = lognorm.fit(total_ontime, floc=0)\n pdf = lognorm.pdf(num_bins, sigma, loc, mean) #sigma=shape, mu=np.log(scale)\n ax.plot(num_bins, pdf, 'k',linestyle='--')\n\n #Edit plot\n plt.xlabel('Total on time (s)', fontname='Arial', fontsize=12)\n plt.ylabel('Probability density', 
fontname='Arial', fontsize=12)\n plt.xticks(fontname='Arial',fontsize=12)\n plt.yticks(fontname='Arial', fontsize=12)\n plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0))\n text(0.75, 0.95,'μ='+str(round(mean,2))+' s',horizontalalignment='center', verticalalignment='center',transform = ax.transAxes,fontname='Arial', fontsize=12)\n text(0.40, 0.95,'N='+str(n_molecules),horizontalalignment='center', verticalalignment='center',transform = ax.transAxes,fontname='Arial', fontsize=12)\n plt.savefig(folder+'/Figures/PDFs'+ '/' + figure_name + '.pdf', dpi=500)\n plt.savefig(folder+'/Figures/PNGs'+ '/' + figure_name + '.png', dpi=500)\n \n return (plt.show())", "def afterLoop(self):\n\t\tself.output_file.cd()\n\t\tfor histogram in self.histograms.itervalues():\n\t\t\thistogram.Write()", "def construct_figure( fig_file_folder, header, cov_values ):\n\t\n\tfig, ax = plt.subplots()\n\tax.hist( cov_values, bins=10000 )\n\tax.set_xlim( 0, 200 )\n\tax.set_xlabel( \"coverage\" )\n\tax.set_ylabel( \"number of positions\" )\n\tax.set_title( header + \" len=\" + str( len( cov_values ) ) )\n\tfig.savefig( fig_file_folder + header + \".pdf\", dpi=300 )\n\t\n\tplt.close( \"all\" )", "def plot_and_save(data, prefix, name):\n plt.figure()\n plt.hist(data)\n plt.title(name)\n plt.xlabel(\"Value\")\n plt.ylabel(\"Frequency\")\n plt.savefig(prefix + name + '.png')\n plt.close()", "def writeStatsToFile( gfname, sfname, tgraph ):\n ParProbG = graphWithCutoff(gfname, 0.0)\n with open(sfname,'wb') as ofile:\n for u,v in itertools.combinations( tgraph.nodes(), 2 ):\n ofile.write(\"{0} {1}\\n\".format( ParProbG[u][v]['weight'] if ParProbG.has_edge(u,v) else 0.0, 1 if tgraph.has_edge(u,v) else 0) )", "def write_hist_text_file(lengths, labels):\n for lengths_list, label in zip(lengths, labels):\n hist_file_name = label[:label.rfind('.')] + '.all_lengths.txt'\n with open(os.getcwd() + '/' + ntpath.basename(hist_file_name), 'w') as out_file:\n out_file.write(ntpath.basename(label) + '\\n')\n for length in sorted(lengths_list):\n out_file.write(str(length) + '\\n')", "def save_hist(self, file_name):\n file_ext = file_name.split(\".\")[-1]\n assert file_ext == \"npy\", \"The file extension has to be npy (numpy file)\"\n np.save(file_name, self.hist)", "def make_and_save_histogramsX(pred_steerings, real_steerings,\n img_name = \"histogramsX.png\"):\n pred_steerings = np.array(pred_steerings)\n real_steerings = np.array(real_steerings)\n max_h = np.maximum(np.max(pred_steerings), np.max(real_steerings))\n min_h = np.minimum(np.min(pred_steerings), np.min(real_steerings))\n bins = np.linspace(min_h, max_h, num=50)\n plt.hist(pred_steerings, bins=bins, alpha=0.5, label='Predicted', color='b')\n plt.hist(real_steerings, bins=bins, alpha=0.5, label='Real', color='r')\n #plt.title('Steering angle')\n plt.legend(fontsize=10)\n plt.savefig(img_name, bbox_inches='tight')", "def generate_activity_histogram(messages, filename):\n times = range(24)\n fig, ax = plt.subplots()\n ax.hist([message.time.hour for message in messages], times, density=True)\n ax.set_xlabel(\"Time\")\n ax.set_xlim(min(times), max(times))\n ax.set_xticks(times)\n ax.set_xticklabels(f\"{item}\" for item in times)\n ax.set_ylabel(\"Messages / Total Messages\")\n ax.set_ylim(0, 0.2)\n fig.savefig(filename)\n plt.close(fig)", "def makePdf(sources):\n pdf = PdfPages(\"sample_features.pdf\")\n classnames = []\n classname_dict = {}\n x = 2 # number of subplot columns\n y = 3 # number of subplot rows\n 
for source in sources:\n lc = source.lcs[0]\n\n if lc.classname not in classnames:\n classnames.append(lc.classname)\n classname_dict[lc.classname] = [lc]\n else:\n classname_dict[lc.classname].append(lc)\n\n if len(classname_dict[lc.classname]) < 3:\n\n label = lc.classname + \"; ID: \" + lc.id\n # all_times histogram:\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_title(label)\n ax.axis('off')\n\n ax1 = fig.add_subplot(321)\n ax2 = fig.add_subplot(322)\n ax2.axis('off')\n ax3 = fig.add_subplot(323)\n ax4 = fig.add_subplot(324)\n ax4.axis('off')\n ax5 = fig.add_subplot(325)\n ax6 = fig.add_subplot(326)\n ax6.axis('off')\n\n hist, bins, other = ax1.hist(lc.all_times, 50, normed=True)\n ax1.text(np.max(bins) * 0.1, np.max(hist) * 0.8,\n r'Histogram (normed) of all $\\Delta$Ts')\n\n ax2.text(0.0, 0.9, (r'$\\bullet$med time to next obs: ' +\n str(np.round(lc.cads_med, 4))))\n ax2.text(0.0, 0.75, (r'$\\bullet$avg time to next obs: ' +\n str(np.round(lc.avgt, 4))))\n ax2.text(0.0, 0.6, (r'$\\bullet$std dev of time to next obs: ' +\n str(np.round(lc.cads_std, 4))))\n ax2.text(0.0, 0.45, (r'$\\bullet$med of all $\\Delta$Ts: ' +\n str(np.round(lc.all_times_med, 4))))\n ax2.text(0.0, 0.3, (r'$\\bullet$avg of all $\\Delta$Ts: ' +\n str(np.round(lc.all_times_avg, 4))))\n ax2.text(0.0, 0.15, (r'$\\bullet$std dev of all $\\Delta$Ts: ' +\n str(np.round(lc.all_times_std, 4))))\n\n hist, bins, other = ax3.hist(lc.cads, 50)\n ax3.text(np.max(bins) * 0.1, np.max(hist) * 0.8,\n r'Hist of time to next obs')\n\n ax6.text(\n 0.0, 0.9, r'$\\bullet$Number of epochs: ' + str(lc.n_epochs))\n ax6.text(0.0, 0.75, (r'$\\bullet$Time b/w first & last obs (days): ' +\n str(np.round(lc.total_time, 2))))\n ax6.text(0.0, 0.6, (r'$\\bullet$Average error in mag: ' +\n str(np.round(lc.avg_err, 4))))\n ax6.text(0.0, 0.45, (r'$\\bullet$Median error in mag: ' +\n str(np.round(lc.med_err, 4))))\n ax6.text(0.0, 0.3, (r'$\\bullet$Std dev of error: ' +\n str(np.round(lc.std_err, 4))))\n ax6.text(0.0, 0.15, '')\n\n ax5.scatter(lc.epochs, lc.mags)\n\n ax4.text(0.0, 0.9, (r'$\\bullet$Avg double to single step ratio: ' +\n str(np.round(lc.avg_double_to_single_step, 3))))\n ax4.text(0.0, 0.75, (r'$\\bullet$Med double to single step: ' +\n str(np.round(lc.med_double_to_single_step, 3))))\n ax4.text(0.0, 0.6, (r'$\\bullet$Std dev of double to single step: ' +\n str(np.round(lc.std_double_to_single_step, 3))))\n ax4.text(\n 0.0, 0.45,\n (r'$\\bullet$1st peak to 2nd peak (in all $\\Delta$Ts): ' +\n str(np.round(lc.all_times_nhist_peak_1_to_2, 3))))\n ax4.text(\n 0.0, 0.3,\n (r'$\\bullet$2ndt peak to 3rd peak (in all $\\Delta$Ts): ' +\n str(np.round(lc.all_times_nhist_peak_2_to_3, 3))))\n ax4.text(\n 0.0, 0.15,\n (r'$\\bullet$1st peak to 3rd peak (in all $\\Delta$Ts): ' +\n str(np.round(lc.all_times_nhist_peak_1_to_3, 3))))\n\n pdf.savefig(fig)\n\n pdf.close()\n\n pdf = PdfPages('feature_plots.pdf')\n\n fig = plt.figure()\n\n ax1 = fig.add_subplot(221)\n ax2 = fig.add_subplot(222)\n ax3 = fig.add_subplot(223)\n ax4 = fig.add_subplot(224)\n\n plt.subplots_adjust(wspace=0.4, hspace=0.4)\n\n classnamenum = 0\n\n colors = ['red', 'yellow', 'green', 'blue', 'gray', 'orange', 'cyan',\n 'magenta']\n for classname, lcs in list(classname_dict.items()):\n classnamenum += 1\n print(classname, len(lcs), 'light curves.')\n attr1 = []\n attr2 = []\n attr3 = []\n attr4 = []\n attr5 = []\n attr6 = []\n attr7 = []\n attr8 = []\n for lc in lcs:\n attr1.append(lc.n_epochs)\n attr2.append(lc.avgt)\n attr3.append(lc.cads_std)\n 
attr4.append(lc.total_time)\n attr5.append(lc.all_times_hist_peak_val)\n attr6.append(lc.cad_probs[5000])\n attr7.append(lc.all_times_nhist_peak_1_to_3)\n attr8.append(lc.all_times_nhist_peak_val)\n\n ax2.scatter(attr1, attr2, color=colors[classnamenum], label=classname)\n ax1.scatter(attr3, attr4, color=colors[classnamenum], label=classname)\n ax2.set_xlabel('N Epochs')\n ax2.set_ylabel('Avg time to next obs')\n ax1.set_xlabel('Standard dev. of time to next obs')\n ax1.set_ylabel('Time b/w first and last obs')\n\n ax3.scatter(attr5, attr6, color=colors[classnamenum], label=classname)\n ax4.scatter(attr7, attr8, color=colors[classnamenum], label=classname)\n ax3.set_xlabel(r'All $\\Delta$T hist peak val')\n ax3.set_ylabel('Prob time to next obs <= 5000 min')\n ax4.set_xlabel(r'$\\Delta$Ts normed hist peak 1 to peak 3')\n ax4.set_ylabel(r'Peak val of all $\\Delta$Ts normed hist')\n\n #ax1.legend(bbox_to_anchor=(1.1, 1.1),prop={'size':6})\n ax2.legend(bbox_to_anchor=(1.1, 1.1), prop={'size': 6})\n #ax3.legend(loc='upper right',prop={'size':6})\n #ax4.legend(loc='upper right',prop={'size':6})\n\n pdf.savefig(fig)\n\n pdf.close()\n return 0", "def save_ttest_metrics(self, ttest_metrics, fname, no_genes=20):\n\n top_genes = self.fetch_gene_descriptions(ttest_metrics, nih_fetch_num=no_genes, printme=False)\n eids = [int(i[0]) for i in top_genes]\n myfig = self.effect_size_distr(ttest_metrics, genes_of_interest=eids[0:no_genes], return_fig=True)\n plt.savefig(fname+'.png')\n\n with open(fname+'.csv', 'wb') as csvfile:\n writer = csv.writer(csvfile)\n for i in top_genes:\n writer.writerow([i[0], i[3], i[1], i[2], i[4]])" ]
[ "0.62776744", "0.6167443", "0.61183095", "0.6048312", "0.5998362", "0.5981554", "0.5956513", "0.59421986", "0.5930612", "0.5912754", "0.5900994", "0.58697546", "0.5851763", "0.58115876", "0.57516927", "0.57259107", "0.57156265", "0.5687887", "0.568231", "0.5666851", "0.5620672", "0.5618388", "0.55856353", "0.55464387", "0.5533626", "0.5518683", "0.5495481", "0.54883194", "0.5487955", "0.54830897" ]
0.62126917
1
Camera projection mode: 0 for perspective, 1 for orthographic.
def projection_mode(self, mode): self.ptr.projection_mode(mode)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def projection_matrix(self) -> TransformationMatrixType:\n if self._projection_matrix is None:\n if self.projection_mode == Projection.TOP_DOWN:\n self._projection_matrix = self.orthographic_matrix\n else:\n self._projection_matrix = self.perspective_matrix\n\n return self._projection_matrix", "def parallel_projection(self):\n return self.camera.parallel_projection", "def projection_matrix(self):\n scene = self.figure.scene\n scene_size = tuple(scene.get_size())\n aspect_ratio = float(scene_size[0]) / float(scene_size[1])\n p = scene.camera.get_perspective_transform_matrix(\n aspect_ratio, -1, 1).to_array().astype(np.float32)\n return p", "def color_mode(self):\n return ids_core.Camera.color_mode.__get__(self)", "def perspective_matrix(self) -> TransformationMatrixType:\n z_near, z_far = self._clipping[self.projection_mode.value]\n return perspective_matrix(\n math.radians(self.fov), self.aspect_ratio, z_near, z_far\n )", "def cameraType(self):\r\n cls = mxs.classof(self._nativePointer)\r\n if cls in (mxs.FreeCamera, mxs.TargetCamera):\r\n return CameraType.Standard\r\n\r\n elif cls == mxs.Physical:\r\n return CameraType.Physical\r\n\r\n elif cls == mxs.VRayPhysicalCamera:\r\n return CameraType.Physical\r\n return 0", "def __set_perspective(self):\n\n src = np.float32([[(.42 * self.img_shape[1],.65 * self.img_shape[0] ),\n (.58 * self.img_shape[1], .65 * self.img_shape[0]),\n (0 * self.img_shape[1],self.img_shape[0]),\n (1 * self.img_shape[1], self.img_shape[0])]])\n\n dst = np.float32([[0,0],\n [self.img_shape[1],0],\n [0,self.img_shape[0]],\n [self.img_shape[1],self.img_shape[0]]])\n\n self.M = cv2.getPerspectiveTransform(src, dst)\n self.M_inv = cv2.getPerspectiveTransform(dst, src)", "def build_perspective_camera(field_of_view=60.0,\n aspect_ratio=1.0,\n near_plane=0.01,\n far_plane=1000.0,\n position=(0.0, 0.0, 5.0),\n enable_zoom=False):\n context = build_context()\n camera = context.THREE.PerspectiveCamera.new_object(field_of_view,\n aspect_ratio, near_plane,\n far_plane)\n camera.position.set(*position)\n controls = context.THREE.OrbitControls.new_object(camera)\n controls.enableZoom = enable_zoom\n return camera", "def _get_camera(self):\n rect = (self._dim[0], self._dim[2], self._dim[1] - self._dim[0],\n self._dim[3] - self._dim[2])\n flip = (False, type(self).__name__ == 'ImageObj', False)\n return scene.cameras.PanZoomCamera(rect=rect, flip=flip)", "def projection(self):\n self.projection = Projection(self)\n return self.projection", "def camera(self):\n return self.__camera", "def orthographic_matrix(self) -> TransformationMatrixType:\n near, far = self._clipping[self.projection_mode.value]\n return orthographic_matrix(self.fov, self.aspect_ratio, near, far)", "def get_projection(self):\n return self.projection", "def set_projection_from_camera(K, width, height):\n\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n \n fx = K[0,0]\n fy = K[1,1]\n fovy = 2*np.arctan(0.5*height/fy)*180/np.pi\n aspect = (width*fy)/(height*fx)\n\n # define the near and far clipping planes\n near = 0.1\n far = 100.0\n\n # set perspective\n # Need to apt-get install freeglut3 and freeglut3-dev\n # https://github.com/thecountoftuscany/PyTeapot-Quaternion-Euler-cube-rotation/issues/1\n gluPerspective(fovy,aspect,near,far)\n glViewport(0,0,width,height)", "def screenToCamera(self,x,y):\n #self.x = x\n #self.y = y\n new_x = x / (self.surf.get_width() - 1) - 0.5\n #-(new_x)\n new_y = y / (self.surf.get_height() - 1)\n new_y = (1.0 - cy) - 0.5\n new_z = -self.camNear\n formula = 
math3dsol.VectorN((new_x,new_y,new_z))\n return formula\n\n # FINISH ME!!!", "def setup_camera(self) -> None:\n self.world.camera.update(\n cam_base_pos=(0, -3, 0),\n cam_dist=1.2*self.world.env_dim,\n cam_yaw=0,\n cam_pitch=-60\n )", "def __init__(self, at=(0, 0, 0), eye=(0, 0, -0.1), lens=None,\r\n is_3d=True, scale=1.0):\r\n super(Camera, self).__init__()\r\n\r\n self.at = at\r\n self.start_eye = eye # for reset with different lens settings\r\n self.eye = [eye[0], eye[1], eye[2]]\r\n if lens == None:\r\n from pi3d.Display import Display\r\n lens = [Display.INSTANCE.near, Display.INSTANCE.far, Display.INSTANCE.fov,\r\n Display.INSTANCE.width / float(Display.INSTANCE.height)]\r\n self.lens = lens\r\n self.view = _LookAtMatrix(at, eye, [0, 1, 0])\r\n if is_3d:\r\n self.projection = _ProjectionMatrix(lens[0], lens[1], lens[2] / scale, lens[3])\r\n else:\r\n self.projection = _OrthographicMatrix(scale=scale)\r\n self.model_view = dot(self.view, self.projection)\r\n # Apply transform/rotation first, then shift into perspective space.\r\n self.mtrx = array(self.model_view, copy=True)\r\n # self.L_reflect = _LookAtMatrix(at,eye,[0,1,0],reflect=True)\r\n self.rtn = [0.0, 0.0, 0.0]\r\n\r\n self.was_moved = True", "def _get_camera(self, mode):\n cam_bp = self.blueprint_lib.find(f\"sensor.camera.{mode}\")\n cam_bp.set_attribute(\"image_size_x\", f\"{self.img_x}\")\n cam_bp.set_attribute(\"image_size_y\", f\"{self.img_y}\")\n cam_bp.set_attribute(\"fov\", f\"{self.img_fov}\")\n cam = self.world.spawn_actor(cam_bp, self.transform, attach_to=self.vehicle) # spawing isn't expected to fail\n \n return cam", "def camera(self):\n return self._camera", "def camera(self):\n return self._camera", "def computeMVP(self):\n projMat = self.converterYUR\n modelViewMat = self.transforMat.invertCompose(\n Globals.render.getTransform(self.cameraNode)).getMat()\n return UnalignedLMatrix4f(modelViewMat * projMat)", "def world_to_camera(self, X):\n raise NotImplementedError", "def enable_parallel_projection(self):\n # Fix the 'reset camera' effect produced by the VTK when parallel\n # projection is enabled.\n angle = np.radians(self.camera.view_angle)\n self.camera.parallel_scale = self.camera.distance * np.sin(0.5 * angle)\n\n self.camera.enable_parallel_projection()\n self.Modified()", "def cameraToWorld(self, p):\n result = self.camPos\n result += p[2] * self.camZ # result is now in the middle of the view-plane\n result += p[0] * self.camX # result is now in the middle-left of the view-plane\n result += p[1] * self.camY # result is now the world-space equivalent of p\n return result", "def polarCameraToCartesian(self):\n x = self.cameraPolar[0] * np.sin(self.cameraPolar[1] * np.pi / 180) * np.sin(self.cameraPolar[2] * np.pi / 180)\n y = self.cameraPolar[0] * np.cos(self.cameraPolar[2] * np.pi / 180)\n z = self.cameraPolar[0] * np.cos(self.cameraPolar[1] * np.pi / 180) * np.sin(self.cameraPolar[2] * np.pi / 180)\n self.cameraPosition = [x, y, z]", "def projection(self):\n return self._map_projection", "def projection(self):\n return self._map_projection", "def world_projection(self, aspect):\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n if aspect < 1:\n gluOrtho2D(\n -self.scale,\n +self.scale,\n -self.scale / aspect,\n +self.scale / aspect)\n else:\n gluOrtho2D(\n -self.scale * aspect,\n +self.scale * aspect,\n -self.scale,\n +self.scale)\n\n glMatrixMode(GL_MODELVIEW)\n glLoadIdentity()\n gluLookAt(\n self.x, self.y, +1.0,\n self.x, self.y, -1.0,\n sin(self.angle), cos(self.angle), 0.0)", "def setMode(self, 
mode):\n if mode == 0 or mode == 1:\n with self.lock:\n self.mode = mode\n else:\n raise FliError(\"FLISetCameraMode failed\")", "def camera(*args, aspectRatio: Union[float, bool]=0.0, cameraScale: Union[float, bool]=0.0,\n centerOfInterest: Union[float, bool]=0.0, clippingPlanes: bool=True, depthOfField:\n bool=True, displayFieldChart: bool=True, displayFilmGate: bool=True,\n displayFilmOrigin: bool=True, displayFilmPivot: bool=True, displayGateMask:\n bool=True, displayResolution: bool=True, displaySafeAction: bool=True,\n displaySafeTitle: bool=True, fStop: Union[float, bool]=0.0, farClipPlane:\n Union[float, bool]=0.0, farFocusDistance: Union[float, bool]=0.0, filmFit:\n Union[AnyStr, bool]=\"\", filmFitOffset: Union[float, bool]=0.0, filmRollOrder:\n Union[AnyStr, bool]=\"\", filmRollValue: Union[float, bool]=0.0, filmTranslateH:\n Union[float, bool]=0.0, filmTranslateV: Union[float, bool]=0.0, focalLength:\n Union[float, bool]=0.0, focusDistance: Union[float, bool]=0.0, homeCommand:\n Union[AnyStr, bool]=\"\", horizontalFieldOfView: Union[float, bool]=0.0,\n horizontalFilmAperture: Union[float, bool]=0.0, horizontalFilmOffset: Union[float,\n bool]=0.0, horizontalPan: Union[float, bool]=0.0, horizontalRollPivot: Union[float,\n bool]=0.0, horizontalShake: Union[float, bool]=0.0, journalCommand: bool=True,\n lensSqueezeRatio: Union[float, bool]=0.0, lockTransform: bool=True, motionBlur:\n bool=True, name: Union[AnyStr, bool]=\"\", nearClipPlane: Union[float, bool]=0.0,\n nearFocusDistance: Union[float, bool]=0.0, orthographic: bool=True,\n orthographicWidth: Union[float, bool]=0.0, overscan: Union[float, bool]=0.0,\n panZoomEnabled: bool=True, position: Union[List[float, float, float], bool]=None,\n postScale: Union[float, bool]=0.0, preScale: Union[float, bool]=0.0, renderPanZoom:\n bool=True, rotation: Union[List[float, float, float], bool]=None, shakeEnabled:\n bool=True, shakeOverscan: Union[float, bool]=0.0, shakeOverscanEnabled: bool=True,\n shutterAngle: Union[float, bool]=0.0, startupCamera: bool=True,\n stereoHorizontalImageTranslate: Union[float, bool]=0.0,\n stereoHorizontalImageTranslateEnabled: bool=True, verticalFieldOfView: Union[float,\n bool]=0.0, verticalFilmAperture: Union[float, bool]=0.0, verticalFilmOffset:\n Union[float, bool]=0.0, verticalLock: bool=True, verticalPan: Union[float, bool]=0.0,\n verticalRollPivot: Union[float, bool]=0.0, verticalShake: Union[float, bool]=0.0,\n worldCenterOfInterest: Union[List[float, float, float], bool]=None, worldUp:\n Union[List[float, float, float], bool]=None, zoom: Union[float, bool]=0.0, q=True,\n query=True, e=True, edit=True, **kwargs)->Union[List[AnyStr], Any]:\n pass" ]
[ "0.6761293", "0.644626", "0.6436744", "0.61794835", "0.6093503", "0.6069873", "0.60338855", "0.60327417", "0.59998", "0.5971919", "0.59537846", "0.593361", "0.59260374", "0.5902397", "0.5899235", "0.58973503", "0.58923215", "0.5890294", "0.5875006", "0.5875006", "0.5871123", "0.58607197", "0.5850966", "0.58503276", "0.581656", "0.5814369", "0.5814369", "0.5813135", "0.57697684", "0.5769567" ]
0.75059366
0
A FuzzyDateField can be assigned a value without anything blowing up
def test_fuzzy_date_field(self): fdf = FuzzyDateField(FuzzyDate(10, 2013))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_fuzzy_date_field_exception(self):\n with self.assertRaises(ValueError):\n fdf = FuzzyDateField(FuzzyDate(59, 2013))", "def test_fuzzy_date_input(self):\n fdi = FuzzyDateInput(required=False)\n\n tests = (\n ([None, None], None),\n ([], None),\n ([9, 2013], FuzzyDate(9,2013)),\n )\n for t in tests:\n comp = fdi.compress(t[0])\n self.assertEqual(comp, t[1])", "def test_fuzzy_date_widget(self):\n fdw = FuzzyDateWidget(attrs={'required' : False})\n tests = (\n (None, [None, None]),\n ('', [None, None]),\n (FuzzyDate(9, 2013), [9, 2013]),\n )\n for t in tests:\n decomp = fdw.decompress(t[0])\n self.assertEqual(decomp, t[1])", "def _check_value(value, field):\n if not value:\n return False\n\n if field.get('date', False):\n # Get date format\n date_format = field.get('date_format', False) or json_pattern_part.get('date_format', False) or self.master_json_pattern.get('date_format', False)\n if date_format:\n value = datetime.strptime(value, date_format)\n\n if field.get('name'):\n field_name = field.get('name')\n # Get the type of the column and cast if necessary\n field_type = model_obj._columns[field_name]._type\n if field_type == 'integer':\n try:\n value = int(value)\n except (TypeError, ValueError), e:\n logger.warning(\"Cannot convert value of integer field to int : %s for field %s\" % (value, field_name))\n logger.warning(e)\n logger.warn(\"Cannot convert value of integer field to int : %s for field %s\" % (value, field_name))\n elif field_type == 'float':\n try:\n value = float(value)\n except (TypeError, ValueError), e:\n logger.warning(\"Cannot convert value of float field to float : %s for field %s\" % (value, field_name))\n logger.warning(e)\n logger.warn(\"Cannot convert value of float field to float : %s for field %s\" % (value, field_name))\n return value", "def test_date_field():", "def test_unique_for_date_with_nullable_date(self):\n FlexibleDatePost.objects.create(\n title=\"Django 1.0 is released\",\n slug=\"Django 1.0\",\n subtitle=\"Finally\",\n posted=datetime.date(2008, 9, 3),\n )\n p = FlexibleDatePost(title=\"Django 1.0 is released\")\n p.full_clean()\n\n p = FlexibleDatePost(slug=\"Django 1.0\")\n p.full_clean()\n\n p = FlexibleDatePost(subtitle=\"Finally\")\n p.full_clean()", "def date_temporal_paradox_free(self):\n valid_date = True\n new_val = self.date_edit.text()\n datetime_object = datetime.strptime(new_val, \"%Y-%m-%d\")\n\n if datetime_object > datetime.now():\n valid_date = False\n return valid_date", "def dateFieldValidator(field):\n if not (field[\"type\"] == \"datetime\" or field[\"type\"] == \"date\"):\n raise ValueError(\"DateFieldValidator error: field type \" + field[\"type\"])\n if \"format\" in field:\n format_string = field[\"format\"]\n # The following is borrowed from datapackage.py...\n\n # Order of the replacements is important since month and minutes\n # can be denoted in a similar fashion\n replacement_order = [('hh', '%m'), (':mm', ':%M'), ('ss', '%S'),\n ('yyyy', '%Y'), ('yy', '%y'), ('mm', '%m'),\n ('dd', '%d')]\n\n # For each replacement we substitute (and ignore the case)\n for (old, new) in replacement_order:\n format_string = re.sub(\"(?i)%s\" % old, new, format_string)\n if field[\"type\"] == \"datetime\":\n return lambda x: datetime.datetime.strptime(x, format_string)\n else:\n return lambda x: datetime.datetime.strptime(x, format_string).date()\n else:\n if field[\"type\"] == \"datetime\":\n return lambda x: datetime.datetime.strptime(x, '%Y-%m-%dT%H:%M:%S%Z')\n else:\n return lambda x: datetime.datetime.strptime(x, 
'%Y-%m-%d').date()", "def validate_future(value: date):\n if value < date.today():\n err = f\"{value} est déjà passé\"\n raise ValidationError(err)", "def test_validate_date_entry_returns_correct_outOfBounds_if_future(self):\n date_string = \"3018-01-21\"\n date_format = settings.DATE_FORMATS['iso 8601']\n\n error_text = \"dates in the future are not permitted\"\n\n result = self.menu.validate_date_entry(date_string, date_format)\n\n expected_result = (error_text, None)\n\n self.assertEqual(result, expected_result)", "def autocast(o_key, value):\n if not isinstance(o_key, (str, unicode)):\n return value\n key, _ = self.attr_name_map[object_class].get(o_key, (o_key, None))\n # handle dates\n if (\"date\" in key and \"relative\" not in key) or \\\n key in [\"end_date\", \"start_date\"]:\n if isinstance(value, datetime.date):\n return value\n try:\n month, day, year = [int(part) for part in value.split(\"/\")]\n return datetime.date(year, month, day)\n except Exception:\n raise BadQueryException(\"Field \\\"{}\\\" expects a MM/DD/YYYY date\"\n .format(o_key))\n # fallback\n return value", "def validate_date(value):\n if date_regex.fullmatch(value):\n return True\n else:\n return False", "def temp_validator(cls, value, field):\n if value == \"U\":\n LOGGER.warning(\"{field.name} value is 'U'. Setting to None.\")\n return None\n return value", "def test_datetime_field():", "def _validate_on_value(self, value: Any) -> None:\n if not self._is_nullable and value is None:\n msg = \"Non-nullable field cannot have None value\"\n if self._resolve_field_name() is not None:\n msg += f\" (field name = '{self._resolve_field_name()}')\"\n raise FieldValueValidationError(msg)", "def clean_date(self):\n input_day = self.cleaned_data.get('day')\n input_date = self.cleaned_data.get('date')\n if input_date < datetime.date.today():\n raise forms.ValidationError(\"Can not create a lesson in the past.\")\n elif input_date.strftime(\"%A\").lower() != input_day:\n raise forms.ValidationError(input_date.strftime(\"%d-%m-%Y\")+\" does not fall on a \"+input_day.title()+\".\")\n return input_date", "def test_bounded_python(self):\n class main_class(HasStrictTraits):\n value = Bounded(date(2007,12, 18),\n date(2003,12, 18),\n date(2010,12, 18))\n\n instance = main_class()\n self.assertEqual(instance.value, date(2007,12, 18))\n with self.assertRaises(TraitError):\n instance.value = 0.2\n\n instance.value = date(2008,12, 18)\n self.assertEqual(instance.value, date(2008,12, 18))", "def _validate(self):\n _models = {'hrrr', 'hrrrak', 'rap'}\n _fields = {'prs', 'sfc', 'nat', 'subh'}\n \n self.date = pd.to_datetime(self.date)\n \n if self.model == 'alaska':\n self.model == 'hrrrak'\n\n assert self.fxx in range(49), \"Forecast lead time `fxx` is too large\"\n assert self.model in _models, f\"`model` must be one of {_models}\"\n if self.model in ['hrrr', 'hrrrak']:\n assert self.field in _fields, f\"`field must be one of {_fields}\"\n else:\n # field is not needed for RAP model.\n self.field = ''\n \n if isinstance(self.priority, str):\n self.priority = [self.priority]\n \n self.priority = [i.lower() for i in self.priority]\n\n # Don't look for data from NOMADS if requested date is earlier\n # than yesterday. 
NOMADS doesn't keep data that old.\n if 'nomads' in self.priority:\n yesterday = datetime.utcnow() - timedelta(hours=24)\n yesterday = pd.to_datetime(f\"{yesterday:%Y-%m-%d}\")\n if self.date < yesterday:\n self.priority.remove('nomads')", "def validate_date_field(self, field: dict, value: str):\n if field.get(\"required\") and value.strip() == \"\":\n return f\"{field.get('label')} is required!\"\n\n try:\n datetime.datetime.strptime(value, self.config.get(\"date_format\"))\n except ValueError:\n return f\"{field.get('label')} should be a date with the format provided in \" \\\n f\"config {self.config.get('date_format')}\"\n\n return \"\"", "def test_form_date_validation(self):\n\n form = My_add_data_form(data={'date': date(1800, 05, 03)})\n self.assertEqual(form.errors['date'], ['You already dead now'])\n form = My_add_data_form(data={'date': date(2200, 05, 03)})\n self.assertEqual(form.errors['date'], ['You not born yet'])", "def test_date_of_birth_good_values(self):\n for input_val, output_val in self.known_values:\n self.line._parse_date_of_birth(input_val)\n self.assertEqual(output_val, self.line.date_of_birth)", "def convert_values(self, value, field):\n if value is None:\n return None\n if field and field.get_internal_type() == 'DateTimeField':\n if isinstance(value, string_types) and value:\n value = parse_datetime(value)\n return value\n elif field and field.get_internal_type() == 'DateField':\n if isinstance(value, datetime.datetime):\n value = value.date() # extract date\n elif isinstance(value, string_types):\n value = parse_date(value)\n elif field and field.get_internal_type() == 'TimeField':\n if (isinstance(value, datetime.datetime) and value.year == 1900 and value.month == value.day == 1):\n value = value.time() # extract time\n elif isinstance(value, string_types):\n # If the value is a string, parse it using parse_time.\n value = parse_time(value)\n # Some cases (for example when select_related() is used) aren't\n # caught by the DateField case above and date fields arrive from\n # the DB as datetime instances.\n # Implement a workaround stealing the idea from the Oracle\n # backend. It's not perfect so the same warning applies (i.e. 
if a\n # query results in valid date+time values with the time part set\n # to midnight, this workaround can surprise us by converting them\n # to the datetime.date Python type).\n elif isinstance(value, datetime.datetime) and value.hour == value.minute == value.second == value.microsecond == 0:\n value = value.date()\n # Force floats to the correct type\n elif value is not None and field and field.get_internal_type() == 'FloatField':\n value = float(value)\n return value", "def test_date_of_birth_bad_value(self):\n for val in self.bad_values:\n self.assertRaises(line_format_errors.FieldParseError,\n lambda: self.line._parse_date_of_birth(val))", "def date(self, value):\n self.date_value = value", "def init_value(self, value, strict: bool = True):\n if isinstance(value, str):\n value = datetime.datetime.fromisoformat(value)\n elif isinstance(value, float):\n value = datetime.datetime.fromtimestamp(value)\n return super().init_value(value, strict)", "def test_patient_one_date_of_birth(self):\r\n self.assertEqual(self.test_patient.dateOfBirth, datetime.date(2000, 2, 13))", "def form_DateDifferentEmpty(request):\n schema = schemaish.Structure()\n schema.add('myDateField', schemaish.Date())\n form = formish.Form(schema, 'form')\n form['myDateField'].widget = formish.Input(empty=datetime.date.today())\n return form", "def val_future_time(value):\n today = timezone.now()\n if value < today:\n raise ValidationError('Datetime should be a future Date and time')", "def clean_date(self):\r\n from datetime import datetime\r\n\r\n date = self.cleaned_data[\"date\"]\r\n if date < datetime.now():\r\n self.add_error(\"date\", \"You cannot add a date for the past.\")\r\n return date", "def date_of_birth(self, value: str) -> None:\n self._date_of_birth = datetime.strptime(value, '%Y-%m-%d')" ]
[ "0.7273849", "0.7094691", "0.64702415", "0.5972673", "0.59609586", "0.5846607", "0.5500081", "0.54843104", "0.5471241", "0.5446694", "0.5375746", "0.528465", "0.5279556", "0.5270848", "0.5266763", "0.5193815", "0.5167447", "0.5148224", "0.51431924", "0.5132247", "0.51303554", "0.5112779", "0.5094594", "0.50792736", "0.50695074", "0.5064209", "0.50402415", "0.50326675", "0.5032242", "0.503163" ]
0.8278529
0
Find a particular attribute in a device retrieved from the database. Return the attribute if found, 'None' otherwise.
def find_attribute(orm_device, attr_name, attr_type): for template_id in orm_device['attrs']: for attr in orm_device['attrs'][template_id]: if (attr['label'] == attr_name) and (attr['type'] == attr_type): LOGGER.debug(f" retrieving attribute {attr}") return attr return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_attr(attributes, name):\n try:\n return attributes.getValue(name)\n except KeyError:\n return None", "def getAttribute(self, username, attribute):\n if username in self.contents:\n if attribute in self.contents[username]:\n return self.contents[username][attribute]\n else:\n self.log.warning(\"Attribute '{}' not found for user '{}'\".format(attribute, username))\n return None\n else:\n self.log.warning(\"User '{}' not found\".format(username))", "def get_attr(name):\n userDoc = get_user()\n _idx = userDoc.index.get(name, None)\n\n if _idx is not None:\n return userDoc.attributes[_idx]\n else:\n return None", "def get(self, attr):\n try:\n return getattr(self, attr)\n except:\n print(\"%s is not an attribute of this instance\" % attr)\n return None", "def retrieve_device_color(self, device_id, attribute):\n if device_id is None:\n self.log_error(MongoDatabase.retrieve_device_color.__name__ + \"Unexpected empty object: device_id\")\n return None\n if attribute is None:\n self.log_error(MongoDatabase.retrieve_device_color.__name__ + \"Unexpected empty object: attribute\")\n return None\n\n try:\n device = self.devices_collection.find_one({\"device_id\": device_id})\n if device is not None:\n if \"colors\" in device:\n colors = device[\"colors\"]\n if attribute in colors:\n return colors[attribute]\n except:\n traceback.print_exc(file=sys.stdout)\n self.log_error(sys.exc_info()[0])\n return None", "def find_element(self, attrib_key, attrib_value, match_option=None):\n selector = UiSelector()\n selector.attributes(attrib_key, attrib_value, match_option)\n return UiObject(selector, self.android_device_driver) if UiObject(\n selector, self.android_device_driver).verify_exist() else None", "def try_to_get_attribute(self, entry, *args):\n try:\n result = entry\n for arg in args:\n result = result[arg]\n return result\n except AttributeError:\n return None", "def get_attr(self, location, attr, default=None):\r\n return self.get_attrs(location).get(attr, default)", "def map_get(node, path):\n if path not in find_map:\n raise errors.RadistPathError(\"Invalid attribute specification\", path)\n attr = map_get_unsafe(node, path)\n if attr is not None:\n return attr\n else:\n raise errors.RadistPathError(\"Attribute isn't defined\", path)", "def get_attribute_by_name(attributes, attributeName):\n for attrib in attributes:\n if attrib['name'] == attributeName:\n return attrib\n return None", "def find_info( attr, kw, metadata, default='' ):\n str_attr = str(attr)\n return kw.get( str_attr, metadata.get( str_attr, default ) )", "def __getattr__(self, attr):\r\n\t\tif (attr in ['firmware', 'vfull', 'ifull', 'lifetime']):\r\n\t\t\treturn self.issue_command (command_id=attr, ch=None, operator='?', n_lines_requested=1)[0][0]", "def getattribute(objeto, name: str):\r\n # Get internal dict value matching name.\r\n value = objeto.__dict__.get(name)\r\n if not value:\r\n # Raise AttributeError if attribute value not found.\r\n return None\r\n # Return attribute value.\r\n return value", "def get_attr(self, server, attribute):\n\t\tattribute = str(attribute)\n\t\tcfg = self.get_cfg(server)\n\t\tif cfg:\n\t\t\treturn cfg.get(attribute)", "def GetCharacterAttribute(self, characterId, attr):\n for charEntry in ClientAPI.GetCharacterEntries():\n if charEntry.CharacterId == characterId:\n return charEntry[attr]\n return None", "def __getitem__(self, key):\n if isinstance(key, int):\n return self.attributes[key]\n\n assert isinstance(key, basestring)\n attr = dwarf_attribute.name_to_value[key]\n for c in 
self.attributes:\n if c.attr == attr:\n return c\n raise KeyError('No such attribute: {}'.format(attr))", "def _get_attr(self, attr, root=None):\n with self._h5file('r') as h5file:\n if root is None:\n obj = h5file\n else:\n obj = h5file[root]\n return get_decoded(obj.attrs, attr)[attr]", "def __h5_attr(self, attr_name, ds_name):\n if ds_name is not None:\n dset = self.fid['/PRODUCT/{}'.format(ds_name)]\n if attr_name not in dset.attrs.keys():\n return None\n\n attr = dset.attrs[attr_name]\n else:\n if attr_name not in self.fid.attrs:\n return None\n\n attr = self.fid.attrs[attr_name]\n\n if isinstance(attr, bytes):\n return attr.decode('ascii')\n\n return attr", "def get_attribute(self, attr):\n logger.debug(\"GET ATTRIBUTE {}\".format(attr))", "def attributes_get(self, attr_name):\n if not self.sqs_attr:\n return None\n\n if attr_name not in self.sqs_attr:\n return None\n\n return self.sqs_attr[attr_name]", "def get_attribute(self, attribute):\r\n return self.connection.get_instance_attribute(self.id, attribute)", "def find(self, line):\n return self._extract_by_key(line, self._attr_key)", "def get_attribute_from_attid(self, attid):\n if len(self.hash_oid_name.keys()) == 0:\n self._populate_oid_attid()\n if self.get_oid_from_attid(attid) in self.hash_oid_name:\n return self.hash_oid_name[self.get_oid_from_attid(attid)]\n else:\n return None", "def get_attribute(self, selector, attribute):\n el = self.locate_element(selector)\n return el.get_attribute(attribute)", "def getAttribute(self, element_tuple, attribute):\n result = self.CORE.find_element(*self.format_element(element_tuple)).get_attribute(attribute)\n self.log_info(f\"Browser.getAttribute: {attribute} attribute of {element_tuple} is {result}\")\n return result", "def find_cms_attribute(attrs, name):\n for attr in attrs:\n if attr['type'].native == name:\n return attr['values']\n raise KeyError(f'Unable to locate attribute {name}.')", "def getAttributeByName(self, name):\n\n for eachAttribute in self._attributes:\n if eachAttribute.getName() == name:\n return eachAttribute\n\n return None", "def GetAttribute(self, attr):\n return self._attrs[attr]", "def get_item_attrib(item, attrib):\n\n item_type = item.type\n\n if item_type == dtf.core.item.TYPE_MODULE:\n table = \"modules\"\n elif item_type == dtf.core.item.TYPE_LIBRARY:\n table = \"libraries\"\n elif item_type == dtf.core.item.TYPE_BINARY:\n table = \"binaries\"\n elif item_type == dtf.core.item.TYPE_PACKAGE:\n table = \"packages\"\n else:\n log.e(TAG, \"Unknown type '%s' in getItem Attribute. Returning\"\n % item_type)\n return None\n\n dtf_db = sqlite3.connect(DTF_DB)\n\n cur = dtf_db.cursor()\n\n sql = (\"SELECT %s \"\n \"FROM %s \"\n \"WHERE name='%s' \"\n 'LIMIT 1' % (attrib, table, item.name))\n\n cur.execute(sql)\n\n try:\n return cur.fetchone()[0]\n except TypeError:\n return None", "def get_attr(self, attr_name, ds_name=None):\n if self.science_product:\n return self.__nc_attr(attr_name, ds_name)\n\n return self.__h5_attr(attr_name, ds_name)" ]
[ "0.6901232", "0.67547685", "0.66523165", "0.65910554", "0.65739346", "0.64429885", "0.64263874", "0.641367", "0.6385312", "0.6384112", "0.63745815", "0.6365594", "0.6340235", "0.6321027", "0.6303907", "0.62828785", "0.62793463", "0.62613136", "0.6258598", "0.62290764", "0.6216731", "0.6178566", "0.61734676", "0.613143", "0.6130407", "0.61247295", "0.6114336", "0.6111508", "0.6095095", "0.6075716" ]
0.79836303
0
Creates a new device id
def generate_device_id(): # TODO this is awful, makes me sad, but for now also makes demoing # easier We might want to look into an auto-configuration feature for # devices, such that ids are not input manually on devices _attempts = 0 generated_id = '' while _attempts < 10 and len(generated_id) == 0: _attempts += 1 new_id = create_id() if Device.query.filter_by(id=new_id).first() is None: LOGGER.debug(f" Generated a new device id {new_id}") return new_id LOGGER.error(f" Failed to generate unique device_id") raise HTTPRequestError(500, "Failed to generate unique device_id")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_device(name, device_type, runtime):\n command = 'create \"%s\" \"%s\" \"%s\"' % (\n name, device_type.identifier, runtime.identifier)\n device_id = _run_command(command)\n\n # The device ID has a new line at the end. Strip it when returning.\n return device_id[:-1]", "def test_create_device(self):\n pass", "def test_create_device(self):\n pass", "def create(cls, imei, device_id):\n try:\n imei_device = cls(imei, device_id)\n imei_device.save()\n except Exception:\n raise Exception", "def create_id():\n unique_id = UniqueId()\n unique_id.put()\n return unique_id.key().id()", "def create_id():\n unique_id = UniqueId()\n unique_id.put()\n return unique_id.key().id()", "def create_tag_id():\n return uuid.uuid1().int", "def flask_create_device():\n try:\n # retrieve the authorization token\n token = retrieve_auth_token(request)\n\n params = {\n 'count': request.args.get('count', '1'),\n 'verbose': request.args.get('verbose', 'false'),\n 'content_type': request.headers.get('Content-Type'),\n 'data': request.data\n }\n\n result = DeviceHandler.create_device(params, token)\n devices = result.get('devices')\n deviceId = devices[0].get('id')\n LOGGER.info(f' Creating a new device with id {deviceId}.')\n return make_response(jsonify(result), 200)\n except HTTPRequestError as e:\n LOGGER.error(f' {e.message} - {e.error_code}.')\n if isinstance(e.message, dict):\n return make_response(jsonify(e.message), e.error_code)\n\n return format_response(e.error_code, e.message)", "def create_device(self, device_dict):\n devices = {'devices': [device_dict]}\n url = '{}/iot/devices'.format(self.url)\n return self.post(url, data=json.dumps(devices), headers=self.headers)", "def register_device(project_id, credentials, device_model_id, device_id):\n base_url = '/'.join([DEVICE_API_URL, 'projects', project_id, 'devices'])\n device_url = '/'.join([base_url, device_id])\n session = google.auth.transport.requests.AuthorizedSession(credentials)\n r = session.get(device_url)\n print(device_url, r.status_code)\n if r.status_code == 404:\n print('Registering....', end='', flush=True)\n r = session.post(base_url, data=json.dumps({\n 'id': device_id,\n 'model_id': device_model_id,\n 'client_type': 'SDK_LIBRARY'\n }))\n if r.status_code != 200:\n raise Exception('failed to register device: ' + r.text)\n print('\\rDevice registered.')", "def create_device(jwt: str, template_id: str, label: str) -> str:\n LOGGER.debug(\"Creating template...\")\n\n args = {\n \"url\": \"{0}/device\".format(CONFIG['dojot']['url']),\n \"headers\": {\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer {0}\".format(jwt),\n },\n \"data\": json.dumps({\n \"templates\": [template_id],\n \"attrs\": {},\n \"label\": label,\n }),\n }\n\n res = DojotAPI.call_api(requests.post, args)\n\n LOGGER.debug(\"... 
created the template\")\n return res[\"devices\"][0][\"id\"]", "def test_create_device1(self):\n pass", "def RegisterDevice(self, device_id, machine_id, type, username):\n dmtoken_chars = []\n while len(dmtoken_chars) < 32:\n dmtoken_chars.append(random.choice('0123456789abcdef'))\n dmtoken = ''.join(dmtoken_chars)\n allowed_policy_types = {\n dm.DeviceRegisterRequest.BROWSER: [\n 'google/chrome/user',\n 'google/chrome/extension'\n ],\n dm.DeviceRegisterRequest.USER: [\n 'google/chromeos/user',\n 'google/chrome/extension'\n ],\n dm.DeviceRegisterRequest.DEVICE: [\n 'google/chromeos/device',\n 'google/chromeos/publicaccount',\n 'google/chrome/extension',\n 'google/chromeos/signinextension'\n ],\n dm.DeviceRegisterRequest.ANDROID_BROWSER: [\n 'google/android/user'\n ],\n dm.DeviceRegisterRequest.TT: ['google/chromeos/user',\n 'google/chrome/user'],\n }\n if machine_id in KIOSK_MACHINE_IDS:\n enrollment_mode = dm.DeviceRegisterResponse.RETAIL\n else:\n enrollment_mode = dm.DeviceRegisterResponse.ENTERPRISE\n self._registered_tokens[dmtoken] = {\n 'device_id': device_id,\n 'device_token': dmtoken,\n 'allowed_policy_types': allowed_policy_types[type],\n 'machine_name': 'chromeos-' + machine_id,\n 'machine_id': machine_id,\n 'enrollment_mode': enrollment_mode,\n 'username': username,\n }\n self.WriteClientState()\n return self._registered_tokens[dmtoken]", "def test_add_device(self):\n\n pass", "def _increment_device_id(self, device_id):\n base = device_id[0:-1]\n letter = device_id[-1]\n\n # AWS-specific munging\n # Perhaps should be moved to the interface anyway does not work for openstack\n log.debug(\"Cloud type is: %s\", self.app.config.cloud_type)\n if self.app.config.cloud_type == 'ec2':\n log.debug('Applying AWS-specific munging to next device id calculation')\n if base == '/dev/xvd':\n base = '/dev/sd'\n if letter < 'f':\n letter = 'e'\n\n # Get the next device in line\n new_id = base + chr(ord(letter) + 1)\n return new_id", "def test_create_device_data(self):\n pass", "def registerDevice(self):\n\t\tr = req.post(\"http://localhost:9090/devices?id={}&sensors={}_{}&board={}\".format(\n\t\t\tBOARD_ID,\n\t\t\tSENSOR1,\n\t\t\tSENSOR2,\n\t\t\tBOARD\n\t\t))\n\t\tprint (\"[{}] Device Registered on Room Catalog\".format(\n\t\t\tint(time.time()),\n\t\t))", "def create(cls, device_id, values, **kwargs):\n salt = get_salt()\n master_key = os.urandom(32)\n\n device = Device.objects.get(pk=device_id)\n device_key = public_key_encrypt(device.loaded_public_key, master_key)\n\n encrypted = {\n key: fernet_encrypt(value, master_key, salt)\n for key, value in values.items()}\n\n keys = {device.id.hex: force_text(base64.b64encode(device_key))}\n\n return cls.objects.create(salt=salt, keys=keys, values=encrypted, **kwargs)", "def create_device(self, name: str, app_eui: str, app_key: str, dev_eui: str):\n return create_device(self.api_key, name, app_eui, app_key, dev_eui)", "def create_device(self, app_name='FooBar', device_type='Raspberry Pi 2'):\n\n app = self.resin.models.application.create(app_name, device_type)\n return app, self.resin.models.device.register(app['id'], self.resin.models.device.generate_uuid())", "def setDeviceID(self, id, unitCode=0):\n resp = self.XAPCommand('DID', id, unitCode=unitCode)\n return int(resp)", "def create_new_measurement(self) -> str:\n try:\n self.measurement.create()\n except ValueError:\n # Handling if existing token is invalid\n self.__setup()\n self.measurement.create()\n\n self.measurement_id = self.measurement.measurement_id\n return self.measurement_id", "def 
register_device():\n payload = request.get_json()\n return _register_device(payload)", "def create_entity(data: dict) -> str:\n new_uuid = str(uuid4())\n Entity.create(uuid=new_uuid, data=data[\"data\"])\n return new_uuid", "def makeid(cls):\n return str(uuid.uuid4().hex)", "def _new_device(device):\n key = f\"{DOMAIN}.{device.name}\"\n hass.data[DOMAIN][key] = device\n ent_platform = base_config[DOMAIN][CONF_TYPES].get(key, platform)\n discovery.load_platform(\n hass,\n ent_platform,\n DOMAIN,\n discovered={ATTR_NEW: [key]},\n hass_config=base_config,\n )", "def generate_id():\n return uuid4().get_hex()", "def device_id(self):\n return self.unique_id", "def m_create_identity(DID, domain_name, website, commercial_name, parent_node_account, password, overwrite):\n\n error, didDoc = create_identity(\n DID, domain_name, website, commercial_name, parent_node_account, password, overwrite)\n if error is not None:\n print(error)\n\n print(f\"Created\")", "def _add_id(self, attrs):\n _id = {}\n _id['id'] = str(attrs.get('name', ''))\n _id['valid_from'] = (\n _get_date_from_string(attrs.get('validFrom', '')))\n _id['created'] = (\n _get_date_from_string(attrs.get('created', '')))\n _id['device'] = str(attrs.get('device', ''))\n self._ids[str(attrs.get('name', ''))] = _id" ]
[ "0.7249364", "0.651375", "0.651375", "0.6454852", "0.64354026", "0.64354026", "0.6417076", "0.6392928", "0.63136935", "0.6307749", "0.6264777", "0.62547284", "0.6227422", "0.62167674", "0.6171738", "0.6131766", "0.6115982", "0.6034566", "0.60262775", "0.6024409", "0.59283864", "0.5922264", "0.590439", "0.58870065", "0.5881216", "0.58554393", "0.5836607", "0.58354914", "0.58351463", "0.583192" ]
0.70444405
1
Fetches the list of known device ids.
def list_ids(token):
    init_tenant_context(token, db)

    data = []
    LOGGER.debug(f" Fetching list with known devices")
    for id in db.session.query(Device.id).all():
        data.append(id[0])
    return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def find_devices() -> List[DeviceInfo]:\n return await Discovery.search_devices()", "def get_device_ids(self) -> list[bluetooth.BluetoothUuid]:\n return [bluetooth.BluetoothUuid(i) for i in self.deviceIds()]", "def get_devices(jwt: str) -> List:\n LOGGER.debug(\"Retrieving devices...\")\n\n args = {\n \"url\": \"{0}/device\".format(CONFIG['dojot']['url']),\n \"headers\": {\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer {0}\".format(jwt),\n },\n }\n\n res = DojotAPI.call_api(requests.get, args)\n\n devices_ids = [device['id'] for device in res['devices']]\n\n LOGGER.debug(\"... retrieved the devices\")\n\n return devices_ids", "def get_devices(self):\n return self.api_request('GET', self.url + '/device', {})", "def load_devices(self):\n response = self.oauth.get(url=f'{self.base_url}/json/devices/list')\n\n result = response.json()['device']\n return [(device['id'], device['name'], device['state']) for device in result]", "def list_devices():\r\n DeviceManagerCLI.BuildDeviceList()\r\n return DeviceManagerCLI.GetDeviceList()", "def list_devices(self):\n return [x for x in self.devices.keys()]", "def getDeviceList(self):\n return defer.succeed(self.discovered)", "def get_devices():\n devices = []\n for device_id in range(pm.lib.Pm_CountDevices()):\n devices.append(DeviceInfo(device_id))\n\n return devices", "async def async_get_devices(self) -> list[dict[str, Any]]:\n return await self.aiolivisi.async_get_devices()", "def devices(self):\n return self.enumerate_devices()", "def GetAllDevices(self):\n\n return list(self.YieldAllDevices())", "def enumerate_devices(vendor_id: int = 0x2C97) -> List[bytes]:\n devices: List[bytes] = []\n\n for hid_device in hid.enumerate(vendor_id, 0):\n if (hid_device.get(\"interface_number\") == 0 or\n # MacOS specific\n hid_device.get(\"usage_page\") == 0xffa0):\n devices.append(hid_device[\"path\"])\n\n assert len(devices) != 0, (\n f\"Can't find Ledger device with vendor_id {hex(vendor_id)}\")\n\n return devices", "def get_devices(self):\n return get_devices(self.api_key)", "def get_switch_ids():\n\n device_id_list = []\n url = 'https://' + APIC_EM + '/network-device'\n header = {'accept': 'application/json', 'X-Auth-Token': APIC_EM_TICKET}\n device_response = requests.get(url, headers=header, verify=False)\n device_json = device_response.json()\n device_info = device_json['response']\n for items in device_info:\n if items.get('family') == 'Switches and Hubs':\n device_id = items.get('id')\n device_id_list.append(device_id)\n return device_id_list", "def list_devices():\n return _lib.SeaTeaseAPI().list_devices()", "def get_devices(self):\n devices = self.get(\"event/device\")", "def _get_device_list(self):\n if self.app.config.cloud_type == 'ec2':\n # c5/m5 on AWS mounts EBS volumes as NVMe:\n # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nvme-ebs-volumes.html\n # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/device_naming.html\n for itype in ['c5', 'm5']:\n if itype in self.app.cloud_interface.get_type():\n return frozenset(glob('/dev/nvme[0-26]n1'))\n return frozenset(glob('/dev/*d[a-z]'))", "def list_devices(self):\n response = self.oauth.get(url=f'{self.base_url}/json/devices/list')\n\n result = response.json()['device']\n for device in result:\n print(device)", "def devices(self, **kwargs):\n return self._get(API.DEVICES.value, check_202=True, **kwargs)", "def get_ids(self) -> List[str]:", "def get_devices(self):\n\n \"\"\"\n # Note: This code is no longer required with the latest spt updates.\n # But that 
said, leaving for now so I don't risk breaking folks!\n if not self._use_lsscsi:\n message = \"Find Number of IOM's\"\n command = \"lsscsi | fgrep enclo | egrep 'HGST|WDC' | wc -l\"\n pdata = self._run_command(command=command, message=message, logger=self._logger, shell=True)\n ioms = (int)(pdata['stdout'].strip())\n if ioms > 1:\n self._use_lsscsi = True\n if not self._use_lsscsi and os.path.exists('/etc/multipath.conf'):\n self._use_lsscsi = True\n \"\"\"\n # Allow above logic or options to override lsscsi vs. spt usage.\n if not self._use_lsscsi or self._force_spt:\n self.get_devices_spt()\n else:\n self.get_devices_lsscsi()\n return", "def do_list(self, _):\n devices = []\n for source in self._target.devices:\n devices.append({\n 'name': source.device['name'],\n 'path': source.device['path'],\n })\n return devices", "def _get_ids_from_hostname(self, hostname):\r\n results = self.list_hardware(hostname=hostname, mask=\"id\")\r\n return [result['id'] for result in results]", "def list_devices(cls):\n # get all matching devices\n return usb.core.find(\n find_all=True,\n custom_match=lambda dev: (\n dev.idVendor == cls.vendor_id and dev.idProduct in cls.product_ids\n ),\n )", "def get_device_list(ip_address, headers):\n\tome_device_list = []\n\tnext_link_url = 'https://%s/api/DeviceService/Devices' % ip_address\n\twhile next_link_url is not None:\n\t\tdevice_response = requests.get(next_link_url, headers=headers, verify=False)\n\t\tnext_link_url = None\n\t\tif device_response.status_code == 200:\n\t\t\tdev_json_response = device_response.json()\n\t\t\tif dev_json_response['@odata.count'] <= 0:\n\t\t\t\tprint(\"No devices found at \", ip_address)\n\t\t\t\treturn\n\n\t\t\tif '@odata.nextLink' in dev_json_response:\n\t\t\t\tnext_link_url = 'https://%s/' %ip_address + dev_json_response['@odata.nextLink']\n\n\t\t\tif dev_json_response['@odata.count'] > 0:\n\t\t\t\tome_device_list = ome_device_list + [x['Id'] for x in dev_json_response['value']]\n\t\telse:\n\t\t\tprint(\"No devices found at \", ip_address)\n\n\treturn ome_device_list", "async def get_discovered_device_names(self):\n json = self._api_call(\"app/monitors/%s/devices\" % self.sense_monitor_id)\n self._devices = await [entry[\"name\"] for entry in json]\n return self._devices", "def get_devices(self):\n devices = []\n for i in self.devices:\n devices.append(self.devices[i])\n\n return devices", "def get_socket_ids() -> List[int]:\n socket_id_list = []\n for cpu_id in cpu_ids():\n api_file = open('/sys/devices/system/cpu/cpu' + str(cpu_id) + '/topology/physical_package_id')\n socket_id_list.append(int(api_file.readline().strip()))\n return list(set(socket_id_list))", "def list_devices(arn=None, nextToken=None):\n pass" ]
[ "0.7168416", "0.710372", "0.70740366", "0.70475334", "0.6912452", "0.68758357", "0.6864888", "0.6851874", "0.6843571", "0.6794221", "0.67756575", "0.677536", "0.6742102", "0.67125595", "0.6695285", "0.66945463", "0.6691035", "0.6688566", "0.6623563", "0.65172327", "0.6511924", "0.6493586", "0.64257574", "0.6423736", "0.64189965", "0.63828367", "0.63773143", "0.63745165", "0.6371287", "0.63697916" ]
0.8147232
0
Validates if the device id follows the rules implemented by dojot
def validate_device_id(device_id):
    regex = re.compile(r'^[0-9a-fA-F]{2,6}$')
    if regex.match(device_id) == None:
        raise ValidationError('Device ID must be 2-6 characters and must be hexadecimal (0-9,a-f,A-F).')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _valid_device(device):\n required_fields = ('name', 'type', 'group', 'canonical_name')\n if all(field in device for field in required_fields):\n return True\n return False", "def is_valid_pci_device_vendor_id(id):\n val = id.replace('0x', '').strip()\n if not is_valid_hex(id):\n return False\n if (len(val) > 4):\n return False\n return True", "def test_get_device_by_id(self):\n pass", "def test_device_id_validation_value_between_signed_unsigned_64b_int_maximums(self):\n\t\tserializer = GCMDeviceSerializer(data={\n\t\t\t\"registration_id\": \"foobar\",\n\t\t\t\"name\": \"Nexus 5\",\n\t\t\t\"device_id\": \"e87a4e72d634997c\",\n\t\t\t\"application_id\": \"XXXXXXXXXXXXXXXXXXXX\",\n\t\t})\n\t\tself.assertTrue(serializer.is_valid())", "def test_device_id_validation_value_between_signed_unsigned_64b_int_maximums(self):\n\t\tserializer = GCMDeviceSerializer(data={\n\t\t\t\"registration_id\": \"foobar\",\n\t\t\t\"name\": \"Nexus 5\",\n\t\t\t\"device_id\": \"e87a4e72d634997c\",\n\t\t})\n\t\tself.assertTrue(serializer.is_valid())", "def _check_validdevice(self, symbol):\n if symbol.type == self.scanner.KEYWORD and \\\n symbol.id in self.validdeviceids:\n\n return True\n else:\n return False", "def legacy_opid_syntax(self):\n returned = False\n if self.get_a_device_id():\n if self.valid_status_code(falcon.GetDeviceDetailsV1(ids=DEVICE_ID)):\n returned = True\n return returned", "def test_get_device_by_id1(self):\n pass", "def check_id(self, id):", "def validate_identifier(self, identifier):\n pass", "def redirected_opid_syntax(self):\n returned = self.get_a_device_id()\n if returned:\n if not self.valid_status_code(falcon.GetDeviceDetails(DEVICE_ID)):\n returned = False\n if not self.valid_status_code(falcon.GetDeviceDetails(ids=DEVICE_ID)):\n returned = False\n if not self.valid_status_code(falcon.GetDeviceDetails(parameters={\"ids\": [DEVICE_ID]})):\n returned = False\n if not self.valid_status_code(falcon.GetDeviceDetails(body={\"ids\": [DEVICE_ID]})):\n returned = False\n return returned", "def validateID(id):\n\n if re.compile('[0-9]+').match(id) == None:\n output.completeOutputError(InvalidArgumentCount(descape =\"'%s' is not a valid Id. 
ID should be numeric with Length = '%s' \" \n\t\t\t% (id, lib.constants._ATTR_ID_LENGHT)))\n return -1\n else:\n # Check for the lenght \n counter = 0\n for char in id:\n counter += 1\n print counter , lib.constants._ATTR_ID_LENGHT\n if counter > lib.constants._ATTR_ID_LENGHT :\n output.completeOutputError(InvalidArgumentCount(descape =\"'%s' exceeded the given length i.e Max Length = '%s'\" % \n\t\t\t(id, lib.constants._ATTR_ID_LENGHT)))\n return -1\n else:\n return 0\n return 0", "def new_opid_syntax(self):\n returned = False\n if self.get_a_device_id():\n if self.valid_status_code(falcon.GetDeviceDetailsV2(ids=DEVICE_ID)):\n returned = True\n return returned", "def test_uidvalidity(self):\n d = self._examineOrSelect()\n self._response(b'* OK [UIDVALIDITY 12345] UIDs valid')\n self.assertEqual(\n self.successResultOf(d),\n {'READ-WRITE': False, 'UIDVALIDITY': 12345})", "def is_id_valid(id_code: str) -> bool:\n if is_valid_gender_number(int(id_code[0:1])):\n if is_valid_year_number(int(id_code[1:3])):\n if is_valid_month_number(int(id_code[3:5])):\n if is_valid_day_number(int(id_code[0:1]), int(id_code[1:3]), int(id_code[3:5]), int(id_code[5:7])):\n if is_valid_birth_number(int(float(id_code[7:10]))):\n if is_valid_control_number(id_code):\n return True\n else:\n return False\n else:\n return False\n\n else:\n return False\n else:\n return False\n else:\n return False", "def is_order_id_valid(self):\n \n if not self.order_id:\n self.error_message = jsonify({'status':'error', 'message': 'orderId parameter missing'})\n return False\n if not re.match('^[a-f0-9]{32}$', self.order_id):\n self.error_message = jsonify({'status': 'error', 'message': 'orderId must be set to (hex) UUID'})\n return False\n return True", "def check_device(self, class_id, vendor_id, product_id):\n if len(self.class_id) > 0 and class_id != self.class_id:\n return False\n\n if len(self.vendor_id) > 0 and vendor_id != self.vendor_id:\n return False\n\n if len(self.devices) > 0 and product_id not in self.devices:\n return False\n\n return True", "def test_by_device_id(self):\n device = Device.objects.by_device_id(self.user, self.device.device_id)\n self.assertEqual(device, self.device)", "def test_update_device_by_id(self):\n pass", "def handle_device_selection(selected_device_id_str, valid_devices):\n if selected_device_id_str is None:\n # no ID specified\n valid_device_selected = False\n else:\n # device ID is specified, check if valid\n selected_device_id = int(selected_device_id_str)\n if selected_device_id in valid_devices:\n valid_device_selected = True\n else:\n valid_device_selected = False\n print('Invalid device ID given (' + str(selected_device_id) + ').')\n\n if not valid_device_selected:\n # device ID invalid or not specified, prompt for selection\n print('Available devices:')\n print_device_dict(valid_devices)\n\n selected_device_id = int(input('Choose a device from the list above: '))\n while selected_device_id not in valid_devices:\n selected_device_id = int(input('Invalid device ID, please try again: '))\n\n return selected_device_id", "def is_id_valid(id_code: str) -> bool:\n if id_code.isdigit():\n if len(str(id_code)) == 11:\n id_code = str(id_code)\n gender_number = int(id_code[0:1])\n day = int(id_code[5:7])\n month = int(id_code[3:5])\n year = id_code[1:3]\n birth_number = id_code[7:10]\n if is_valid_gender_number(gender_number) \\\n and is_valid_year_number(int(year)) \\\n and is_valid_month_number(int(month)) \\\n and is_valid_day_number(gender_number, int(year), int(month), int(day)) \\\n and 
is_valid_birth_number(int(birth_number)) \\\n and is_valid_control_number(str(id_code)):\n return True\n return False\n return False\n return False", "def uber_syntax(self):\n returned = self.get_a_device_id()\n if returned:\n if not self.valid_status_code(uber.command(\"GetDeviceDetails\", ids=DEVICE_ID)):\n returned = False\n if not self.valid_status_code(uber.command(\"GetDeviceDetails\", body={\"ids\": [DEVICE_ID]})):\n returned = False\n if not self.valid_status_code(uber.command(\"GetDeviceDetails\", body={\"ids\": DEVICE_ID})):\n returned = False\n if not self.valid_status_code(uber.command(\"GetDeviceDetails\", parameters={\"ids\": DEVICE_ID})):\n returned = False\n if not self.valid_status_code(uber.command(\"GetDeviceDetailsV1\", ids=DEVICE_ID)):\n returned = False\n if not self.valid_status_code(uber.command(\"GetDeviceDetailsV2\", ids=DEVICE_ID)):\n returned = False\n if not self.valid_status_code(uber.command(\"PostDeviceDetailsV2\", body={\"ids\": [DEVICE_ID]})):\n returned = False\n if not self.valid_status_code(uber.command(\"PostDeviceDetailsV2\", parameters={\"ids\": [DEVICE_ID]})):\n returned = False\n if not self.valid_status_code(uber.command(\"PostDeviceDetailsV2\", ids=DEVICE_ID)):\n returned = False\n if not self.valid_status_code(uber.command(\"PostDeviceDetailsV2\", ids=[DEVICE_ID])):\n returned = False\n\n return returned", "def check_osd_id(osd_id):\n if not re.match(r'^[0-9]+$', osd_id):\n raise Error('osd id is not numeric', osd_id)", "def test_update_device_by_id1(self):\n pass", "def identify_id(id: str) -> bool:\n return validate_handle(id)", "def test_delete_device_by_id(self):\n pass", "def _check_lidvid_field(self, doi: Doi):\n\n vid: Optional[str]\n if \"::\" in doi.pds_identifier:\n lid, vid = doi.pds_identifier.split(\"::\")\n else:\n lid = doi.pds_identifier\n vid = None\n\n lid_tokens = lid.split(\":\")\n\n try:\n # Make sure the prescribed static fields are correct\n required_prefix_elements = [\"urn\", \"nasa\", \"pds\"]\n if lid_tokens[:3] != required_prefix_elements:\n raise InvalidIdentifierException(f\"LIDVID must start with elements {required_prefix_elements}\")\n\n # Make sure we got the minimum number of fields, and that\n # the number of fields is consistent with the product type\n if not MIN_LID_FIELDS <= len(lid_tokens) <= MAX_LID_FIELDS:\n raise InvalidIdentifierException(\n f\"LIDVID must contain only between {MIN_LID_FIELDS} \"\n f\"and {MAX_LID_FIELDS} colon-delimited fields, \"\n f\"got {len(lid_tokens)} field(s)\"\n )\n\n # Now check each field for the expected set of characters\n token_regex = re.compile(r\"[a-z0-9-._]*\")\n\n for index, token in enumerate(lid_tokens):\n if not token_regex.fullmatch(token):\n raise InvalidIdentifierException(\n f\"LID field {index + 1} ({token}) is invalid. \"\n f\"Fields must only consist of lowercase letters, digits, \"\n f\"hyphens (-), underscores (_) or periods (.), per PDS SR Sec. 6D.2\"\n )\n\n # Make sure the VID conforms to a version number\n version_regex = re.compile(r\"^\\d+\\.\\d+$\")\n\n if vid and not version_regex.fullmatch(vid):\n raise InvalidIdentifierException(\n f\"Parsed VID ({vid}) does not conform to a valid version identifier. \"\n \"Version identifier must consist only of a major and minor version \"\n \"joined with a period (ex: 1.0), per PDS SR Sec. 
6D.3\"\n )\n\n # Finally, ensure the whole identifier conforms to the length constraint\n identifier_max_length = 255\n if not len(doi.pds_identifier) <= identifier_max_length:\n raise InvalidIdentifierException(\n f\"LIDVID {doi.pds_identifier} does not conform to PDS identifier max length constraint \"\n f\"({identifier_max_length}), per PDS SR Sec. 6D\"\n )\n except InvalidIdentifierException as err:\n raise InvalidIdentifierException(\n f\"The record identifier {doi.pds_identifier} (DOI {doi.doi}) \"\n f\"does not conform to a valid LIDVID format.\\n\"\n f\"Reason: {str(err)}\\n\"\n \"If the identifier is not intended to be a LIDVID, use the \"\n \"--force option to bypass the results of this check.\"\n )", "def _check_identifier_fields(self, doi: Doi):\n # Make sure we have an identifier to key off of\n if not doi.pds_identifier:\n raise InvalidRecordException(\n \"Record provided with missing PDS identifier field. \"\n \"Please ensure a LIDVID or similar identifier is provided for \"\n \"all DOI requests.\"\n )\n\n # Make sure the doi and id fields are consistent, if present\n if doi.doi and doi.id:\n prefix, suffix = doi.doi.split(\"/\")\n\n if suffix != doi.id:\n raise InvalidRecordException(\n f\"Record for {doi.pds_identifier} has inconsistent \"\n f\"DOI ({doi.doi}) and ID ({doi.id}) fields. Please reconcile \"\n \"the inconsistency and resubmit the request.\"\n )", "def _get_device_id_from_registered(api) -> str:\n\n try:\n api.oauth_login(\"bad\")\n except InvalidDeviceId as original_exception:\n error_message = original_exception.args[0]\n\n device_ids_str = error_message.split(\"Your valid device IDs are:\")[-1]\n device_ids = device_ids_str.split(\"\\n\")\n device_ids = [device_id.replace(\"* \", \"\") for device_id in device_ids]\n return device_ids[-1]", "def test_get_device_presence(self):\n\n device_id = self.properties['device1.id']\n response = self.api.get_device_presence(device_id)\n\n self.assertEqual(device_id, response.sdid, 'Sdids must match')\n self.assertIsNotNone(response.data.last_seen_on, 'last_seen_on')\n self.assertIsNotNone(response.data.connected, 'connected')" ]
[ "0.6638264", "0.6557552", "0.6472533", "0.64402276", "0.64393127", "0.64205253", "0.6330009", "0.62676156", "0.61864066", "0.61861867", "0.6160878", "0.615324", "0.61483675", "0.6136146", "0.61361164", "0.611015", "0.6078391", "0.6041878", "0.60346705", "0.6021835", "0.6003507", "0.5893877", "0.58725315", "0.5866727", "0.5862497", "0.5846503", "0.5840282", "0.5823168", "0.5801488", "0.57875943" ]
0.73607147
0
Associates given template with device
def add_template_to_device(token, device_id, template_id):
    tenant = init_tenant_context(token, db)
    orm_device = assert_device_exists(device_id)
    orm_template = assert_template_exists(template_id)
    orm_device.templates.append(orm_template)
    try:
        db.session.commit()
    except IntegrityError as error:
        handle_consistency_exception(error)
    result = {
        'message': 'device updated',
        'device': serialize_full_device(orm_device, tenant)
    }
    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_create_device_template(self):\n pass", "def test_update_device_template(self):\n pass", "def test_get_device_template(self):\n pass", "def create_device(jwt: str, template_id: str, label: str) -> str:\n LOGGER.debug(\"Creating template...\")\n\n args = {\n \"url\": \"{0}/device\".format(CONFIG['dojot']['url']),\n \"headers\": {\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer {0}\".format(jwt),\n },\n \"data\": json.dumps({\n \"templates\": [template_id],\n \"attrs\": {},\n \"label\": label,\n }),\n }\n\n res = DojotAPI.call_api(requests.post, args)\n\n LOGGER.debug(\"... created the template\")\n return res[\"devices\"][0][\"id\"]", "def template(self, template):\n self._template = template", "def attach_template(self, api, workdir, ext_name, templates_iter, target_uuid_set=None):\n def load_template_input(template_name, saved_id, target_id):\n if target_id is None:\n self.log_debug('Skip %s, saved template is not on target node', template_name)\n return None\n\n saved_values = DeviceTemplateValues.load(workdir, ext_name, template_name, saved_id)\n if saved_values is None:\n self.log_error('DeviceTemplateValues file not found: %s, %s', template_name, saved_id)\n return None\n if saved_values.is_empty:\n self.log_debug('Skip %s, saved template has no attachments', template_name)\n return None\n\n target_attached_uuid_set = {uuid for uuid, _ in DeviceTemplateAttached.get_raise(api, target_id)}\n if target_uuid_set is None:\n allowed_uuid_set = target_attached_uuid_set\n else:\n saved_attached = DeviceTemplateAttached.load(workdir, ext_name, template_name, saved_id)\n if saved_attached is None:\n self.log_error('DeviceTemplateAttached file not found: %s, %s', template_name, saved_id)\n return None\n saved_attached_uuid_set = {uuid for uuid, _ in saved_attached}\n allowed_uuid_set = target_uuid_set & saved_attached_uuid_set - target_attached_uuid_set\n\n input_list = saved_values.input_list(allowed_uuid_set)\n if len(input_list) == 0:\n self.log_debug('Skip %s, no devices to attach', template_name)\n return None\n\n return input_list\n\n def is_template_cli(template_name, saved_id):\n return DeviceTemplate.load(workdir, ext_name, template_name, saved_id, raise_not_found=True).is_type_cli\n\n template_input_list = [\n (name, target_id, load_template_input(name, saved_id, target_id), is_template_cli(name, saved_id))\n for name, saved_id, target_id in templates_iter\n ]\n return self._place_requests(api, template_input_list, is_edited=target_uuid_set is None)", "def template(self, template):\n\n self._template = template", "def template(self, template):\n\n self._template = template", "def test_get_device_templates(self):\n pass", "def setTemplate(self, template):\n self.template = template", "def set_template(self, name, value):\n\n self.templates[name] = value", "def test_delete_device_template(self):\n pass", "def template_spec(self, template_spec):\n\n self._template_spec = template_spec", "def create(self, template, print_cmd=False):\n brand_mapping = {'solaris11' : 'SYSsolaris', 'solaris' : 'SYSsolaris', 'solaris10' : 'SYSsolaris10'}\n if brand_mapping.has_key(template):\n template = brand_mapping[template]\n\n return self._create_minimal(template, print_cmd)\n\n #self._write_sysidcfg()", "def test_share_template_registration(self):\n pass", "def test_otoroshi_controllers_adminapi_templates_controller_create_from_template_tcp(self):\n pass", "def post_service_template_create(self, resource_dict):\n pass", "def test_update_template_registration(self):\n 
pass", "def test_register_template(self):\n pass", "def test_retrieve_template_registration(self):\n pass", "def attached_devices(template):\n\n url = base_url + \"/template/device/config/attached/{0}\".format(template)\n\n response = requests.get(url=url, headers=header,verify=False)\n if response.status_code == 200:\n items = response.json()['data']\n else:\n print(\"Failed to get template details\")\n exit()\n\n headers = [\"Host Name\", \"Device IP\", \"Site ID\", \"Host ID\", \"Host Type\"]\n table = list()\n\n for item in items:\n tr = [item['host-name'], item['deviceIP'], item['site-id'], item['uuid'], item['personality']]\n table.append(tr)\n try:\n click.echo(tabulate.tabulate(table, headers, tablefmt=\"fancy_grid\"))\n except UnicodeEncodeError:\n click.echo(tabulate.tabulate(table, headers, tablefmt=\"grid\"))", "def pre_service_template_create(self, resource_dict):\n pass", "def remove_template_from_device(token, device_id, template_id):\n tenant = init_tenant_context(token, db)\n updated_device = assert_device_exists(device_id)\n relation = assert_device_relation_exists(device_id, template_id)\n\n # Here (for now) there are no more validations to perform, as template\n # removal cannot violate attribute constraints\n\n db.session.delete(relation)\n db.session.commit()\n result = {\n 'message': 'device updated',\n 'device': serialize_full_device(updated_device, tenant)\n }\n\n return result", "def update_gateway_template(\n templates: Dict[str, Any], source_data: str,\n namespace: Optional[str], purpose: str,\n) -> None:\n gateway_templates = templates['gateway-templates']\n assert isinstance(gateway_templates, list)\n for gateway_template in gateway_templates:\n if (\n gateway_template.get('namespace') == namespace\n and gateway_template.get('purpose') == purpose\n ):\n gateway_template['template'] = source_data\n return\n gateway_templates.append({\n 'namespace': namespace,\n 'purpose': purpose,\n 'template': source_data,\n })", "async def test_setting_attribute_with_template(\n hass: HomeAssistant, mqtt_mock_entry: MqttMockHAClientGenerator\n) -> None:\n await help_test_setting_attribute_with_template(\n hass, mqtt_mock_entry, select.DOMAIN, DEFAULT_CONFIG\n )", "def _new_device(device):\n key = f\"{DOMAIN}.{device.name}\"\n hass.data[DOMAIN][key] = device\n ent_platform = base_config[DOMAIN][CONF_TYPES].get(key, platform)\n discovery.load_platform(\n hass,\n ent_platform,\n DOMAIN,\n discovered={ATTR_NEW: [key]},\n hass_config=base_config,\n )", "def create_device(self, device_dict):\n devices = {'devices': [device_dict]}\n url = '{}/iot/devices'.format(self.url)\n return self.post(url, data=json.dumps(devices), headers=self.headers)", "def reattach_template(self, api, templates_iter):\n def get_template_input(template_id):\n uuid_list = [uuid for uuid, _ in DeviceTemplateAttached.get_raise(api, template_id)]\n values = DeviceTemplateValues(api.post(DeviceTemplateValues.api_params(template_id, uuid_list),\n DeviceTemplateValues.api_path.post))\n return values.input_list()\n\n def is_template_cli(template_id):\n return DeviceTemplate.get_raise(api, template_id).is_type_cli\n\n template_input_list = [\n (template_name, template_id, get_template_input(template_id), is_template_cli(template_id))\n for template_name, template_id in templates_iter\n ]\n return self._place_requests(api, template_input_list, is_edited=True)", "def register_template(self, name, template):\n key = name, len(template.args)\n existing = self.templates.get(key)\n if existing:\n raise 
mio.MIOException('The template \"%s/%d\" is already registered' % (name, len(template.args)))\n self.templates[key] = template", "def _create_from_template(self):\n template_file = self._helper._get_template_file_path()\n self._engine.open_file_by_path(template_file)\n self._save_current_as_new()" ]
[ "0.7093963", "0.6741128", "0.6444731", "0.6202347", "0.5982737", "0.59544533", "0.58833015", "0.58833015", "0.58812666", "0.5825397", "0.57730305", "0.5758544", "0.5596477", "0.5593448", "0.55852723", "0.5574858", "0.5542204", "0.5521828", "0.5485388", "0.545827", "0.5449561", "0.5448409", "0.54429775", "0.5412514", "0.53936476", "0.53885084", "0.5357586", "0.5307504", "0.5296294", "0.5294167" ]
0.7004287
1
Disassociates given template with device
def remove_template_from_device(token, device_id, template_id):
    tenant = init_tenant_context(token, db)
    updated_device = assert_device_exists(device_id)
    relation = assert_device_relation_exists(device_id, template_id)

    # Here (for now) there are no more validations to perform, as template
    # removal cannot violate attribute constraints

    db.session.delete(relation)
    db.session.commit()
    result = {
        'message': 'device updated',
        'device': serialize_full_device(updated_device, tenant)
    }
    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_delete_device_template(self):\n pass", "def test_unshare_template_registration(self):\n pass", "def test_unregister_template(self):\n pass", "def delete_template(self):\n try:\n os.remove(self.path)\n except Exception:\n pass", "def untag_resource(resourceArn=None, tagKeys=None):\n pass", "def detach(target, sysip):\n click.secho(\"Attempting to detach template.\")\n\n payload = {\n \"deviceType\":\"vedge\",\n \"devices\":[ \n {\n \"deviceId\":str(target),\n \"deviceIP\":str(sysip)\n }\n ]\n }\n\n url = base_url + \"/template/config/device/mode/cli\"\n\n response = requests.post(url=url, data=json.dumps(payload), headers=header, verify=False)\n if response.status_code == 200:\n id = response.json()[\"id\"]\n url = base_url + \"/device/action/status/\" + str(id)\n while(1):\n status_res = requests.get(url,headers=header,verify=False)\n if status_res.status_code == 200:\n push_status = status_res.json()\n if push_status['summary']['status'] == \"done\":\n if 'Success' in push_status['summary']['count']:\n print(\"Changed configuration mode to CLI\")\n elif 'Failure' in push_status['summary']['count']:\n print(\"Failed to change configuration mode to CLI\")\n exit()\n break\n else:\n print(\"Failed to detach template with error \" + response.text)\n exit()", "def unlink(self):\n if self.resource is None:\n self.resource = self.client.get_resource(self.href)\n self.client.post_linked_resource(\n self.resource, RelationType.UNLINK_FROM_TEMPLATE,\n EntityType.ROLE.value, None)", "def erase_device(device):\n command = 'erase \"%s\"' % (device.udid,)\n _run_command(command)", "def untag_resource(ResourceArn=None, TagKeys=None):\n pass", "def untag_resource(ResourceArn=None, TagKeys=None):\n pass", "def untag_resource(ResourceArn=None, TagKeys=None):\n pass", "def test_delete_template_subscription(self):\n pass", "def delete_custom_template(self, name, filename, context):\n pass", "def delete(device):\n delete_subject(device)\n return redirect_back('index')", "def disableCustomItemTemplate(self):\n self.getItemTemplate().delete()", "def test_delete_namespaced_template(self):\n pass", "def untag_resource(Resource=None, TagKeys=None):\n pass", "def deactivate(self):\n if self.parents[0].type == 'dm-multipath':\n devmap = block.getMap(major=self.major, minor=self.minor)\n if devmap:\n try:\n block.removeDeviceMap(devmap)\n except Exception as e:\n raise errors.DeviceTeardownError(\"failed to tear down device-mapper partition %s: %s\" % (self.name, e))\n udev.settle()", "def test_delete_subscription_template(self):\n pass", "def deauthentication_from_ap(self):\n # send deauthentication\n self.ap1.tx_msdu(da=self.host.macaddr, body=wifi.deauthenticate(), \n fctl=wifi.fctl_deauthentication)\n\n # expect a disassociation indication with a correct status\n assert(self.a.nxapi_disassociate_ind() == True)\n \n # generate a random frame\n msdu = self.host.tx_msdu(da=self.ap1.macaddr, length=1000, prio=1)\n \n # wait for data send confirmation (not in the air)\n self.a.host_send_data_cfm(msdu)", "def delete_template():\n posted_json = request.get_json(force=True)\n try:\n name = posted_json['template_name']\n except KeyError:\n print(\"Not all required keys are present!\")\n r = jsonify(message=\"Not all required keys for add template are present\", success=False, status_code=400)\n r.status_code = 400\n return r\n\n if bootstrapper_utils.delete_template(name):\n return jsonify(success=True, message='Deleted Template Successfully', status_code=200)\n else:\n r = jsonify(success=False, 
message='Could not delete template', status_code=500)\n r.status_code = 500\n return r", "def delete_device(self):\n # PROTECTED REGION ID(AsyncTabata.delete_device) ENABLED START #\n # PROTECTED REGION END # // AsyncTabata.delete_device", "def deregister(self, device_token):\n url = DEVICE_TOKEN_URL + device_token\n status, response = self._request('DELETE', '', url, None)\n if status != 204:\n raise AirshipFailure(status, response)", "def delete_gating_templates(self, template_name: str) -> None:\n for g in self.gating_templates:\n if template_name == 'all' or g.template_name == template_name:\n g.delete()\n if template_name == 'all':\n self.gating_templates = []\n else:\n self.gating_templates = [g for g in self.gating_templates if g.template_name != template_name]\n self.save()", "def dmcrypt_unmap(\n _uuid\n ):\n args = [\n 'cryptsetup',\n 'remove',\n _uuid\n ]\n\n try:\n command_check_call(args)\n\n except subprocess.CalledProcessError as e:\n raise Error('unable to unmap device', _uuid, e)", "def destroy(self, context=None):\n self.dbapi.destroy_nic(self.uuid)\n self.obj_reset_changes()", "def post_service_template_delete(self, resource_id, resource_dict):\n pass", "def disassociation_from_ap(self):\n # send disassociation\n self.ap1.tx_msdu(da=self.host.macaddr, body=wifi.disassociation(), \n fctl=wifi.fctl_disassociation)\n \n # expect a deauth frame\n mpdu = self.ap1.rx_mpdu(wifi.AIR_MGMT)\n \n # sanity checks\n assert(mpdu.typesubtype == wifi.fctl_deauthentication)\n \n # expect a disassociation indication with a correct status\n assert(self.a.nxapi_disassociate_ind() == True)\n \n # generate a random frame\n msdu = self.host.tx_msdu(da=self.ap1.macaddr, length=1000, prio=1)\n \n # wait for data send confirmation (not in the air)\n self.a.host_send_data_cfm(msdu)", "def deallocate_for_instance(self, context, instance, **kwargs):\n args = kwargs\n args['instance_id'] = instance['id']\n args['project_id'] = instance['project_id']\n rpc.cast(context, FLAGS.network_topic,\n {'method': 'deallocate_for_instance',\n 'args': args})", "def unpair_devices(device_pair):\n command = 'unpair \"%s\"' % (device_pair.identifier,)\n _run_command(command)" ]
[ "0.68235356", "0.64119965", "0.6262007", "0.5936887", "0.58706874", "0.58450484", "0.57327724", "0.5726559", "0.5720364", "0.5720364", "0.5720364", "0.57185036", "0.570645", "0.56290376", "0.56244653", "0.5588681", "0.55878305", "0.5567942", "0.55649894", "0.5563578", "0.5558373", "0.55198133", "0.5491745", "0.54893184", "0.5465126", "0.5439462", "0.53976125", "0.53846407", "0.53636986", "0.5358406" ]
0.6834273
0
Return a list of devices that have a particular template associated to it
def get_by_template(token, params, template_id):
    tenant = init_tenant_context(token, db)
    page = (
        db.session.query(Device)
        .join(DeviceTemplateMap)
        .filter_by(template_id=template_id)
        .paginate(page=params.get('page_number'), per_page=params.get('per_page'), error_out=False)
    )
    devices = []
    for d in page.items:
        devices.append(serialize_full_device(d, tenant))
    result = {
        'pagination': {
            'page': page.page,
            'total': page.pages,
            'has_next': page.has_next,
            'next_page': page.next_num
        },
        'devices': devices
    }
    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_device_templates(self):\n pass", "def attached_devices(template):\n\n url = base_url + \"/template/device/config/attached/{0}\".format(template)\n\n response = requests.get(url=url, headers=header,verify=False)\n if response.status_code == 200:\n items = response.json()['data']\n else:\n print(\"Failed to get template details\")\n exit()\n\n headers = [\"Host Name\", \"Device IP\", \"Site ID\", \"Host ID\", \"Host Type\"]\n table = list()\n\n for item in items:\n tr = [item['host-name'], item['deviceIP'], item['site-id'], item['uuid'], item['personality']]\n table.append(tr)\n try:\n click.echo(tabulate.tabulate(table, headers, tablefmt=\"fancy_grid\"))\n except UnicodeEncodeError:\n click.echo(tabulate.tabulate(table, headers, tablefmt=\"grid\"))", "def findPeasWithTemplate(templateType):\n\ttemplateType=templateType.upper()\n\tresults=[]\n\t#Search through the currently loaded master pod\n\tfor peaName in masterPod.currentMasterPod.peas:\n\t\tpeaObject=masterPod.currentMasterPod.peas[peaName]\n\t\t#If the attribute matches then add to results\n\t\tif peaObject.templateType.upper() == templateType:\n\t\t\tresults.append(peaObject)\n\treturn results", "def test_get_device_template(self):\n pass", "def template_list(call=None):\n templates = {}\n session = _get_session()\n vms = session.xenapi.VM.get_all()\n for vm in vms:\n record = session.xenapi.VM.get_record(vm)\n if record[\"is_a_template\"]:\n templates[record[\"name_label\"]] = record\n return templates", "def find_template(self, name=None, hv=None):\n if len(self.templates) <= 0:\n self.get_hypervisors()\n if name is not None and hv is not None:\n template_list = filter(\n lambda x: name in x.descr and x.hypervisor == self.hypervisors[hv], self.templates\n )\n elif name is not None and hv is None:\n template_list = filter(\n lambda x: name in x.descr, self.templates\n )\n elif name is None and hv is not None:\n template_list = filter(\n lambda x: x.hypervisor == self.hypervisors[hv], self.templates\n )\n else:\n raise Exception('Error, no pattern defined')\n return template_list", "def get_devices(token, params, sensitive_data=False):\n tenant = init_tenant_context(token, db)\n\n pagination = {'page': params.get('page_number'), 'per_page': params.get('per_page'), 'error_out': False}\n\n SORT_CRITERION = {\n 'label': Device.label,\n None: Device.id\n }\n sortBy = SORT_CRITERION.get(params.get('sortBy'))\n\n attr_filter = []\n query = params.get('attr')\n\n for attr_label_item in query:\n parsed = re.search('^(.+){1}=(.+){1}$', attr_label_item)\n attr_label = []\n attr_label.append(DeviceAttr.label == parsed.group(1))\n # static value must be the override, if any\n attr_label.append(text(\"coalesce(overrides.static_value, attrs.static_value)=:static_value \").bindparams(static_value=parsed.group(2)))\n attr_filter.append(and_(*attr_label))\n\n query = params.get('attr_type')\n for attr_type_item in query:\n attr_filter.append(DeviceAttr.value_type == attr_type_item)\n\n label_filter = []\n target_label = params.get('label')\n if target_label:\n label_filter.append(Device.label.like(\"%{}%\".format(target_label)))\n\n template_filter = []\n target_template = params.get('template')\n if target_template:\n template_filter.append(DeviceTemplateMap.template_id == target_template)\n\n if (attr_filter): #filter by attr\n LOGGER.debug(f\" Filtering devices by {attr_filter}\")\n\n page = db.session.query(Device) \\\n .join(DeviceTemplateMap, isouter=True)\n\n page = page.join(DeviceTemplate) \\\n .join(DeviceAttr, isouter=True) 
\\\n .join(DeviceOverride, (Device.id == DeviceOverride.did) & (DeviceAttr.id == DeviceOverride.aid), isouter=True)\n\n page = page.filter(*label_filter) \\\n .filter(*template_filter) \\\n .filter(*attr_filter) \\\n .order_by(sortBy) \\\n .paginate(**pagination)\n\n\n elif label_filter or template_filter: # only filter by label or/and template\n if label_filter:\n LOGGER.debug(f\"Filtering devices by label: {target_label}\")\n\n if template_filter:\n LOGGER.debug(f\"Filtering devices with template: {target_template}\") \n \n page = db.session.query(Device) \\\n .join(DeviceTemplateMap, isouter=True)\n\n if sensitive_data: #aditional joins for sensitive data\n page = page.join(DeviceTemplate) \\\n .join(DeviceAttr, isouter=True) \\\n .join(DeviceOverride, (Device.id == DeviceOverride.did) & (DeviceAttr.id == DeviceOverride.aid), isouter=True)\n\n page = page.filter(*label_filter) \\\n .filter(*template_filter) \\\n .order_by(sortBy) \\\n .paginate(**pagination)\n\n else:\n LOGGER.debug(f\" Querying devices sorted by device id\")\n page = db.session.query(Device).order_by(sortBy).paginate(**pagination)\n\n devices = []\n \n if params.get('idsOnly').lower() in ['true', '1', '']:\n return DeviceHandler.get_only_ids(page)\n\n for d in page.items:\n devices.append(serialize_full_device(d, tenant, sensitive_data))\n\n\n result = {\n 'pagination': {\n 'page': page.page,\n 'total': page.pages,\n 'has_next': page.has_next,\n 'next_page': page.next_num\n },\n 'devices': devices\n }\n\n return result", "def find_dev_by_prefix(devices, prefix):\n result = []\n for dev in devices.values():\n if dev.unique_id.startswith(prefix):\n result.append(dev)\n return result", "def find_devices (devicelist):\n vprint(\"\\nFind known devices:\")\n for device in devicelist:\n if find_device(device) is not None :\n vprint(\"\\tFound :\", device)\n else:\n vprint(\"\\tNOT found:\", device )\n vprint(\"..........\") \n return", "def get_schemas(self):\n templates = [['Template GUID']]\n r = self.system_cursor.execute('{Call wtGetTemplateList(%s)}' % (self.dsn['ProfileGuid'],))\n for row in r.fetchall():\n templates.append([row.TEMPLATE_GUID])\n return templates", "def get_templates(templates_path_pattern):\n templates_paths = glob.glob(templates_path_pattern)\n cars = []\n notcars = []\n for template_path in templates_paths:\n if 'non-vehicles' in template_path:\n notcars.append(template_path)\n else:\n cars.append(template_path)\n return cars, notcars", "def find_templates(self, name):\n script = (\n 'Get-SCVMTemplate -Name \\\"{}\\\" -VMMServer $scvmm_server')\n data = self.get_json(script.format(name))\n # Check if the data returned to us was a list or 1 dict. Always return a list\n if not data:\n return []\n elif isinstance(data, list):\n return [SCVMTemplate(system=self, raw=tmpl_data) for tmpl_data in data]\n return [SCVMTemplate(system=self, raw=data)]", "def find(ctx, name):\n conf = settings.devices.get(name, dict())\n if conf.get('type') == 'command':\n return conf, name, name\n\n uuids = ctx.obj['uuids']\n context = Context()\n for dev in iter(context.list_devices()):\n if 'ID_FS_TYPE' in dev:\n if name == uuids.get(dev.get('ID_FS_UUID')):\n return (settings.devices[name], dev['DEVNAME'],\n settings.devices[name].get('label',\n dev.get('ID_FS_LABEL')))\n\n print('Device \"%s\" not found.' 
% name)\n sys.exit(1)", "def get_templates(template_folder, search_term=''):\n return [template for template in os.listdir(template_folder)\n if search_term in template]", "def get(self):\n try:\n queryset_devices = DevicesTmp.objects.filter(operation_id=self.operation_id)\n field_relation_ships = {\n 'hostname': 'hostname',\n 'ip': 'ip',\n 'telnet_port': 'telnet_port',\n 'snmp_port': 'snmp_port',\n 'snmp_community': 'snmp_community',\n 'snmp_version': 'snmp_version',\n 'login_expect': 'login_expect',\n 'device_type': 'device_type',\n 'telnet_status': 'telnet_status',\n 'status_type': 'snmp_status',\n 'group_name': 'group_name',\n 'ostype': 'ostype',\n }\n query_data = {\n 'hostname': self.hostname,\n 'ip': self.ip,\n 'telnet_port': self.telnet_port,\n 'snmp_port': self.snmp_port,\n 'snmp_community': self.snmp_community,\n 'snmp_version': self.snmp_version,\n 'login_expect': self.login_expect,\n 'device_type': self.device_type,\n 'telnet_status': self.telnet_status,\n 'snmp_status': self.status_type,\n 'group_name': self.group_name,\n 'ostype': self.ostype_name,\n }\n search_fields = ['hostname', 'ip', 'telnet_port', 'snmp_port', 'snmp_community', 'snmp_version',\n 'login_expect', 'device_type', 'telnet_status', 'status_type', 'group_name', 'ostype']\n sorts, search_conditions = views_helper.get_search_conditions(self.request, field_relation_ships,\n query_data, search_fields)\n if sorts != []:\n if 'ostype' in sorts:\n sorts = ['ostype__name' if x == 'ostype' else x for x in sorts]\n if '-ostype' in sorts:\n sorts = ['-ostype__name' if x == '-ostype' else x for x in sorts]\n else:\n sorts = ['device_id']\n if search_conditions:\n ostype_condition = search_conditions.get('ostype__contains')\n if ostype_condition is not None:\n ostype_list = Ostype.objects.filter(**{\"name__contains\": ostype_condition})\n queryset_devices = queryset_devices.filter(**{'ostype__in': ostype_list})\n del search_conditions['ostype__contains']\n queryset_devices = queryset_devices.filter(**search_conditions).order_by(*sorts)\n else:\n queryset_devices = queryset_devices.order_by(*sorts)\n serializer = DevicesTmpSerializer(queryset_devices, many=True)\n paginator = Paginator(serializer.data, int(self.max_size_per_page))\n contacts = paginator.page(int(self.page_from))\n data = {\n 'data': contacts.object_list,\n 'operation_id': self.operation_id,\n 'new_token': self.new_token,\n 'num_page': paginator.num_pages,\n 'page_range': list(paginator.page_range),\n 'page_has_next': contacts.has_next(),\n 'total_num': len(queryset_devices),\n 'current_page_num': contacts.number,\n constants.STATUS: {\n constants.STATUS: constants.TRUE,\n constants.MESSAGE: constants.SUCCESS\n },\n }\n return api_return(data=data)\n except Exception, e:\n print e\n raise e", "def get_template(self ,template_name):\n\n found = False\n for template in self.templates:\n if template['name'] == template_name:\n found = True\n return template\n if not found:\n return None", "def get_queryset(self):\n return Template.objects.all()", "def retrieve_templates(self, category):\n\t\ttemplate_list_pool = self.template_list_pool\n\t\tfrom_redis = False\n\t\tclass_id = category + '#*'\n\t\tfirst_type_code,second_type_code = category.split('.')\n\n\t\t# if class_id not in template_dic_pool.keys():\n\t\t# Get template from redis at first.\n\t\tif template_redis.get(class_id) is not None:\n\t\t\ttemplate_list_pool[class_id] = ujson.loads(template_redis.get(class_id))\n\t\t\tfrom_redis = True\n\t\telse:\n\t\t\ttemplate_list_pool[class_id] = None\n\n\t\t# 
Search template from database when template is not in redis.\n\t\tif template_list_pool[class_id] is None:\n\t\t\t# import pdb;pdb.set_trace()\n\t\t\ttemplates = AssemblyTemplate.retrieve_lv2assembly_template_list( category )\n\t\t\ttemplate_list_pool[class_id] = templates\n\n\t\t\t# Store template in redis.\n\t\t\ttemplate_redis.delete(class_id)\n\t\t\ttemplate_redis.set(class_id, ujson.dumps(template_list_pool[class_id]))\n\n\t\treturn template_list_pool[class_id],from_redis", "def devices_list_view(request):\n return read_json(request.registry.settings['devices_path'], [])", "def _get_tuya_devices_filtered(self, types, exclude_mode=False, type_prefix=True):\n config_list = {}\n types_filter = set(types)\n tuya = self.hass.data[DOMAIN][TUYA_DATA]\n devices_list = tuya.get_all_devices()\n for device in devices_list:\n dev_type = device.device_type()\n exclude = (\n dev_type in types_filter\n if exclude_mode\n else dev_type not in types_filter\n )\n if exclude:\n continue\n dev_id = device.object_id()\n if type_prefix:\n dev_id = f\"{dev_type}-{dev_id}\"\n config_list[dev_id] = f\"{device.name()} ({dev_type})\"\n\n return config_list", "def list_templates(context):\n templates = get_oneoffixx_templates()\n template_group = context.REQUEST.form.get('form.widgets.template_group')\n terms = []\n\n for template in templates:\n terms.append(SimpleVocabulary.createTerm(\n template, template.template_id, template.title))\n\n # We filter templates when template_group has been selected\n if template_group is not None:\n favorites = get_oneoffixx_favorites()\n # Favorites are a special case\n if favorites and template_group[0] == favorites.get('id'):\n terms = [\n SimpleVocabulary.createTerm(\n OneOffixxTemplate(\n template, favorites.get('localizedName', '')),\n template.get('id'),\n template.get('localizedName'),\n )\n for template in favorites.get('templates')\n ]\n elif template_group[0] != '--NOVALUE--':\n terms = [term for term in terms if term.value.group == template_group[0]]\n\n return MutableObjectVocabulary(terms)", "def get_queryset(self):\n return models.Device.objects.filter(uuid=self.kwargs[\"uuid\"])", "def get_list_devices(self, verbose=False):\n # TODO: refresh region_names if more regions get devices available\n self.backends = {}\n region_names = ['us-west-1', 'us-east-1']\n for region in region_names:\n client = boto3.client(\n 'braket',\n region_name=region,\n aws_access_key_id=self._credentials['AWS_ACCESS_KEY_ID'],\n aws_secret_access_key=self._credentials['AWS_SECRET_KEY'],\n )\n filters = []\n devicelist = client.search_devices(filters=filters)\n for result in devicelist['devices']:\n if result['deviceType'] not in ['QPU', 'SIMULATOR']:\n continue\n if result['deviceType'] == 'QPU':\n device_capabilities = json.loads(\n client.get_device(deviceArn=result['deviceArn'])['deviceCapabilities']\n )\n self.backends[result['deviceName']] = {\n 'nq': device_capabilities['paradigm']['qubitCount'],\n 'coupling_map': device_capabilities['paradigm']['connectivity']['connectivityGraph'],\n 'version': device_capabilities['braketSchemaHeader']['version'],\n 'location': region, # deviceCapabilities['service']['deviceLocation'],\n 'deviceArn': result['deviceArn'],\n 'deviceParameters': device_capabilities['deviceParameters']['properties']['braketSchemaHeader'][\n 'const'\n ],\n 'deviceModelParameters': device_capabilities['deviceParameters']['definitions'][\n 'GateModelParameters'\n ]['properties']['braketSchemaHeader']['const'],\n }\n # Unfortunately the Capabilities schemas are not 
homogeneus for real devices and simulators\n elif result['deviceType'] == 'SIMULATOR':\n device_capabilities = json.loads(\n client.get_device(deviceArn=result['deviceArn'])['deviceCapabilities']\n )\n self.backends[result['deviceName']] = {\n 'nq': device_capabilities['paradigm']['qubitCount'],\n 'coupling_map': {},\n 'version': device_capabilities['braketSchemaHeader']['version'],\n 'location': 'us-east-1',\n 'deviceArn': result['deviceArn'],\n 'deviceParameters': device_capabilities['deviceParameters']['properties']['braketSchemaHeader'][\n 'const'\n ],\n 'deviceModelParameters': device_capabilities['deviceParameters']['definitions'][\n 'GateModelParameters'\n ]['properties']['braketSchemaHeader']['const'],\n }\n\n if verbose:\n print('- List of AWSBraket devices available:')\n print(list(self.backends))\n\n return self.backends", "def get_devices():\n global managed_objects\n global devices_by_adr\n \n devices_by_adr = {}\n \n r = re.compile(\"\\/org\\/bluez\\/hci\\d*\\/dev\\_(.*)\")\n # e.g., match a string like this:\n # /org/bluez/hci0/dev_58_C9_35_2F_A1_EF\n \n for key, value in managed_objects.items():\n # print(\"key=\", key)\n m = r.match(key)\n if m is not None:\n dev_str = m.group(1) # we have a device string!\n # print(\"dev_str=\", dev_str)\n # let's flatten that dict a bit\n devices_by_adr[dev_str] = value[\"org.bluez.Device1\"]", "def list_templates(request):\n templates = models.Template.all().order('name')\n return utility.respond(request, 'admin/list_templates', {'templates': templates})", "def test_create_device_template(self):\n pass", "def list_vm_template(client, private_cloud, resource_pool, location):\n return client.list(private_cloud, location, resource_pool)", "def _findCompatibleFactory(self, caps):\n self.debug(\"caps:%s\" % caps.to_string())\n res = []\n for factory in self._factories:\n for template in factory.get_static_pad_templates():\n if template.direction == gst.PAD_SINK:\n intersect = caps.intersect(template.static_caps.get())\n if not intersect.is_empty():\n res.append(factory)\n break\n self.debug(\"returning %r\" % res)\n return res", "def get_devices(self):\n\t\tself.ise.headers.update({'Accept': 'application/vnd.com.cisco.ise.network.networkdevice.1.0+xml'})\n\n\t\tresp = self.ise.get('{0}/config/networkdevice'.format(self.url_base))\n\n\t\tresult = {\n\t\t\t'success': False,\n\t\t\t'response': '',\n\t\t\t'error': '',\n\t\t}\n\n\t\tjson_res = ERS._to_json(resp.text)['ns3:searchResult']\n\n\t\tif resp.status_code == 200 and int(json_res['@total']) > 1:\n\t\t\tresult['success'] = True\n\t\t\tresult['response'] = [(i['@name'], i['@id'])\n\t\t\t\t\t\t\t\t for i in json_res['ns3:resources']['ns5:resource']]\n\t\t\treturn result\n\n\t\telif resp.status_code == 200 and int(json_res['@total']) == 1:\n\t\t\tresult['success'] = True\n\t\t\tresult['response'] = [(json_res['ns3:resources']['ns5:resource']['@name'],\n\t\t\t\t\t\t\t\t json_res['ns3:resources']['ns5:resource']['@id'])]\n\t\t\treturn result\n\n\t\telif resp.status_code == 200 and int(json_res['@total']) == 0:\n\t\t\tresult['success'] = True\n\t\t\tresult['response'] = []\n\t\t\treturn result\n\n\t\telse:\n\t\t\tresult['response'] = ERS._to_json(resp.text)['ns3:ersResponse']['messages']['message']['title']\n\t\t\tresult['error'] = resp.status_code\n\t\t\treturn result", "def test_get_all_as_user_returns_accessible_templates(self):\n mock_request = create_mock_request(user=self.user)\n templates = template_api.get_all(request=mock_request)\n self.assertEqual(templates.count(), 2)\n 
self.assertTrue(self.fixture.user1_template in list(templates))\n self.assertTrue(self.fixture.global_template in list(templates))" ]
[ "0.6678167", "0.6473679", "0.62452865", "0.61165714", "0.58044285", "0.5667819", "0.564642", "0.56240433", "0.5623741", "0.553225", "0.5455962", "0.543212", "0.5390477", "0.534885", "0.53479457", "0.52998805", "0.52833533", "0.5272643", "0.5270288", "0.523041", "0.52300704", "0.52232707", "0.51974696", "0.5183736", "0.51569355", "0.5138453", "0.51270276", "0.5118884", "0.5110015", "0.51059085" ]
0.67967194
0
Copies a pre shared key from a device attribute to another
def copy_psk(cls, token, src_device_id, src_attr, dest_device_id, dest_attr):
    tenant = init_tenant_context(token, db)

    src_device_orm = assert_device_exists(src_device_id, db.session)
    if not src_device_orm:
        raise HTTPRequestError(404, "No such device: {}".format(src_device_id))

    src_device = serialize_full_device(src_device_orm, tenant, True)

    found_attr = False
    src_attr_ref = None
    for template_id in src_device["templates"]:
        for attr in src_device["attrs"][template_id]:
            if attr["label"] == src_attr:
                if attr["value_type"] == "psk":
                    found_attr = True
                    src_attr_ref = attr
                    break
                else:
                    raise HTTPRequestError(400, "Attribute {} is not a 'psk' type_value".format(src_attr))

    if not found_attr:
        raise HTTPRequestError(404, "Not found attributes {}".format(src_attr))

    dest_device_orm = assert_device_exists(dest_device_id, db.session)
    if not dest_device_orm:
        raise HTTPRequestError(404, "No such device: {}".format(dest_device_id))

    dest_device = serialize_full_device(dest_device_orm, tenant, True)

    found_attr = False
    dest_attr_ref = None
    for template_id in dest_device["templates"]:
        for attr in dest_device["attrs"][template_id]:
            if attr["label"] == dest_attr:
                if attr["value_type"] == "psk":
                    found_attr = True
                    dest_attr_ref = attr
                    break
                else:
                    raise HTTPRequestError(400, "Attribute {} is not a 'psk' type_value".format(dest_attr))

    if not found_attr:
        raise HTTPRequestError(404, "Not found attributes {}".format(dest_attr))

    # copy the pre shared key
    src_psk_entry = DeviceAttrsPsk.query.filter_by(device_id=src_device["id"], attr_id=src_attr_ref["id"]).first()
    if not src_psk_entry:
        raise HTTPRequestError(400, "There is not a psk generated to {}".format(src_attr))

    dest_psk_entry = DeviceAttrsPsk.query.filter_by(device_id=dest_device["id"], attr_id=dest_attr_ref["id"]).first()
    if not dest_psk_entry:
        db.session.add(DeviceAttrsPsk(device_id=dest_device["id"], attr_id=dest_attr_ref["id"], psk=src_psk_entry.psk))
    else:
        dest_psk_entry.psk = src_psk_entry.psk

    dest_device_orm.updated = datetime.now()
    db.session.commit()

    dest_attr_ref['static_value'] = src_attr_ref['static_value']

    # send an update message on kafka
    kafka_handler_instance = cls.kafka.getInstance(cls.kafka.kafkaNotifier)
    kafka_handler_instance.update(dest_device, meta={"service": tenant})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def shared_key(private_key,public_key):\n\treturn private_key.exchange(public_key)", "def genKey(self, otherKey):\n self.sharedSecret = self.genSecret(self.privateKey, otherKey)\n #print(\"Shared secret:\")\n #print(self.sharedSecret)\n s = hashlib.sha256()\n s.update(bytes(str(self.sharedSecret).encode()))\n self.key = s.digest()", "def create_shared_key(self, scalar: bytes, point: bytes) -> bytes:", "def mifare_change_keys(self,address,key_a,key_b):\n if address < 128:\n trailer_address = address | 3\n else:\n trailer_address = address | 15\n data = self.mifare_read(trailer_address)\n data = key_a + data[6:10] + key_b\n self.mifare_write_standard(trailer_address,data)", "def create_key ():", "def from_key(self, public_id, key):\n otp = self.get_otp(key)\n from_key = modhex_encode(public_id.encode('hex')) + modhex_encode(otp.encode('hex'))\n return from_key", "def copy_key(self, frm, to):\n frm_location = self.s3_location(frm)\n copy_source = {\n 'Bucket': frm_location.bucket,\n 'Key': frm_location.key[1:]\n }\n\n to_location = self.s3_location(to)\n bucket = self.get_bucket(to_location.bucket)\n\n log.info(\"Copying %s to %s\", frm, to)\n bucket.copy(copy_source, to_location[1:])", "def gen_key(app):\n\tos.system('lxc-attach -n %s -- ssh-keygen -t rsa -N \"\" -f key' % app)", "def copy(self, key, new_key=None):\n\n if new_key is None:\n new_key = self.processing_buffer\n\n self[new_key] = self[key].copy()", "def dmcrypt_map(\n rawdev,\n keypath,\n _uuid,\n ):\n dev = '/dev/mapper/' + _uuid\n args = [\n 'cryptsetup',\n '--key-file',\n keypath,\n '--key-size', '256',\n 'create',\n _uuid,\n rawdev,\n ]\n try:\n command_check_call(args)\n return dev\n\n except subprocess.CalledProcessError as e:\n raise Error('unable to map device', rawdev, e)", "def __init__(self, uid, key, initial_prng):\n self.uid = uid\n self.key = key\n Crypto1.__init__(self, key, initial_prng)", "def prepare_key (self, key, for_seq):\n r_key = \"%s:%d:%s\" % (self.classkey, for_seq, key)\n return r_key", "def copy_key(self, key):\n # Get the TTL for the key\n ttl = self.src.ttl(key)\n\n # -2 means the key doesn't actually exist and is a lie\n if ttl == -2:\n # self.log.debug(f\"TTL -2: {key}\")\n return\n\n # -1 means the key has no expiration and we set it to 90 days\n if ttl == -1:\n # self.log.debug(f\"TTL -1: {key}\")\n ttl = 60*60*24*90\n self.log.ttl(key)\n\n # restore uses TTL in ms\n ttl = ttl * 1000\n\n # Get the original value\n value = self.src.dump(key)\n\n # Set the key on our destination\n self.dest.restore(key, ttl, value, replace=True)", "def update_key(self):\n self.__prev_key = self.__new_key", "def get_key(self, key_value):\n # Storing the correct key value back to the self.key attributes.\n self.key=key_value\n self.cryptor=Fernet(self.key)", "def _update_prepend_key(self):\n self.prepend_key -= 1", "def conditional_copy(self, other, key, altkey=None):\n if hasattr(self, key):\n possible = getattr(self, key)\n if possible:\n usekey = {True: altkey, False: key}[altkey is not None]\n if hasattr(other, usekey):\n exists = getattr(other, usekey)\n if exists:\n return\n if isinstance(possible, list):\n setattr(other, usekey, [deepcopy(i) for i in possible])\n else:\n setattr(other, usekey, deepcopy(possible))", "def genKey(self, privateKey,otherKey):\n\t\tself.sharedSecret = self.genSecret(privateKey, otherKey)\n\n\t\t# Convert the shared secret (int) to an array of bytes in network order\n\t\t# Otherwise hashlib can't hash it.\n\t\ttry:\n\t\t\t_sharedSecretBytes = 
self.sharedSecret.to_bytes(\n\t\t\t\tself.sharedSecret.bit_length() // 8 + 1, byteorder=\"big\")\n\t\texcept AttributeError:\n\t\t\t_sharedSecretBytes = str(self.sharedSecret)\n\n\t\ts = hashlib.sha256()\n\t\ts.update(bytes(_sharedSecretBytes))\n\t\tself.key = s.digest()", "def load_key():", "def hash_code(sender: Key.__class__, **kwargs):\n new_key = kwargs['instance']\n new_key.code = randint(MIN_CODE, MAX_CODE)\n new_key.hash_code = sha1(str(new_key.code).encode('utf-8')).hexdigest()", "def add_key(mu_key):\n params['key'] = mu_key", "def _key_share(self, grease):\n ext = b\"\\x00\\x33\"\n\n # Add grease value if necessary.\n if grease:\n share_ext = self._choose_grease()\n share_ext += b\"\\x00\\x01\\x00\"\n else:\n share_ext = b\"\"\n\n group = b\"\\x00\\x1d\"\n share_ext += group\n key_exchange_length = b\"\\x00\\x20\"\n share_ext += key_exchange_length\n share_ext += os.urandom(32)\n second_length = len(share_ext)\n first_length = second_length+2\n ext += struct.pack(\">H\", first_length)\n ext += struct.pack(\">H\", second_length)\n ext += share_ext\n\n return ext", "def keyEquivalent( self ):\n\t\treturn None", "def add_key(self, device, key):\n if not self.enabled:\n return\n self.keys[device] = key\n fh = open(self.path, \"w\")\n json.dump(self.keys, fh)\n fh.close()\n os.chmod(self.path, 0o600)", "def makeKey( self, bSerial, sVersion, bNumcam, sMac ):\n\n\t\tbSeed = 0\n\t\tbSeed = self._setSerial( bSeed, bSerial )\n\t\tbSeed = self._setVersion( bSeed, sVersion )\n\t\tbSeed = self._setNumcam( bSeed, bNumcam )\n\t\tbSeed = self._setMac( bSeed, sMac )\n\n\t\tsKey = commands.getoutput( '/usr/local/bin/make-key -s %s' % bSeed )\n\t\tif len( sKey ) != 24:\n\t\t\traise Exception, 'make-key did not return a valid key [%s]' % sKey\n\n\t\treturn sKey", "def load_device_key(self, filename):\n pass", "def make_external_key(self, data):\n return data['key']", "def _set_key(self, key):\n\n # select 56 bits from the 64-bit key\n key = self._permutate(self.__pc1, self._string_to_bitlist(key))\n self.L = key[:28]\n self.R = key[28:]\n for i in range(0, 16):\n for j in range(0, self.__left_rotations[i]):\n self.L.append(self.L[0])\n del self.L[0]\n self.R.append(self.R[0])\n del self.R[0]\n # select 48 bits from 56 bits\n self.Kn[i] = self._permutate(self.__pc2, self.L + self.R)", "def transfer_debug(self, key, other):\n # ensure lower case key\n if is_str(key):\n key = key.lower()\n # transfer debug information\n self.debug[key] = other.get_debug(key)", "def __KSA(self, S: bytearray):\n self.__S = S.copy() # copy S\n j = 0\n for i in range(256):\n j = (j + self.__S[i] + self.__key[i % len(self.__key)]) % 256\n # swap S[i] and S[j]\n tmp = self.__S[i]\n self.__S[i] = self.__S[j]\n self.__S[j] = tmp" ]
[ "0.61895716", "0.60756516", "0.591246", "0.5636808", "0.56331825", "0.56129754", "0.5557372", "0.5489743", "0.5350086", "0.52919835", "0.5271251", "0.5263501", "0.52357495", "0.52305025", "0.5219102", "0.5215605", "0.51871455", "0.5154822", "0.5146113", "0.5121871", "0.5118032", "0.5117026", "0.5112594", "0.51081663", "0.5107998", "0.51032907", "0.50958246", "0.5085261", "0.50694555", "0.5059573" ]
0.6738275
0
Fetches known devices, potentially limited by a given value. Ordering might be user-configurable too. Check the API description for more information about request parameters and headers.
def flask_internal_get_devices():
    try:
        # retrieve the authorization token
        token = retrieve_auth_token(request)

        # retrieve pagination
        page_number, per_page = get_pagination(request)

        params = {
            'page_number': page_number,
            'per_page': per_page,
            'sortBy': request.args.get('sortBy', None),
            'attr': request.args.getlist('attr'),
            'attr_type': request.args.getlist('attr_type'),
            'label': request.args.get('label', None),
            'template': request.args.get('template', None),
            'idsOnly': request.args.get('idsOnly', 'false'),
        }

        result = DeviceHandler.get_devices(token, params, True)
        LOGGER.info(f' Getting known internal devices.')

        return make_response(jsonify(result), 200)
    except HTTPRequestError as e:
        LOGGER.error(f' {e.message} - {e.error_code}.')
        if isinstance(e.message, dict):
            return make_response(jsonify(e.message), e.error_code)

        return format_response(e.error_code, e.message)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_devices(self):\n return self.api_request('GET', self.url + '/device', {})", "def get_devices(token, params, sensitive_data=False):\n tenant = init_tenant_context(token, db)\n\n pagination = {'page': params.get('page_number'), 'per_page': params.get('per_page'), 'error_out': False}\n\n SORT_CRITERION = {\n 'label': Device.label,\n None: Device.id\n }\n sortBy = SORT_CRITERION.get(params.get('sortBy'))\n\n attr_filter = []\n query = params.get('attr')\n\n for attr_label_item in query:\n parsed = re.search('^(.+){1}=(.+){1}$', attr_label_item)\n attr_label = []\n attr_label.append(DeviceAttr.label == parsed.group(1))\n # static value must be the override, if any\n attr_label.append(text(\"coalesce(overrides.static_value, attrs.static_value)=:static_value \").bindparams(static_value=parsed.group(2)))\n attr_filter.append(and_(*attr_label))\n\n query = params.get('attr_type')\n for attr_type_item in query:\n attr_filter.append(DeviceAttr.value_type == attr_type_item)\n\n label_filter = []\n target_label = params.get('label')\n if target_label:\n label_filter.append(Device.label.like(\"%{}%\".format(target_label)))\n\n template_filter = []\n target_template = params.get('template')\n if target_template:\n template_filter.append(DeviceTemplateMap.template_id == target_template)\n\n if (attr_filter): #filter by attr\n LOGGER.debug(f\" Filtering devices by {attr_filter}\")\n\n page = db.session.query(Device) \\\n .join(DeviceTemplateMap, isouter=True)\n\n page = page.join(DeviceTemplate) \\\n .join(DeviceAttr, isouter=True) \\\n .join(DeviceOverride, (Device.id == DeviceOverride.did) & (DeviceAttr.id == DeviceOverride.aid), isouter=True)\n\n page = page.filter(*label_filter) \\\n .filter(*template_filter) \\\n .filter(*attr_filter) \\\n .order_by(sortBy) \\\n .paginate(**pagination)\n\n\n elif label_filter or template_filter: # only filter by label or/and template\n if label_filter:\n LOGGER.debug(f\"Filtering devices by label: {target_label}\")\n\n if template_filter:\n LOGGER.debug(f\"Filtering devices with template: {target_template}\") \n \n page = db.session.query(Device) \\\n .join(DeviceTemplateMap, isouter=True)\n\n if sensitive_data: #aditional joins for sensitive data\n page = page.join(DeviceTemplate) \\\n .join(DeviceAttr, isouter=True) \\\n .join(DeviceOverride, (Device.id == DeviceOverride.did) & (DeviceAttr.id == DeviceOverride.aid), isouter=True)\n\n page = page.filter(*label_filter) \\\n .filter(*template_filter) \\\n .order_by(sortBy) \\\n .paginate(**pagination)\n\n else:\n LOGGER.debug(f\" Querying devices sorted by device id\")\n page = db.session.query(Device).order_by(sortBy).paginate(**pagination)\n\n devices = []\n \n if params.get('idsOnly').lower() in ['true', '1', '']:\n return DeviceHandler.get_only_ids(page)\n\n for d in page.items:\n devices.append(serialize_full_device(d, tenant, sensitive_data))\n\n\n result = {\n 'pagination': {\n 'page': page.page,\n 'total': page.pages,\n 'has_next': page.has_next,\n 'next_page': page.next_num\n },\n 'devices': devices\n }\n\n return result", "def get_devices(self):\n return get_devices(self.api_key)", "def devices(self, query=None):\n if query is not None:\n query = clean(query, self.devices_parameters)\n query = \"?\" + urllib.parse.urlencode(query)\n else:\n query = \"\"\n return self.get(\"/devices\" + query)", "def user_sends_get_call_to_the_devices():\n web_app.list_devices()", "async def get_devices(self, params: Optional = None) -> dict:\r\n return await self.get_items(API_DEVICES, params=params)", "async def 
find_devices() -> List[DeviceInfo]:\n return await Discovery.search_devices()", "def devices(self, **kwargs):\n return self._get(API.DEVICES.value, check_202=True, **kwargs)", "def getDeviceList(self):\r\n\r\n self._logger.debug(\"In getDeviceList()...\")\r\n\r\n # update the security token if needed \r\n if self._checkToken():\r\n\r\n response = self._callAPI(_API_GET_DEVICE_LIST, useSession=True)\r\n\r\n if response is not None:\r\n\r\n deviceInfo = response.json()\r\n \r\n if response.status_code == 200 and \"items\" in deviceInfo:\r\n\r\n deviceList = []\r\n\r\n for dev in deviceInfo[\"items\"]:\r\n\r\n # pull out common attributes\r\n deviceID = dev[\"serial_number\"]\r\n deviceType = dev[\"device_family\"]\r\n description = dev.get(\"name\", deviceType + \" \" + deviceID[-4:])\r\n\r\n # uncomment the next line to inspect the devices returned from the MyQ service\r\n self._logger.debug(\"Device Found - Device ID: %s, Device Type: %s, Description: %s\", deviceID, deviceType, description)\r\n\r\n # add device to the list with properties based on type\r\n if deviceType == API_DEVICE_TYPE_GATEWAY:\r\n\r\n # get gateway attributes\r\n online = dev[\"state\"][\"online\"]\r\n lastUpdated = dev[\"state\"][\"last_status\"]\r\n\r\n # add gateway device to list\r\n deviceList.append({\r\n \"type\": deviceType,\r\n \"id\": deviceID,\r\n \"description\": description,\r\n \"online\": online,\r\n \"last_updated\": lastUpdated\r\n })\r\n\r\n elif deviceType == API_DEVICE_TYPE_OPENER:\r\n \r\n # get the door attributes\r\n parentID = dev[\"parent_device_id\"] \r\n state = dev[\"state\"][\"door_state\"]\r\n lastChanged = dev[\"state\"][\"last_update\"]\r\n lastUpdated = dev[\"state\"][\"last_status\"]\r\n\r\n # add garage door opener device to list\r\n deviceList.append({\r\n \"type\": deviceType,\r\n \"id\": deviceID,\r\n \"parent_id\": parentID,\r\n \"description\": description,\r\n \"state\": state,\r\n \"last_changed\": lastChanged,\r\n \"last_updated\": lastUpdated\r\n })\r\n \r\n elif deviceType == API_DEVICE_TYPE_LAMP:\r\n\r\n # get the lamp attributes\r\n parentID = dev[\"parent_device_id\"] \r\n state = dev[\"state\"][\"lamp_state\"] \r\n lastChanged = dev[\"state\"][\"last_update\"]\r\n lastUpdated = dev[\"state\"][\"last_status\"]\r\n\r\n # add lamp device to list\r\n deviceList.append({\r\n \"type\": deviceType,\r\n \"id\": deviceID,\r\n \"parent_id\": parentID,\r\n \"description\": description,\r\n \"state\": state,\r\n \"last_changed\": lastChanged,\r\n \"last_updated\": lastUpdated\r\n })\r\n \r\n return deviceList\r\n \r\n elif response.status_code == 401:\r\n \r\n self._logger.error(\"There was an authentication error with the MyQ account: %s\", _parseResponseMsg(response))\r\n return None\r\n\r\n else:\r\n \r\n self._logger.error(\"Error retrieving device list: %s\", _parseResponseMsg(response))\r\n return None\r\n\r\n else:\r\n # Error logged in _callAPI function\r\n return None\r\n\r\n else:\r\n # Check token failed - wait and see if next call successful\r\n return None", "def flask_get_devices():\n try:\n # retrieve the authorization token\n token = retrieve_auth_token(request)\n\n # retrieve pagination\n page_number, per_page = get_pagination(request)\n\n params = {\n 'page_number': page_number,\n 'per_page': per_page,\n 'sortBy': request.args.get('sortBy', None),\n 'attr': request.args.getlist('attr'),\n 'attr_type': request.args.getlist('attr_type'),\n 'label': request.args.get('label', None),\n 'template': request.args.get('template', None),\n 'idsOnly': 
request.args.get('idsOnly', 'false'),\n }\n\n result = DeviceHandler.get_devices(token, params)\n LOGGER.info(f' Getting latest added device(s).')\n\n return make_response(jsonify(result), 200)\n except HTTPRequestError as e:\n LOGGER.error(f' {e.message} - {e.error_code}.')\n if isinstance(e.message, dict):\n return make_response(jsonify(e.message), e.error_code)\n\n return format_response(e.error_code, e.message)", "def list_devices(self):\n response = self.oauth.get(url=f'{self.base_url}/json/devices/list')\n\n result = response.json()['device']\n for device in result:\n print(device)", "def get_devices():\n names = devices.list()\n if request.args.get('full') is not None:\n data = {d: devices.show(d) for d in names}\n else:\n data = names\n return jsonify({'devices': data})", "async def get_device_list(self):\n self.logger.debug(\"Retrieving device list information.\")\n #url = 'https://{}/api/user/device'.format(self.apiHost) #suddenly stopped worrking, so use\n '''\n #full version\n url = 'https://{}/api/user/device?lang=en&apiKey={}&getTags=1&version={}&ts={}&nonce={}&appid={}&imei={}&os={}&model={}&romVersion={}&appVersion={}'.format(self.apiHost,\n self.apikey,\n self.timestamp,\n self._version,\n self._nonce,\n self._appid,\n self._imei,\n self._os,\n self._model,\n self._romVersion,\n self._appVersion)\n '''\n url = 'https://{}/api/user/device?version={}&appid={}'.format(self.apiHost, self._version, self._appid)\n headers = {\n 'Authorization': 'Bearer %s' % self.authenticationToken,\n }\n self.logger.debug('url: %s, headers: %s' % (url, headers))\n async with ClientSession() as session:\n async with session.get(url, headers=headers) as response:\n json_response = await response.json()\n \n self.logger.debug('received response status: %s' % response.status) \n self.logger.debug('received response: %s' % self.pprint(json_response))\n if response.status != 200:\n self.logger.error('error: %s received' % response.status)\n return\n \n if json_response.get(\"devicelist\"):\n self.logger.info('New response format found')\n json_response = json_response[\"devicelist\"]\n \n self.logger.debug('number of device(s) is: %d' % len(json_response))\n \n self._devices = json_response #list of devices and current configurations\n \n self._create_client_devices()\n \n '''\n Example Response:\n [\n {\n \"__v\": 0,\n \"_id\": \"5becffa6d2b4a3c34cb79b38\",\n \"apikey\": \"530303a6-cf2c-4246-894c-xxxxxxxxxxx\",\n \"brandName\": \"AUTOSLIDE\",\n \"createdAt\": \"2018-11-15T05:09:58.341Z\",\n \"deviceStatus\": \"\",\n \"deviceUrl\": \"\",\n \"deviceid\": \"100050xxxxx\",\n \"devicekey\": \"4123ec79-d2c3-4d32-930a-xxxxxxxxxxxxx\",\n \"extra\": {\n \"_id\": \"xxxxxxxxxxxxxxxx\",\n \"extra\": {\n \"apmac\": \"xx:xx:xx:xx:xx:xx\",\n \"brandId\": \"5a6fcf00f620073c67efc280\",\n \"description\": \"20180813001\",\n \"mac\": \"xx:xx:xx0:xx:xx:xx\",\n \"manufacturer\": \"\\u9752\\u5c9b\\u6fb3\\u601d\\u5fb7\\u667a\\u80fd\\u95e8\\u63a7\\u7cfb\\u7edf\\u6709\\u9650\\u516c\\u53f8\",\n \"model\": \"PSA-BTA-GL\",\n \"modelInfo\": \"5af3f5332c8642b001540dac\",\n \"ui\": \"\\u63a8\\u62c9\\u5ba0\\u7269\\u95e8\",\n \"uiid\": 54\n }\n },\n \"group\": \"\",\n \"groups\": [],\n \"ip\": \"xxx.xx.xx.xxx\",\n \"location\": \"\",\n \"name\": \"Patio Door\",\n \"offlineTime\": \"2018-12-31T07:23:31.018Z\",\n \"online\": true,\n \"onlineTime\": \"2018-12-31T12:19:33.216Z\",\n \"params\": {\n \"a\": \"3\",\n \"b\": \"3\",\n \"c\": \"1\",\n \"d\": \"1\",\n \"e\": \"1\",\n \"f\": \"1\",\n \"fwVersion\": \"2.0.2\",\n \"g\": \"0\",\n 
\"h\": \"1\",\n \"i\": \"0\",\n \"j\": \"00\",\n \"k\": \"0\",\n \"l\": \"1\",\n \"m\": \"2\",\n \"n\": \"0\",\n \"rssi\": -53,\n \"staMac\": \"xx:xx:xx:xx:xx:xx\"\n },\n \"productModel\": \"WFA-1\",\n \"settings\": {\n \"alarmNotify\": 1,\n \"opsHistory\": 1,\n \"opsNotify\": 0\n },\n \"sharedTo\": [\n {\n \"note\": \"\",\n \"permit\": 15,\n \"phoneNumber\": \"[email protected]\",\n \"shareTime\": 1542259546087\n }\n ],\n \"showBrand\": true,\n \"type\": \"10\",\n \"uiid\": 54\n }\n ]\n \n or New format:\n {\n \"devicelist\": [\n {\n \"__v\": 0,\n \"_id\": \"5c3665d012d28ae6ba4943c8\",\n \"apikey\": \"530303a6-cf2c-4246-894c-50855b00e6d8\",\n \"brandLogoUrl\": \"https://us-ota.coolkit.cc/logo/KRZ54OifuGmjoEMxT1YYM3Ybu2fj5K2C.png\",\n \"brandName\": \"Sonoff\",\n \"createdAt\": \"2019-01-09T21:21:20.402Z\",\n \"devConfig\": {},\n \"devGroups\": [],\n \"deviceStatus\": \"\",\n ... as before\n '''", "def get_devices(self):\n\t\tself.ise.headers.update({'Accept': 'application/vnd.com.cisco.ise.network.networkdevice.1.0+xml'})\n\n\t\tresp = self.ise.get('{0}/config/networkdevice'.format(self.url_base))\n\n\t\tresult = {\n\t\t\t'success': False,\n\t\t\t'response': '',\n\t\t\t'error': '',\n\t\t}\n\n\t\tjson_res = ERS._to_json(resp.text)['ns3:searchResult']\n\n\t\tif resp.status_code == 200 and int(json_res['@total']) > 1:\n\t\t\tresult['success'] = True\n\t\t\tresult['response'] = [(i['@name'], i['@id'])\n\t\t\t\t\t\t\t\t for i in json_res['ns3:resources']['ns5:resource']]\n\t\t\treturn result\n\n\t\telif resp.status_code == 200 and int(json_res['@total']) == 1:\n\t\t\tresult['success'] = True\n\t\t\tresult['response'] = [(json_res['ns3:resources']['ns5:resource']['@name'],\n\t\t\t\t\t\t\t\t json_res['ns3:resources']['ns5:resource']['@id'])]\n\t\t\treturn result\n\n\t\telif resp.status_code == 200 and int(json_res['@total']) == 0:\n\t\t\tresult['success'] = True\n\t\t\tresult['response'] = []\n\t\t\treturn result\n\n\t\telse:\n\t\t\tresult['response'] = ERS._to_json(resp.text)['ns3:ersResponse']['messages']['message']['title']\n\t\t\tresult['error'] = resp.status_code\n\t\t\treturn result", "def device_config(i):\n apipath = \"/targets/devices\"\n url = SERVER + apipath\n params = {\n 'q': '(deviceType:ASA)',\n 'resolve': '[targets/devices.{name,deviceConfig}]',\n 'sort': 'name:desc',\n 'limit': NUM_DEVICES_TO_RETRIEVE_PER_QUERY,\n 'offset': i}\n headers = {\n 'Accept': \"application/json\",\n 'Content-Type': \"application/json\",\n 'Authorization': \"bearer {}\".format(token)}\n response = requests.get(url, verify=False, stream=True, headers=headers, params=params)\n getstatuscode = response.status_code\n getresponse = response.json()\n if getstatuscode == 200:\n return getresponse\n else:\n response.raise_for_status()", "def test_get_devices(self):\n print(\"Test Device List\")\n self.mock_api.return_value = call_json.DeviceList.device_list_response()\n self.manager.get_devices()\n all_kwargs = parse_args(self.mock_api)\n assert assert_test(self.manager.get_devices, all_kwargs, None,\n self.write_api, self.overwrite)\n assert len(self.manager.bulbs) == call_json_bulbs.BULBS_NUM\n assert len(self.manager.outlets) == call_json_outlets.OUTLETS_NUM\n assert len(self.manager.fans) == call_json_fans.FANS_NUM\n assert len(self.manager.switches) == call_json_switches.SWITCHES_NUM", "def get_devices(url, token, filter_by_lst=None, filter_by_comp=None):\n assert filter_by_lst is None or filter_by_comp is None\n\n res = []\n for dev in get(url + '/states', token):\n if filter_by_comp is not None:\n comp, 
_ = dev['entity_id'].split(\".\")\n if comp == filter_by_comp:\n res.append(dev)\n continue\n if filter_by_lst is not None:\n if dev['entity_id'] in filter_by_lst:\n res.append(dev)\n continue\n res.append(dev['entity_id'])\n return res", "def get(self):\n try:\n queryset_devices = DevicesTmp.objects.filter(operation_id=self.operation_id)\n field_relation_ships = {\n 'hostname': 'hostname',\n 'ip': 'ip',\n 'telnet_port': 'telnet_port',\n 'snmp_port': 'snmp_port',\n 'snmp_community': 'snmp_community',\n 'snmp_version': 'snmp_version',\n 'login_expect': 'login_expect',\n 'device_type': 'device_type',\n 'telnet_status': 'telnet_status',\n 'status_type': 'snmp_status',\n 'group_name': 'group_name',\n 'ostype': 'ostype',\n }\n query_data = {\n 'hostname': self.hostname,\n 'ip': self.ip,\n 'telnet_port': self.telnet_port,\n 'snmp_port': self.snmp_port,\n 'snmp_community': self.snmp_community,\n 'snmp_version': self.snmp_version,\n 'login_expect': self.login_expect,\n 'device_type': self.device_type,\n 'telnet_status': self.telnet_status,\n 'snmp_status': self.status_type,\n 'group_name': self.group_name,\n 'ostype': self.ostype_name,\n }\n search_fields = ['hostname', 'ip', 'telnet_port', 'snmp_port', 'snmp_community', 'snmp_version',\n 'login_expect', 'device_type', 'telnet_status', 'status_type', 'group_name', 'ostype']\n sorts, search_conditions = views_helper.get_search_conditions(self.request, field_relation_ships,\n query_data, search_fields)\n if sorts != []:\n if 'ostype' in sorts:\n sorts = ['ostype__name' if x == 'ostype' else x for x in sorts]\n if '-ostype' in sorts:\n sorts = ['-ostype__name' if x == '-ostype' else x for x in sorts]\n else:\n sorts = ['device_id']\n if search_conditions:\n ostype_condition = search_conditions.get('ostype__contains')\n if ostype_condition is not None:\n ostype_list = Ostype.objects.filter(**{\"name__contains\": ostype_condition})\n queryset_devices = queryset_devices.filter(**{'ostype__in': ostype_list})\n del search_conditions['ostype__contains']\n queryset_devices = queryset_devices.filter(**search_conditions).order_by(*sorts)\n else:\n queryset_devices = queryset_devices.order_by(*sorts)\n serializer = DevicesTmpSerializer(queryset_devices, many=True)\n paginator = Paginator(serializer.data, int(self.max_size_per_page))\n contacts = paginator.page(int(self.page_from))\n data = {\n 'data': contacts.object_list,\n 'operation_id': self.operation_id,\n 'new_token': self.new_token,\n 'num_page': paginator.num_pages,\n 'page_range': list(paginator.page_range),\n 'page_has_next': contacts.has_next(),\n 'total_num': len(queryset_devices),\n 'current_page_num': contacts.number,\n constants.STATUS: {\n constants.STATUS: constants.TRUE,\n constants.MESSAGE: constants.SUCCESS\n },\n }\n return api_return(data=data)\n except Exception, e:\n print e\n raise e", "def device_get(self, filters={}):\n return {}", "def list_devices():\n return _lib.SeaTeaseAPI().list_devices()", "def list_devices(arn=None, nextToken=None):\n pass", "def test_get_devices(self):\n pass", "def test_get_devices(self):\n pass", "def get_devices(self):\n devices = self.get(\"event/device\")", "def get_list_devices(self, verbose=False):\n # TODO: refresh region_names if more regions get devices available\n self.backends = {}\n region_names = ['us-west-1', 'us-east-1']\n for region in region_names:\n client = boto3.client(\n 'braket',\n region_name=region,\n aws_access_key_id=self._credentials['AWS_ACCESS_KEY_ID'],\n aws_secret_access_key=self._credentials['AWS_SECRET_KEY'],\n )\n filters = 
[]\n devicelist = client.search_devices(filters=filters)\n for result in devicelist['devices']:\n if result['deviceType'] not in ['QPU', 'SIMULATOR']:\n continue\n if result['deviceType'] == 'QPU':\n device_capabilities = json.loads(\n client.get_device(deviceArn=result['deviceArn'])['deviceCapabilities']\n )\n self.backends[result['deviceName']] = {\n 'nq': device_capabilities['paradigm']['qubitCount'],\n 'coupling_map': device_capabilities['paradigm']['connectivity']['connectivityGraph'],\n 'version': device_capabilities['braketSchemaHeader']['version'],\n 'location': region, # deviceCapabilities['service']['deviceLocation'],\n 'deviceArn': result['deviceArn'],\n 'deviceParameters': device_capabilities['deviceParameters']['properties']['braketSchemaHeader'][\n 'const'\n ],\n 'deviceModelParameters': device_capabilities['deviceParameters']['definitions'][\n 'GateModelParameters'\n ]['properties']['braketSchemaHeader']['const'],\n }\n # Unfortunately the Capabilities schemas are not homogeneus for real devices and simulators\n elif result['deviceType'] == 'SIMULATOR':\n device_capabilities = json.loads(\n client.get_device(deviceArn=result['deviceArn'])['deviceCapabilities']\n )\n self.backends[result['deviceName']] = {\n 'nq': device_capabilities['paradigm']['qubitCount'],\n 'coupling_map': {},\n 'version': device_capabilities['braketSchemaHeader']['version'],\n 'location': 'us-east-1',\n 'deviceArn': result['deviceArn'],\n 'deviceParameters': device_capabilities['deviceParameters']['properties']['braketSchemaHeader'][\n 'const'\n ],\n 'deviceModelParameters': device_capabilities['deviceParameters']['definitions'][\n 'GateModelParameters'\n ]['properties']['braketSchemaHeader']['const'],\n }\n\n if verbose:\n print('- List of AWSBraket devices available:')\n print(list(self.backends))\n\n return self.backends", "def get_device_list(ip_address, headers):\n ome_device_list = []\n next_link_url = 'https://%s/api/DeviceService/Devices' % ip_address\n while next_link_url is not None:\n device_response = requests.get(next_link_url, headers=headers, verify=False)\n next_link_url = None\n if device_response.status_code == 200:\n dev_json_response = device_response.json()\n if dev_json_response['@odata.count'] <= 0:\n print(\"No devices found at \", ip_address)\n return\n\n if '@odata.nextLink' in dev_json_response:\n next_link_url = 'https://%s/' % ip_address + dev_json_response['@odata.nextLink']\n\n if dev_json_response['@odata.count'] > 0:\n ome_device_list = ome_device_list + [x['Id'] for x in dev_json_response['value']]\n else:\n print(\"No devices found at \", ip_address)\n\n return ome_device_list", "def getDevicesList(self, serialNum, internal=False):\r\n\r\n self._logger.debug(\"in API getDevicesList()...\")\r\n\r\n # check the auth tokens and TTL unless this is a get state call (a non-polling call)\r\n if not internal:\r\n self._checkTokens()\r\n\r\n # format url parameters\r\n params = {\r\n \"actionID\": \"command\",\r\n \"command\": _SESSION_COMMAND_GET_DEVICES,\r\n \"serial\": serialNum,\r\n \"sessionID\": self._sessionID,\r\n } \r\n\r\n # call the session API with the parameters\r\n response = self._call_api(_API_SESSION, params=params)\r\n \r\n # if data returned, format devices state and return\r\n if response and response.status_code == 200:\r\n\r\n respData = response.json() \r\n return self._buildDevicesState(respData)\r\n\r\n # otherwise return empty dictionary (evaluates to false)\r\n else:\r\n return {}", "def get_queryset(self):\n return 
models.Device.objects.filter(uuid=self.kwargs[\"uuid\"])", "def refresh_devices(self, devices=(), only_connected=False, expand_vsys=True, include_device_groups=True, add=False):\n logger.debug(self.hostname + \": refresh_devices called\")\n try:\n # Test if devices is iterable\n test_iterable = iter(devices)\n except TypeError:\n # This probably means a single device was passed in, not an iterable.\n # Convert to an iterable with a single item.\n devices = (devices,)\n # Remove None from list of devices\n devices = [x for x in devices if x is not None]\n # Get the list of managed devices\n if only_connected:\n cmd = \"show devices connected\"\n else:\n cmd = \"show devices all\"\n devices_xml = self.op(cmd)\n devices_xml = devices_xml.find(\"result/devices\")\n\n # Filter to only requested devices\n if devices:\n filtered_devices_xml = ET.Element(\"devices\")\n for serial, vsys in [(d.serial, d.vsys) for d in devices]:\n if serial is None:\n continue\n entry = devices_xml.find(\"entry[@name='%s']\" % serial)\n if entry is None:\n raise err.PanDeviceError(\"Can't find device with serial %s attached to Panorama at %s\" %\n (serial, self.hostname))\n multi_vsys = yesno(entry.findtext(\"multi-vsys\"))\n # Create entry if needed\n if filtered_devices_xml.find(\"entry[@name='%s']\" % serial) is None:\n entry_copy = deepcopy(entry)\n # If multivsys firewall with vsys defined, erase all vsys in filtered\n if multi_vsys and vsys != \"shared\" and vsys is not None:\n entry_copy.remove(entry_copy.find(\"vsys\"))\n ET.SubElement(entry_copy, \"vsys\")\n filtered_devices_xml.append(entry_copy)\n # Get specific vsys\n if vsys != \"shared\" and vsys is not None:\n vsys_entry = entry.find(\"vsys/entry[@name='%s']\" % vsys)\n if vsys_entry is None:\n raise err.PanDeviceError(\"Can't find device with serial %s and\"\n \" vsys %s attached to Panorama at %s\" %\n (serial, vsys, self.hostname)\n )\n vsys_section = filtered_devices_xml.find(\"entry[@name='%s']/vsys\" % serial)\n vsys_section.append(vsys_entry)\n devices_xml = filtered_devices_xml\n\n # Manipulate devices_xml so each vsys is a separate device\n if expand_vsys:\n original_devices_xml = deepcopy(devices_xml)\n for entry in original_devices_xml:\n multi_vsys = yesno(entry.findtext(\"multi-vsys\"))\n if multi_vsys:\n serial = entry.findtext(\"serial\")\n for vsys_entry in entry.findall(\"vsys/entry\"):\n if vsys_entry.get(\"name\") == \"vsys1\":\n continue\n new_vsys_device = deepcopy(entry)\n new_vsys_device.set(\"name\", serial)\n ET.SubElement(new_vsys_device, \"vsysid\").text = vsys_entry.get(\"name\")\n ET.SubElement(new_vsys_device, \"vsysname\").text = vsys_entry.findtext(\"display-name\")\n devices_xml.append(new_vsys_device)\n\n # Create firewall instances\n firewall_instances = firewall.Firewall.refresh_all_from_xml(devices_xml, refresh_children=not expand_vsys)\n\n if not include_device_groups:\n if add:\n self.removeall(firewall.Firewall)\n self.extend(firewall_instances)\n return firewall_instances\n\n # Create device-groups\n\n # Get the list of device groups\n devicegroup_xml = self.op(\"show devicegroups\")\n devicegroup_xml = devicegroup_xml.find(\"result/devicegroups\")\n\n devicegroup_instances = DeviceGroup.refresh_all_from_xml(devicegroup_xml, refresh_children=False)\n\n for dg in devicegroup_instances:\n dg_serials = [entry.get(\"name\") for entry in devicegroup_xml.findall(\"entry[@name='%s']/devices/entry\" % dg.name)]\n # Find firewall with each serial\n for dg_serial in dg_serials:\n all_dg_vsys = [entry.get(\"name\") for 
entry in devicegroup_xml.findall(\"entry[@name='%s']/devices/entry[@name='%s']\"\n \"/vsys/entry\" % (dg.name, dg_serial))]\n # Collect the firewall serial entry to get current status information\n fw_entry = devicegroup_xml.find(\"entry[@name='%s']/devices/entry[@name='%s']\" % (dg.name, dg_serial))\n if not all_dg_vsys:\n # This is a single-context firewall\n dg_vsys = \"vsys1\"\n fw = next((x for x in firewall_instances if x.serial == dg_serial and x.vsys == dg_vsys), None)\n if fw is None:\n # It's possible for device-groups to reference a serial/vsys that doesn't exist\n continue\n # Move the firewall to the device-group\n dg.add(fw)\n firewall_instances.remove(fw)\n fw.state.connected = yesno(fw_entry.findtext(\"connected\"))\n fw.state.unsupported_version = yesno(fw_entry.findtext(\"unsupported-version\"))\n fw.state.set_shared_policy_synced(fw_entry.findtext(\"shared-policy-status\"))\n else:\n # This is a multi-context firewall\n for dg_vsys in all_dg_vsys:\n fw = next((x for x in firewall_instances if x.serial == dg_serial and x.vsys == dg_vsys), None)\n if fw is None:\n # It's possible for device-groups to reference a serial/vsys that doesn't exist\n continue\n # Move the firewall to the device-group\n dg.add(fw)\n firewall_instances.remove(fw)\n fw.state.connected = yesno(fw_entry.findtext(\"connected\"))\n fw.state.unsupported_version = yesno(fw_entry.findtext(\"unsupported-version\"))\n fw.state.set_shared_policy_synced(fw_entry.findtext(\"shared-policy-status\"))\n\n if add:\n for dg in devicegroup_instances:\n found_dg = self.find(dg.name, DeviceGroup)\n if found_dg is not None:\n # Move the firewalls to the existing devicegroup\n found_dg.removeall(firewall.Firewall)\n found_dg.extend(dg.children)\n else:\n # Devicegroup doesn't exist, add it\n self.add(dg)\n # Add firewalls that are not in devicegroups\n self.removeall(firewall.Firewall)\n self.extend(firewall_instances)\n\n return firewall_instances + devicegroup_instances", "def list_devices(cls, filters={}):\n return cls.dbdriver.list_devices(filters)" ]
[ "0.69358677", "0.6624522", "0.6621723", "0.66123044", "0.65878403", "0.6565869", "0.64921904", "0.64532816", "0.6450756", "0.64152664", "0.63179874", "0.625757", "0.62422895", "0.6160705", "0.60734993", "0.60325587", "0.6031738", "0.59549993", "0.5930714", "0.5890118", "0.5871597", "0.58713377", "0.58713377", "0.5869401", "0.58624417", "0.58484757", "0.58437043", "0.581803", "0.5810213", "0.5806617" ]
0.6713731
1
Runs layer normalization followed by dropout.
def layer_norm_and_dropout(input_tensor, dropout_prob, name=None, dropout_name=None):
  output_tensor = layer_norm(input_tensor, name)
  output_tensor = dropout(output_tensor, dropout_prob, dropout_name=dropout_name)
  return output_tensor
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pre_post_process_layer(prev_out, out, process_cmd, dropout_rate=0.):\n for cmd in process_cmd:\n if cmd == \"a\": # add residual connection\n out = out + prev_out if prev_out else out\n elif cmd == \"n\": # add layer normalization\n out = layers.layer_norm(\n out,\n begin_norm_axis=len(out.shape) - 1,\n param_attr=fluid.initializer.Constant(1.),\n bias_attr=fluid.initializer.Constant(0.))\n elif cmd == \"d\": # add dropout\n if dropout_rate:\n out = layers.dropout(\n out,\n dropout_prob=dropout_rate,\n seed=dropout_seed,\n is_test=False)\n return out", "def test_normalization(self):\n u = np.array([np.array([0.7, 1.2]), np.array([0.5, 1.6])])\n with tf.Session() as sess:\n n = sess.run(AbstractModel.l2_normalization_layer(u, axis=1))\n magnitude = np.linalg.norm(n, axis=1)\n np.testing.assert_allclose(magnitude, np.array([1.0, 1.0]))", "def remove_norms(module_: \"WN\") -> \"WN\":\n module_.start = torch.nn.utils.remove_weight_norm(module_.start_conv)\n module_.cond_layer = torch.nn.utils.remove_weight_norm(module_.cond_layer)\n for i, layer_ in enumerate(module_.in_layers):\n layer_ = DepthwiseSeparableConv1d.remove_batch_norm(layer_)\n module_.in_layers[i] = layer_\n for i, layer_ in enumerate(module_.res_skip_layers):\n layer_ = torch.nn.utils.remove_weight_norm(layer_)\n module_.res_skip_layers[i] = layer_\n return module_", "def batch_normalization(input_var=None):\n\n # Hyperparameters\n hp = Hyperparameters()\n hp('batch_size', 30)\n hp('n_epochs', 1000)\n hp('learning_rate', 0.01)\n hp('l1_reg', 0.00)\n hp('l2_reg', 0.0001)\n hp('patience', 5000)\n\n # Create connected layers\n # Input layer\n l_in = InputLayer(input_shape=(hp.batch_size, 28 * 28), input_var=input_var, name='Input')\n # Batch Normalization\n l_bn1 = BatchNormalization(incoming=l_in, name='Batch Normalization 1')\n # Dense Layer\n l_hid1 = DenseLayer(incoming=l_bn1, n_units=500, W=glorot_uniform, l1=hp.l1_reg,\n l2=hp.l2_reg, activation=relu, name='Hidden layer 1')\n # Batch Normalization\n l_bn2 = BatchNormalization(incoming=l_hid1, name='Batch Normalization 2')\n # Dense Layer\n l_hid2 = DenseLayer(incoming=l_bn2, n_units=500, W=glorot_uniform, l1=hp.l1_reg,\n l2=hp.l2_reg, activation=relu, name='Hidden layer 2')\n # Batch Normalization\n l_bn3 = BatchNormalization(incoming=l_hid2, name='Batch Normalization 3')\n # Logistic regression Layer\n l_out = LogisticRegression(incoming=l_bn3, n_class=10, l1=hp.l1_reg,\n l2=hp.l2_reg, name='Logistic regression')\n\n # Create network and add layers\n net = Network('mlp with batch normalization')\n net.add(l_in)\n net.add(l_bn1)\n net.add(l_hid1)\n net.add(l_bn2)\n net.add(l_hid2)\n net.add(l_bn3)\n net.add(l_out)\n\n return net, hp", "def standardize(layer, offset, scale, shared_axes='auto'):\n # Subtract the offset\n layer = BiasLayer(layer, -offset, shared_axes)\n # Do not optimize the offset parameter\n layer.params[layer.b].remove('trainable')\n # Divide by the scale\n layer = ScaleLayer(layer, floatX(1.)/scale, shared_axes)\n # Do not optimize the scales parameter\n layer.params[layer.scales].remove('trainable')\n return layer", "def _normal_abnormal(self):\n mp_pool()\n print(\"Classification of WCE to normal vs abnormal --> DONE\")", "def convert_layer_norm(g, op, block):\n\n begin_norm_axis = op.attr(\"begin_norm_axis\")\n epsilon = op.attr(\"epsilon\")\n x = g.get_node(op.input(\"X\")[0])\n bias_input = op.input(\"Bias\")\n scale_input = op.input(\"Scale\")\n\n x_shape = infer_shape(x)\n assert begin_norm_axis in (\n len(x_shape) - 1,\n -1,\n ), \"Support 
only normalization over last one dimension.\"\n\n if bias_input:\n bias = g.get_node(bias_input[0])\n else:\n bias = _expr.const(np.zeros(x_shape[begin_norm_axis]))\n\n if scale_input:\n scale = g.get_node(scale_input[0])\n else:\n scale = _expr.const(np.ones(x_shape[begin_norm_axis]))\n\n out = _op.nn.layer_norm(\n x, gamma=scale, beta=bias, axis=begin_norm_axis, epsilon=epsilon, center=True, scale=True\n )\n g.add_node(op.output(\"Y\")[0], out)", "def trainFreezeBN(self):\n\t\tprint(\"Freezing Mean/Var of BatchNorm2D.\")\n\t\tprint(\"Freezing Weight/Bias of BatchNorm2D.\")\n\t\tfor m in self.modules():\n\t\t\tif isinstance(m, nn.BatchNorm2d):\n\t\t\t\tm.eval()\n\t\t\t\tm.weight.requires_grad = False\n\t\t\t\tm.bias.requires_grad = False", "def trainFreezeBN(self):\n\t\tprint(\"Freezing Mean/Var of BatchNorm2D.\")\n\t\tprint(\"Freezing Weight/Bias of BatchNorm2D.\")\n\t\tfor m in self.modules():\n\t\t\tif isinstance(m, nn.BatchNorm2d):\n\t\t\t\tm.eval()\n\t\t\t\tm.weight.requires_grad = False\n\t\t\t\tm.bias.requires_grad = False", "def set_normalization(self, dataloader):\n mean = 0\n square = 0\n for (data_in, _) in dataloader:\n mean += data_in.mean()\n square += data_in.pow(2).mean()\n\n mean /= len(dataloader)\n square /= len(dataloader)\n std = np.sqrt(square - mean ** 2)\n\n # The input data should be roughly normally distributed after\n # passing through net_fixed.\n self.scale_in.bias.data.fill_(- mean / std)\n self.scale_in.weight.data.fill_(1 / std)", "def vanilla_unet(input_shape=(512, 512, 3), base_depth=32, drop_rate=0,\n seed=1337):\n input = Input(input_shape)\n\n conv1 = Conv2D(base_depth, 3, activation='relu', padding='same')(input)\n bn1 = BatchNormalization()(conv1)\n drop1 = Dropout(drop_rate, seed=seed)(bn1)\n conv2 = Conv2D(base_depth, 3, activation='relu', padding='same')(drop1)\n bn2 = BatchNormalization()(conv2)\n mp1 = MaxPooling2D(pool_size=(2, 2))(bn2)\n\n conv3 = Conv2D(base_depth*2, 3, activation='relu', padding='same')(mp1)\n bn3 = BatchNormalization()(conv3)\n drop2 = Dropout(drop_rate, seed=seed+1)(bn3)\n conv4 = Conv2D(base_depth*2, 3, activation='relu', padding='same')(drop2)\n bn4 = BatchNormalization()(conv4)\n mp2 = MaxPooling2D(pool_size=(2, 2))(bn4)\n\n conv5 = Conv2D(base_depth*4, 3, activation='relu', padding='same')(mp2)\n bn5 = BatchNormalization()(conv5)\n drop3 = Dropout(drop_rate, seed=seed+2)(bn5)\n conv6 = Conv2D(base_depth*4, 3, activation='relu', padding='same')(drop3)\n bn6 = BatchNormalization()(conv6)\n mp3 = MaxPooling2D(pool_size=(2, 2))(bn6)\n\n conv7 = Conv2D(base_depth*8, 3, activation='relu', padding='same')(mp3)\n bn7 = BatchNormalization()(conv7)\n drop4 = Dropout(drop_rate, seed=seed+3)(bn7)\n conv8 = Conv2D(base_depth*8, 3, activation='relu', padding='same')(drop4)\n bn8 = BatchNormalization()(conv8)\n mp4 = MaxPooling2D(pool_size=(2, 2))(bn8)\n\n conv9 = Conv2D(base_depth*16, 3, activation='relu', padding='same')(mp4)\n bn9 = BatchNormalization()(conv9)\n drop5 = Dropout(drop_rate, seed=seed+4)(bn9)\n deconv0 = Conv2DTranspose(base_depth*16, 3, activation='relu',\n padding='same')(drop5)\n bn10 = BatchNormalization()(deconv0)\n up1 = UpSampling2D(interpolation='bilinear')(bn10)\n\n deconv1 = Conv2DTranspose(base_depth*8, 3, activation='relu',\n padding='same')(up1)\n bn11 = BatchNormalization()(deconv1)\n cat1 = concatenate([bn11, bn8])\n drop6 = Dropout(drop_rate, seed=seed+5)(cat1)\n deconv2 = Conv2DTranspose(base_depth*8, 3, activation='relu',\n padding='same')(drop6)\n bn12 = BatchNormalization()(deconv2)\n up2 = 
UpSampling2D(interpolation='bilinear')(bn12)\n\n deconv3 = Conv2DTranspose(base_depth*4, 3, activation='relu',\n padding='same')(up2)\n bn13 = BatchNormalization()(deconv3)\n cat2 = concatenate([bn13, bn6])\n drop7 = Dropout(drop_rate, seed=seed+6)(cat2)\n deconv4 = Conv2DTranspose(base_depth*4, 3, activation='relu',\n padding='same')(drop7)\n bn14 = BatchNormalization()(deconv4)\n up3 = UpSampling2D(interpolation='bilinear')(bn14)\n\n deconv5 = Conv2DTranspose(base_depth*2, 3, activation='relu',\n padding='same')(up3)\n bn15 = BatchNormalization()(deconv5)\n cat3 = concatenate([bn15, bn4])\n drop8 = Dropout(drop_rate, seed=seed+7)(cat3)\n deconv6 = Conv2DTranspose(base_depth*2, 3, activation='relu',\n padding='same')(drop8)\n bn16 = BatchNormalization()(deconv6)\n up4 = UpSampling2D(interpolation='bilinear')(bn16)\n\n deconv7 = Conv2DTranspose(base_depth, 3, activation='relu',\n padding='same')(up4)\n bn17 = BatchNormalization()(deconv7)\n cat4 = concatenate([bn17, bn2])\n drop7 = Dropout(drop_rate, seed=seed+8)(cat4)\n deconv8 = Conv2DTranspose(base_depth, 3, activation='relu',\n padding='same')(drop7)\n bn18 = BatchNormalization()(deconv8)\n\n out = Conv2DTranspose(1, 1, activation='sigmoid', padding='same')(bn18)\n\n return Model(input, out)", "def apply_batch_normalization(self, layer):\n if type(layer) is not BatchNormalization:\n raise ValueError('The `layer` must be neoml.Dnn.BatchNormalization.')\n\n self._internal.apply_batch_normalization(layer._internal)", "def reduce_dropout(self):\n def reduce_p(layer):\n if isinstance(layer, nn.Dropout):\n layer.p = 0\n self.apply(reduce_p)", "def layer_norm(input, normalized_shape, weight, bias, eps=1e-5):\n return FunctionLib.apply(\n 'LayerNorm', input.device, [input, weight, bias],\n axis=input.ndimension() - len(normalized_shape), epsilon=eps)", "def _optimization_closure(self, iteration, step):\n aug = self._get_augmentation(iteration)\n if iteration == self.num_iter_per_step - 1:\n reg_noise_std = 0\n aug = 0\n else:\n reg_noise_std = (1 / 1000.) 
* (iteration // 400)\n # creates left_net_inputs and right_net_inputs by adding small noise\n clean_nets_inputs = [clean_net_input[aug] + (clean_net_input[aug].clone().normal_() * reg_noise_std)\n for clean_net_input in self.clean_nets_inputs]\n watermark_net_input = self.watermark_net_input[aug] # + (self.watermark_net_input[aug].clone().normal_() * reg_noise_std)\n mask_net_input = self.mask_net_input[aug]\n # applies the nets\n self.clean_nets_outputs = [clean_net(clean_net_input) for clean_net, clean_net_input\n in zip(self.clean_nets, clean_nets_inputs)]\n self.watermark_net_output = self.watermark_net(watermark_net_input)\n self.mask_net_output = self.mask_net(mask_net_input)\n self.total_loss = 0\n self.blur = 0\n\n self.total_loss += sum(self.l1_loss(self.watermark_net_output * self.mask_net_output +\n clean_net_output * (1 - self.mask_net_output), image_torch[aug])\n for clean_net_output, image_torch in zip(self.clean_nets_outputs, self.images_torch))\n self.total_loss.backward(retain_graph=True)", "def layer_normalize_(self, ref_point: 'ModelParameters', order=2):\n # in-place normalize each parameter\n for layer_idx, parameter in enumerate(self.parameters, 0):\n parameter *= (ref_point.layer_norm(layer_idx, order) / self.layer_norm(layer_idx, order))", "def remove_norms(self):\n dev = next(self.parameters()).device\n for name, module in self.named_modules():\n try:\n nn.utils.remove_spectral_norm(module, name='weight_hh_l0')\n print(\"Removed spectral norm from {}\".format(name))\n except:\n pass\n try:\n nn.utils.remove_spectral_norm(module, name='weight_hh_l0_reverse')\n print(\"Removed spectral norm from {}\".format(name))\n except:\n pass\n try:\n nn.utils.remove_weight_norm(module)\n print(\"Removed wnorm from {}\".format(name))\n except:\n pass\n self.to(device=dev)", "def normalize_layer(tensor, name, norm_use='bn'):\n if norm_use == \"gn\":\n x = GroupNorm(name=name + 'gn', groups=32)(tensor)\n elif norm_use == \"bn\":\n x = tf.keras.layers.BatchNormalization(axis=-1, name=name + 'bn', epsilon=1.001e-5)(tensor)\n elif norm_use == \"rbn\":\n x = tf.keras.layers.BatchNormalization(axis=-1, name=name + 'rbn', epsilon=1.001e-5, renorm=True)(tensor)\n elif norm_use == \"in\":\n x = InstanceNormalization(axis=-1, name=name + 'in')(tensor)\n else:\n x = tensor\n return x", "def remove_weight_norm_(self):\n\n def _remove_weight_norm(m):\n try:\n torch.nn.utils.remove_weight_norm(m)\n except ValueError:\n return\n\n self.apply(_remove_weight_norm)", "def batch_normalization(x, phase_train, out_size):\n\n\twith tf.variable_scope('bn'):\n\t\tbeta = tf.Variable(tf.constant(0.0, shape=[out_size]), name='beta', trainable=True)\n\t\tgamma = tf.Variable(tf.constant(1.0, shape=[out_size]), name='gamma', trainable=True)\n\t\tbatch_mean, batch_var = tf.nn.moments(x, [0], name='moments')\n\t\tema = tf.train.ExponentialMovingAverage(decay=0.5)\n\n\t\tdef mean_var_with_update():\n\t\t\tema_apply_op = ema.apply([batch_mean, batch_var])\n\t\t\twith tf.control_dependencies([ema_apply_op]):\n\t\t\t\treturn tf.identity(batch_mean), tf.identity(batch_var)\n\n\t\tmean, var = tf.cond(phase_train, mean_var_with_update, lambda: (ema.average(batch_mean), ema.average(batch_var)))\n\t\tnormed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)\n\treturn normed", "def baseUNet(input_shape,conv_depth,n_classes,init_w,dropout):\n inputs = Input(input_shape)\n\n c1=Conv2D(conv_depth,\n (3,3),\n activation='relu',\n padding='same',\n kernel_initializer=init_w)(inputs)\n\n c1=Conv2D(conv_depth,\n 
(3,3),\n activation='relu',\n padding=\"same\",\n kernel_initializer=init_w)(c1)\n\n # pool down to next layer\n pool1 = MaxPooling2D((2,2),strides = (2,2))(c1)\n\n conv_depth *= 2\n\n # convolute down again\n conv2 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(pool1)\n\n conv2 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(conv2)\n \n # pool down again\n pool2 = MaxPooling2D((2,2),strides = (2,2))(conv2)\n\n conv_depth *= 2 \n\n # Convolution\n conv3 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(pool2)\n\n conv3 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(conv3)\n \n # pool down\n pool3 = MaxPooling2D((2,2),strides = (2,2))(conv3)\n\n conv_depth *= 2 \n # Convolution\n conv4 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(pool3)\n\n conv4 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(conv4)\n \n # pool down \n pool4 = MaxPooling2D((2,2),strides = (2,2))(conv4)\n\n conv_depth *=2 \n\n # Convolution\n conv5 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(pool4)\n\n conv5 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(conv5)\n\n drop = Dropout(dropout)(conv5)\n\n conv_depth /= 2\n conv_depth = int(conv_depth) \n # do upsampling\n up1 = UpSampling2D(size = (2,2))(drop)\n conv6 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(up1)\n \n # add in skip info\n cat1 = concatenate([conv4,conv6],axis = 3)\n conv6 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(cat1)\n\n conv6 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(conv6)\n\n conv_depth /= 2\n conv_depth = int(conv_depth)\n # do upsampling\n up2 = UpSampling2D(size = (2,2))(conv6)\n conv7 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(up2)\n \n # add in skip info\n cat2 = concatenate([conv3,conv7],axis = 3)\n conv7 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(cat2)\n\n conv7 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(conv7)\n \n conv_depth /= 2\n conv_depth = int(conv_depth)\n # do upsampling\n up3 = UpSampling2D(size = (2,2))(conv7)\n conv8 = Conv2D(conv_depth,\n activation ='relu',\n kernel_size=(3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(up3)\n \n # add in skip info\n cat3 = concatenate([conv2,conv8],axis = 3)\n conv8 = Conv2D(conv_depth,\n activation ='relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(cat3)\n\n 
conv8 = Conv2D(conv_depth,\n activation ='relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(conv8)\n \n conv_depth /= 2\n conv_depth = int(conv_depth)\n # do upsampling\n up4 = UpSampling2D(size = (2,2))(conv8)\n conv9 = Conv2D(conv_depth,\n activation ='relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(up4)\n \n # add in skip info\n cat4 = concatenate([c1,conv9],axis = 3)\n conv9 = Conv2D(conv_depth,\n activation ='relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(cat4)\n\n conv9 = Conv2D(conv_depth,\n activation ='relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(conv9)\n\n outputs = Conv2D(n_classes, 1, activation = 'softmax')(conv9)\n\n return outputs,inputs", "def norm_layer( x, training, name):\n top = tf.layers.batch_normalization( x, \n axis=3, # channels last \n training=training,\n name=name )\n return top", "def layer_postprocess(layer_input, layer_output, hparams):\n return layer_prepostprocess(\n layer_input,\n layer_output,\n sequence=hparams.layer_postprocess_sequence,\n dropout_rate=hparams.layer_prepostprocess_dropout,\n norm_type=hparams.norm_type,\n depth=None,\n epsilon=hparams.norm_epsilon,\n dropout_broadcast_dims=comma_separated_string_to_integer_list(\n getattr(hparams, \"layer_prepostprocess_dropout_broadcast_dims\", \"\")),\n default_name=\"layer_postprocess\")", "def _normalize(self, dataset):\n if self.max is None: # if we are normalizing the training set\n self.max, self.min = dataset.max(), dataset.min() # find max, min value for each columns\n for row in dataset.index: # for each row in dataset\n for col in self.features: # for each feature in the instance (exclude target)\n dataset.at[row, col] = (dataset.at[row, col] - self.min[col]) / (self.max[col] - self.min[col]) if col != \"Bias\" else 1", "def normalize(self):\n d = learning_utils.convert_data_to_2d(self._data)\n d = learning_utils.normalize_2d(d)\n self._data = learning_utils.convert_data_to_1d(d)", "def crossing_minimization(self):\n self.layer_sweep()", "def batch_normalization(x, phase_train, out_size):\r\n with tf.variable_scope('bn'):\r\n beta = tf.Variable(tf.constant(0.0, shape=[out_size]),\r\n name='beta', trainable=True)\r\n gamma = tf.Variable(tf.constant(1.0, shape=[out_size]),\r\n name='gamma', trainable=True)\r\n batch_mean, batch_var = tf.nn.moments(x, [0], name='moments')\r\n ema = tf.train.ExponentialMovingAverage(decay=0.5)\r\n\r\n def mean_var_with_update():\r\n ema_apply_op = ema.apply([batch_mean, batch_var])\r\n with tf.control_dependencies([ema_apply_op]):\r\n return tf.identity(batch_mean), tf.identity(batch_var)\r\n\r\n mean, var = tf.cond(phase_train,\r\n mean_var_with_update,\r\n lambda: (ema.average(batch_mean), ema.average(batch_var)))\r\n normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)\r\n return normed", "def remove_norms(model: \"SqueezeWave\") -> \"SqueezeWave\":\n squeeze_wave = model\n for i, wn_layer in enumerate(squeeze_wave.wn_layers):\n squeeze_wave.wn_layers[i] = WN.remove_norms(wn_layer)\n return squeeze_wave", "def __init__(self, hidden_dims, input_dim=3*32*32, num_classes=10,\n dropout=0, use_batchnorm=False, reg=0.0,\n weight_scale=1e-2, dtype=np.float32, seed=None):\n self.use_batchnorm = use_batchnorm\n self.use_dropout = dropout > 0\n self.reg = reg\n self.num_layers = 1 + len(hidden_dims)\n self.dtype = dtype\n self.params = {}\n \n dims 
= [input_dim] + hidden_dims + [num_classes]\n\n # initialise all parameters (weight, bias, gamma, beta)\n for i in range(len(dims)-1):\n w = 'W' + str(i+1)\n b = 'b' + str(i+1)\n self.params[w] = np.random.randn(dims[i], dims[i+1])*weight_scale\n self.params[b] = np.zeros(dims[i+1])\n \n if self.use_batchnorm:\n for i in range(len(dims)-2):\n #no gamma and beta for last layer\n gamma = 'gamma' + str(i+1)\n beta = 'beta' + str(i+1)\n self.params[gamma] = np.ones(dims[i+1])\n self.params[beta] = np.zeros(dims[i+1])\n \n \n\n # When using dropout we need to pass a dropout_param dictionary to each\n # dropout layer so that the layer knows the dropout probability and the mode\n # (train / test). You can pass the same dropout_param to each dropout layer.\n self.dropout_param = {}\n if self.use_dropout:\n self.dropout_param = {'mode': 'train', 'p': dropout}\n if seed is not None:\n self.dropout_param['seed'] = seed\n \n # With batch normalization we need to keep track of running means and\n # variances, so we need to pass a special bn_param object to each batch\n # normalization layer. You should pass self.bn_params[0] to the forward pass\n # of the first batch normalization layer, self.bn_params[1] to the forward\n # pass of the second batch normalization layer, etc.\n self.bn_params = []\n if self.use_batchnorm:\n self.bn_params = [{'mode': 'train'} for i in xrange(self.num_layers - 1)]\n \n # Cast all parameters to the correct datatype\n for k, v in self.params.iteritems():\n self.params[k] = v.astype(dtype)", "def normalize_data(self):\n self.x_mean, self.x_std = du.get_mean_std(self.x_train)\n self.x_train = du.normalize(self.x_train, self.x_mean, self.x_std)\n if self.x_test is not None and self.y_test is not None:\n self.x_test = du.normalize(self.x_test, self.x_mean, self.x_std)\n self.normalized_data = True" ]
[ "0.7032795", "0.6391331", "0.6328661", "0.62512356", "0.6220091", "0.6030707", "0.60122514", "0.58638924", "0.58638924", "0.58625156", "0.58561295", "0.58298075", "0.5814626", "0.5718249", "0.5709967", "0.56730735", "0.56632066", "0.56548005", "0.5653561", "0.5648844", "0.56356865", "0.56321776", "0.56281394", "0.5602574", "0.5576368", "0.55759454", "0.557152", "0.556975", "0.5561446", "0.55572677" ]
0.66046226
1
Performs several types of attention: 1) multi-headed attention from `from_tensor` to `to_tensor`. By default, this is an implementation of multi-headed attention based on "Attention is All You Need". If `from_tensor` and `to_tensor` are the same, then this is self-attention. Each timestep in `from_tensor` attends to the corresponding sequence in `to_tensor`, and returns a fixed-width vector. This function first projects `from_tensor` into a "query" tensor and `to_tensor` into "key" and "value" tensors. These are (effectively) a list of tensors of length `num_attention_heads`, where each tensor is of shape [batch_size, seq_length, size_per_head]. Then, the query and key tensors are dot-producted and scaled. These are softmaxed to obtain attention probabilities. The value tensors are then interpolated by these probabilities, then concatenated back to a single tensor and returned. In practice, the multi-headed attention is done with transposes and reshapes rather than actual separate tensors. 2) mixed attention with span-based dynamic convolution over `from_tensor` and `to_tensor`. If conv_type is set to "sdconv", the layer performs mixed attention, a mixture of self-attention and span-based dynamic convolution: the function additionally generates a span-aware "key" tensor, which is multiplied with the "query" tensor to produce a span-based dynamic convolution kernel. The kernel then convolves the "value" tensor, and the result is concatenated with the self-attention heads' output for further processing.
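To make the span-based dynamic convolution branch concrete, the following is a minimal, self-contained NumPy sketch of the idea for a single head, mirroring the conv_method="dot" path: a per-position kernel is derived from the product of the span-aware key and the query, softmax-normalized over the kernel width, and used to mix a centered window of value vectors. The names sdconv_single_head and w_kernel are illustrative assumptions; the real layer defined below builds the span-aware key with a separable 1-D convolution and operates on batched, multi-head tensors.

# Minimal sketch of span-based dynamic convolution for one head (illustration only;
# names and shapes are simplified assumptions, not the API of the layer below).
import numpy as np

def softmax(x, axis=-1):
    x = x - x.max(axis=axis, keepdims=True)
    e = np.exp(x)
    return e / e.sum(axis=axis, keepdims=True)

def sdconv_single_head(query, span_key, value, w_kernel, k=9):
    """query, span_key, value: [T, H]; w_kernel: [H, k]. Returns [T, H]."""
    T, H = value.shape
    # Per-position dynamic kernel from the query / span-key interaction ("dot" method).
    kernels = softmax((query * span_key) @ w_kernel, axis=-1)          # [T, k]
    # Pad so every position sees a centered window of k value vectors.
    pad = (k - 1) // 2
    padded = np.pad(value, ((pad, pad), (0, 0)))                       # [T + k - 1, H]
    windows = np.stack([padded[i:i + T] for i in range(k)], axis=1)    # [T, k, H]
    # Convolve: kernel-weighted sum over each local window.
    return np.einsum('tk,tkh->th', kernels, windows)                   # [T, H]

T, H, k = 6, 8, 9
rng = np.random.default_rng(0)
out = sdconv_single_head(rng.normal(size=(T, H)), rng.normal(size=(T, H)),
                         rng.normal(size=(T, H)), rng.normal(size=(H, k)), k=k)
print(out.shape)  # (6, 8)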
def attention_layer(from_tensor, to_tensor, attention_mask=None, num_attention_heads=1, size_per_head=512, query_act=None, key_act=None, value_act=None, attention_probs_dropout_prob=0.0, initializer_range=0.02, do_return_2d_tensor=False, batch_size=None, from_seq_length=None, to_seq_length=None, conv_kernel_size=9, head_ratio=2, conv_type=1, from_tensor_mask=None, to_tensor_mask=None, conv_method='gate'): def transpose_for_scores(input_tensor, batch_size, num_attention_heads, seq_length, width): output_tensor = tf.reshape( input_tensor, [batch_size, seq_length, num_attention_heads, width]) output_tensor = tf.transpose(output_tensor, [0, 2, 1, 3]) return output_tensor def reshape_for_conv(input_tensor, batch_size, num_attention_heads, seq_length, width): output_tensor = tf.reshape( input_tensor, [batch_size, seq_length, num_attention_heads*width]) return output_tensor from_shape = bert_utils.get_shape_list(from_tensor, expected_rank=[2, 3]) to_shape = bert_utils.get_shape_list(to_tensor, expected_rank=[2, 3]) if len(from_shape) != len(to_shape): raise ValueError( "The rank of `from_tensor` must match the rank of `to_tensor`.") if len(from_shape) == 3: batch_size = from_shape[0] from_seq_length = from_shape[1] to_seq_length = to_shape[1] elif len(from_shape) == 2: if batch_size is None or from_seq_length is None or to_seq_length is None: raise ValueError( "When passing in rank 2 tensors to attention_layer, the values " "for `batch_size`, `from_seq_length`, and `to_seq_length` " "must all be specified.") # Scalar dimensions referenced here: # B = batch size (number of sequences) # F = `from_tensor` sequence length # T = `to_tensor` sequence length # N = `num_attention_heads` # H = `size_per_head` from_tensor_2d = bert_utils.reshape_to_matrix(from_tensor) to_tensor_2d = bert_utils.reshape_to_matrix(to_tensor) new_num_attention_heads = int(num_attention_heads/head_ratio) if new_num_attention_heads<1: head_ratio=num_attention_heads num_attention_heads=1 else: num_attention_heads=new_num_attention_heads # `query_layer` = [B*F, N*H] query_layer = tf.layers.dense( from_tensor_2d, num_attention_heads * size_per_head, activation=query_act, name="query", kernel_initializer=create_initializer(initializer_range)) # `key_layer` = [B*T, N*H] key_layer = tf.layers.dense( to_tensor_2d, num_attention_heads * size_per_head, activation=key_act, name="key", kernel_initializer=create_initializer(initializer_range)) # `value_layer` = [B*T, N*H] value_layer = tf.layers.dense( to_tensor_2d, num_attention_heads * size_per_head, activation=value_act, name="value", kernel_initializer=create_initializer(initializer_range)) if conv_type in ['sdconv']: # [B,T, N*H] key_conv_attn_layer_input = reshape_for_conv(to_tensor_2d, batch_size, num_attention_heads*head_ratio, to_seq_length, size_per_head) if from_tensor_mask is not None and to_tensor_mask is not None: to_tensor_2d_mask = tf.cast(to_tensor_mask, tf.float32)[:, :, None] from_tensor_2d_mask = tf.cast(from_tensor_mask, tf.float32)[:, :, None] key_conv_attn_layer_input *= to_tensor_2d_mask tf.logging.info("== apply conv seq-masking on sequence padding ==") key_conv_attn_layer = tf.layers.separable_conv1d(key_conv_attn_layer_input, num_attention_heads * size_per_head, conv_kernel_size, padding='same', activation=value_act, depthwise_initializer=create_initializer(1/conv_kernel_size), # depthwise_initializer=create_initializer(initializer_range), pointwise_initializer=create_initializer(initializer_range), name="conv_attn_key") if from_tensor_mask is not None and 
to_tensor_mask is not None: key_conv_attn_layer *= to_tensor_2d_mask tf.logging.info("== apply conv seq-masking on sequence padding ==") # [B*T, N*H] key_conv_attn_layer = bert_utils.reshape_to_matrix(key_conv_attn_layer) if conv_method == 'dot': conv_attn_layer = tf.multiply(key_conv_attn_layer, query_layer) tf.logging.info("== apply conv dot query_layer ==") elif conv_method == 'gate': query_gate = tf.layers.dense( from_tensor_2d, num_attention_heads * size_per_head, activation=value_act, name="conv_query_gate", kernel_initializer=create_initializer(initializer_range)) conv_gated = tf.nn.sigmoid(tf.nn.dropout(query_gate, 1-attention_probs_dropout_prob)) conv_attn_layer = key_conv_attn_layer * conv_gated + query_layer * (1-conv_gated) tf.logging.info("== apply conv gate query_layer ==") else: conv_attn_layer = tf.multiply(key_conv_attn_layer, query_layer) tf.logging.info("== apply conv dot query_layer ==") # [B*T, N*K] conv_kernel_layer = tf.layers.dense( conv_attn_layer, num_attention_heads * conv_kernel_size, activation=value_act, name="conv_attn_kernel", kernel_initializer=create_initializer(initializer_range)) # [B*T*N,K,1] conv_kernel_layer = tf.reshape(conv_kernel_layer, [batch_size*to_seq_length*num_attention_heads, conv_kernel_size, 1]) # conv_kernel_layer = tf.nn.softmax(conv_kernel_layer, axis=1) attention_probs = tf.exp(tf.nn.log_softmax(conv_kernel_layer+1e-10, axis=-1)) paddings = tf.constant([[0, 0,], [int((conv_kernel_size-1)/2), int((conv_kernel_size-1)/2)],[0,0]]) conv_out_layer = tf.layers.dense( to_tensor_2d, num_attention_heads * size_per_head, activation=value_act, name="conv_attn_point", kernel_initializer=create_initializer(initializer_range)) # [B,T, N*H] conv_out_layer = tf.reshape(conv_out_layer,[batch_size,to_seq_length,num_attention_heads * size_per_head]) if from_tensor_mask is not None and to_tensor_mask is not None: conv_out_layer *= to_tensor_2d_mask tf.logging.info("== apply conv seq-masking on sequence padding ==") conv_out_layer = tf.pad(conv_out_layer, paddings, "CONSTANT") # unfold [B,T, N*H, K] unfold_conv_out_layer = tf.stack( [tf.slice(conv_out_layer, [0, i, 0],[batch_size,to_seq_length,num_attention_heads * size_per_head]) for i in range(conv_kernel_size)],-1) # following only work for gpu version # conv_out_layer = tf.reshape(conv_out_layer,[batch_size,to_seq_length,num_attention_heads * size_per_head,1]) # unfold_conv_out_layer = tf.extract_image_patches(images=conv_out_layer, sizes=[1, conv_kernel_size, 1, 1], strides=[1, 1, 1, 1], rates=[1, 1, 1, 1], padding='SAME') conv_out_layer = tf.reshape(unfold_conv_out_layer, [batch_size*to_seq_length*num_attention_heads ,size_per_head, conv_kernel_size]) conv_out_layer = tf.matmul(conv_out_layer, conv_kernel_layer) conv_out_layer = tf.reshape(conv_out_layer, [batch_size*to_seq_length, num_attention_heads*size_per_head]) # `query_layer` = [B, N, F, H] query_layer = transpose_for_scores(query_layer, batch_size, num_attention_heads, from_seq_length, size_per_head) # `key_layer` = [B, N, T, H] key_layer = transpose_for_scores(key_layer, batch_size, num_attention_heads, to_seq_length, size_per_head) # Take the dot product between "query" and "key" to get the raw # attention scores. 
# `attention_scores` = [B, N, F, T] attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True) attention_scores = tf.multiply(attention_scores, 1.0 / math.sqrt(float(size_per_head))) if attention_mask is not None: # `attention_mask` = [B, 1, F, T] attention_mask = tf.expand_dims(attention_mask, axis=[1]) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. adder = (1.0 - tf.cast(attention_mask, tf.float32)) * -10000.0 # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. attention_scores += adder # Normalize the attention scores to probabilities. # `attention_probs` = [B, N, F, T] # attention_probs = tf.nn.softmax(attention_scores) attention_probs = tf.exp(tf.nn.log_softmax(attention_scores+1e-10)) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = dropout(attention_probs, attention_probs_dropout_prob) # `value_layer` = [B, T, N, H] value_layer = tf.reshape( value_layer, [batch_size, to_seq_length, num_attention_heads, size_per_head]) # `value_layer` = [B, N, T, H] value_layer = tf.transpose(value_layer, [0, 2, 1, 3]) # `context_layer` = [B, N, F, H] context_layer = tf.matmul(attention_probs, value_layer) # `context_layer` = [B, F, N, H] context_layer = tf.transpose(context_layer, [0, 2, 1, 3]) if conv_type in ["sdconv"]: # only applicable for self-attention, will cause error if from_seq_length not equal to_seq_length assert from_seq_length==to_seq_length conv_out = tf.reshape( conv_out_layer, [batch_size , from_seq_length, num_attention_heads , size_per_head]) context_layer = tf.concat([context_layer, conv_out],2) num_attention_heads = num_attention_heads*2 if do_return_2d_tensor: # `context_layer` = [B*F, N*H] context_layer = tf.reshape( context_layer, [batch_size * from_seq_length, num_attention_heads * size_per_head]) else: # `context_layer` = [B, F, N*H] context_layer = tf.reshape( context_layer, [batch_size, from_seq_length, num_attention_heads * size_per_head]) return context_layer, attention_probs, value_layer
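For orientation, here is a hedged usage sketch of the layer above on rank-3 inputs. It assumes a TF1-style graph environment (the layer uses tf.layers and tf.logging), that attention_layer and its bert_utils helpers are importable, and that the shapes chosen here are purely illustrative rather than taken from the original repository.

# Hypothetical call pattern for the layer above (shapes are illustrative assumptions).
import tensorflow as tf  # assumes TF1.x semantics, e.g. tensorflow.compat.v1

batch_size, seq_len, hidden = 8, 128, 768
num_heads, head_ratio = 12, 2
size_per_head = hidden // num_heads

x = tf.random_normal([batch_size, seq_len, hidden])
attn_mask = tf.ones([batch_size, seq_len, seq_len])      # attend everywhere
seq_mask = tf.ones([batch_size, seq_len], tf.int32)      # no padding tokens

context, probs, values = attention_layer(
    from_tensor=x, to_tensor=x,
    attention_mask=attn_mask,
    num_attention_heads=num_heads,
    size_per_head=size_per_head,
    do_return_2d_tensor=False,
    batch_size=batch_size,
    from_seq_length=seq_len, to_seq_length=seq_len,
    conv_kernel_size=9, head_ratio=head_ratio,
    conv_type="sdconv",
    from_tensor_mask=seq_mask, to_tensor_mask=seq_mask)

# With conv_type="sdconv", the self-attention runs with num_heads // head_ratio heads
# and is concatenated with an equal number of convolution heads, so `context` has shape
# [batch, seq_len, 2 * (num_heads // head_ratio) * size_per_head].
print(context.shape)  # (8, 128, 768)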
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def attention_layer(from_tensor,\n to_tensor,\n attention_mask=None,\n num_attention_heads=1,\n size_per_head=512,\n query_act=None,\n key_act=None,\n value_act=None,\n attention_probs_dropout_prob=0.0,\n initializer_range=0.02,\n do_return_2d_tensor=False,\n batch_size=None,\n from_seq_length=None,\n to_seq_length=None):\n\n def transpose_for_scores(input_tensor, batch_size, num_attention_heads,\n seq_length, width):\n output_tensor = tf.reshape(\n input_tensor, [batch_size, seq_length, num_attention_heads, width])\n\n output_tensor = tf.transpose(output_tensor, [0, 2, 1, 3])\n return output_tensor\n\n from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])\n to_shape = get_shape_list(to_tensor, expected_rank=[2, 3])\n\n if len(from_shape) != len(to_shape):\n raise ValueError(\n \"The rank of `from_tensor` must match the rank of `to_tensor`.\")\n\n if len(from_shape) == 3:\n batch_size = from_shape[0]\n from_seq_length = from_shape[1]\n to_seq_length = to_shape[1]\n elif len(from_shape) == 2:\n if (batch_size is None or from_seq_length is None or to_seq_length is None):\n raise ValueError(\n \"When passing in rank 2 tensors to attention_layer, the values \"\n \"for `batch_size`, `from_seq_length`, and `to_seq_length` \"\n \"must all be specified.\")\n\n # Scalar dimensions referenced here:\n # B = batch size (number of sequences)\n # F = `from_tensor` sequence length\n # T = `to_tensor` sequence length\n # N = `num_attention_heads`\n # H = `size_per_head`\n\n from_tensor_2d = reshape_to_matrix(from_tensor)\n to_tensor_2d = reshape_to_matrix(to_tensor)\n\n # `query_layer` = [B*F, N*H]\n query_layer = tf.layers.dense(\n from_tensor_2d,\n num_attention_heads * size_per_head,\n activation=query_act,\n name=\"query\",\n kernel_initializer=create_initializer(initializer_range))\n\n # `key_layer` = [B*T, N*H]\n key_layer = tf.layers.dense(\n to_tensor_2d,\n num_attention_heads * size_per_head,\n activation=key_act,\n name=\"key\",\n kernel_initializer=create_initializer(initializer_range))\n\n # `value_layer` = [B*T, N*H]\n value_layer = tf.layers.dense(\n to_tensor_2d,\n num_attention_heads * size_per_head,\n activation=value_act,\n name=\"value\",\n kernel_initializer=create_initializer(initializer_range))\n\n # `query_layer` = [B, N, F, H]\n query_layer = transpose_for_scores(query_layer, batch_size,\n num_attention_heads, from_seq_length,\n size_per_head)\n\n # `key_layer` = [B, N, T, H]\n key_layer = transpose_for_scores(key_layer, batch_size, num_attention_heads,\n to_seq_length, size_per_head)\n\n # Take the dot product between \"query\" and \"key\" to get the raw\n # attention scores.\n # `attention_scores` = [B, N, F, T]\n attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)\n attention_scores = tf.multiply(attention_scores,\n 1.0 / math.sqrt(float(size_per_head)))\n\n if attention_mask is not None:\n # `attention_mask` = [B, 1, F, T]\n attention_mask = tf.expand_dims(attention_mask, axis=[1])\n\n # Since attention_mask is 1.0 for positions we want to attend and 0.0 for\n # masked positions, this operation will create a tensor which is 0.0 for\n # positions we want to attend and -10000.0 for masked positions.\n adder = (1.0 - tf.cast(attention_mask, tf.float32)) * -10000.0\n\n # Since we are adding it to the raw scores before the softmax, this is\n # effectively the same as removing these entirely.\n attention_scores += adder\n\n # Normalize the attention scores to probabilities.\n # `attention_probs` = [B, N, F, T]\n attention_probs = 
tf.nn.softmax(attention_scores)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = dropout(attention_probs, attention_probs_dropout_prob)\n\n # `value_layer` = [B, T, N, H]\n value_layer = tf.reshape(\n value_layer,\n [batch_size, to_seq_length, num_attention_heads, size_per_head])\n\n # `value_layer` = [B, N, T, H]\n value_layer = tf.transpose(value_layer, [0, 2, 1, 3])\n\n # `context_layer` = [B, N, F, H]\n context_layer = tf.matmul(attention_probs, value_layer)\n\n # `context_layer` = [B, F, N, H]\n context_layer = tf.transpose(context_layer, [0, 2, 1, 3])\n\n if do_return_2d_tensor:\n # `context_layer` = [B*F, N*V]\n context_layer = tf.reshape(\n context_layer,\n [batch_size * from_seq_length, num_attention_heads * size_per_head])\n else:\n # `context_layer` = [B, F, N*V]\n context_layer = tf.reshape(\n context_layer,\n [batch_size, from_seq_length, num_attention_heads * size_per_head])\n\n return context_layer", "def call(self, inputs):\n (from_tensor, to_tensor, attention_mask) = tf_utils.unpack_inputs(inputs)\n\n # Scalar dimensions referenced here:\n # B = batch size (number of sequences)\n # F = `from_tensor` sequence length\n # T = `to_tensor` sequence length\n # N = `num_attention_heads`\n # H = `size_per_head`\n # `query_tensor` = [B, F, N ,H]\n query_tensor = self.query_dense(from_tensor)\n\n # `key_tensor` = [B, T, N, H]\n key_tensor = self.key_dense(to_tensor)\n\n # `value_tensor` = [B, T, N, H]\n value_tensor = self.value_dense(to_tensor)\n\n # Take the dot product between \"query\" and \"key\" to get the raw\n # attention scores.\n attention_scores = tf.einsum(\"BTNH,BFNH->BNFT\", key_tensor, query_tensor)\n attention_scores = tf.multiply(attention_scores,\n 1.0 / math.sqrt(float(self.size_per_head)))\n\n if attention_mask is not None:\n # `attention_mask` = [B, 1, F, T]\n attention_mask = tf.expand_dims(attention_mask, axis=[1])\n\n # Since attention_mask is 1.0 for positions we want to attend and 0.0 for\n # masked positions, this operation will create a tensor which is 0.0 for\n # positions we want to attend and -10000.0 for masked positions.\n adder = (1.0 - tf.cast(attention_mask, attention_scores.dtype)) * -10000.0\n\n # Since we are adding it to the raw scores before the softmax, this is\n # effectively the same as removing these entirely.\n attention_scores += adder\n\n # Normalize the attention scores to probabilities.\n # `attention_probs` = [B, N, F, T]\n attention_probs = tf.nn.softmax(attention_scores)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = self.attention_probs_dropout(attention_probs)\n\n # `context_layer` = [B, F, N, H]\n context_tensor = tf.einsum(\"BNFT,BTNH->BFNH\", attention_probs, value_tensor)\n\n return context_tensor, attention_scores", "def create_attention_mask_from_input_mask(from_tensor, to_mask):\n from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])\n batch_size = from_shape[0]\n from_seq_length = from_shape[1]\n \n to_shape = get_shape_list(to_mask, expected_rank=2)\n to_seq_length = to_shape[1]\n \n to_mask = tf.cast(\n tf.reshape(to_mask, [batch_size, 1, to_seq_length]), tf.float32)\n \n # We don't assume that `from_tensor` is a mask (although it could be). 
We\n # don't actually care if we attend *from* padding tokens (only *to* padding)\n # tokens so we create a tensor of all ones.\n #\n # `broadcast_ones` = [batch_size, from_seq_length, 1]\n broadcast_ones = tf.ones(\n shape=[batch_size, from_seq_length, 1], dtype=tf.float32)\n \n # Here we broadcast along two dimensions to create the mask.\n mask = broadcast_ones * to_mask\n \n return mask", "def multi_head_attention_forward(\n query,\n key,\n value,\n embed_dim_to_check,\n num_heads,\n in_proj_weight,\n in_proj_bias,\n out_proj_weight,\n out_proj_bias,\n dropout_p=0.,\n training=True,\n need_weights=True,\n key_padding_mask=None,\n attn_mask=None,\n use_separate_proj_weight=False,\n q_proj_weight=None,\n k_proj_weight=None,\n v_proj_weight=None,\n):\n tgt_len, bsz, embed_dim = query.size()\n src_len = key.size(0)\n assert embed_dim == embed_dim_to_check\n assert src_len == value.size(0) and key.size(1) == value.size(1)\n head_dim = embed_dim // num_heads\n\n def to_qkv(input, weight, bias, num_proj=1):\n \"\"\"Compute input projections via a single matmul.\"\"\"\n qkv_size = (tgt_len, bsz, num_proj * num_heads, head_dim)\n outputs = linear(input, weight, bias).reshape_(qkv_size)\n outputs = outputs.permute(1, 2, 0, 3)\n return outputs if num_proj == 1 else outputs.chunk(num_proj, 1)\n\n q, k, v = None, None, None\n if not use_separate_proj_weight:\n if (query is key) and (key is value):\n # Parallelism for self attention.\n q, k, v = to_qkv(query, in_proj_weight, in_proj_bias, 3)\n elif key is value:\n # Parallelism for encode-decoder attention.\n q_proj_weight = in_proj_weight[:embed_dim, :]\n kv_proj_weight = in_proj_weight[embed_dim:, :]\n q_proj_bias = kv_proj_bias = in_proj_bias\n if in_proj_bias is not None:\n q_proj_bias = in_proj_bias[:embed_dim]\n kv_proj_bias = in_proj_bias[embed_dim:]\n q = to_qkv(query, q_proj_weight, q_proj_bias)\n k, v = to_qkv(key, kv_proj_weight, kv_proj_bias, 2)\n if q is None:\n q_proj_bias = k_proj_bias = v_proj_bias = in_proj_bias\n if use_separate_proj_weight and q_proj_weight is None:\n q_proj_weight = in_proj_weight[:embed_dim, :]\n k_proj_weight = in_proj_weight[embed_dim:embed_dim * 2, :]\n v_proj_weight = in_proj_weight[embed_dim * 2:, :]\n if in_proj_bias is not None:\n q_proj_bias = in_proj_bias[:embed_dim]\n k_proj_bias = in_proj_bias[embed_dim:embed_dim * 2]\n v_proj_bias = in_proj_bias[embed_dim * 2:]\n q = to_qkv(query, q_proj_weight, q_proj_bias)\n k = to_qkv(key, k_proj_weight, k_proj_bias)\n v = to_qkv(value, v_proj_weight, v_proj_bias)\n q *= float(head_dim) ** -0.5\n attn = q.bmm(k.transpose(-2, -1))\n assert attn.size() == (bsz, num_heads, tgt_len, src_len)\n if attn_mask is not None:\n if attn_mask.dtype == 'bool' or attn_mask.dtype == 'uint8':\n attn.masked_fill_(attn_mask, float('-inf'))\n else:\n attn += attn_mask\n if key_padding_mask is not None:\n if key_padding_mask.size() != attn.size():\n key_padding_mask.reshape_((bsz, 1, 1, src_len))\n attn.masked_fill_(key_padding_mask, float('-inf'))\n attn = softmax(attn, dim=-1, inplace=True)\n attn = dropout(attn, p=dropout_p, training=training)\n output = attn.bmm(v).permute(2, 0, 1, 3)\n output = output.reshape_((tgt_len, bsz, embed_dim))\n output = linear(output, out_proj_weight, out_proj_bias)\n weights = attn.mean(dim=1) if need_weights else None\n return output, weights", "def attention(inp, scope, e_dim, past, config):\n assert inp.shape.ndims == 3 # input should be of shape [batch, seqlen, embeddings] # [batch, sequence, features]\n assert e_dim % config.num_heads == 0 # 
embedding can be split in heads\n\n if past is not None:\n assert past.shape.ndims == 5 # [batch, 2, heads, seqlen, emebeddings]\n\n def split_heads(x):\n out = split_into_n_states(x, config.num_heads)\n out = tf.transpose(out, [0, 2, 1, 3])\n return out\n\n def merge_heads(x):\n out = merge_n_states(tf.transpose(x, [0, 2, 1, 3]))\n return out\n\n def mask_attention_weights(w):\n # w should have shape [batches, heads, dst_seq, src_seq], where information flows from scr to dst\n _, _, nd, ns = shapes_list(w)\n b = attention_mask(nd, ns, w.dtype)\n b = tf.reshape(b, [1, 1, nd, ns])\n w = w * b - tf.cast(1e10, w.dtype) * (1 - b)\n return w\n\n def multihead_attention(q, k, v):\n w = tf.matmul(q, k, transpose_b=True)\n w *= tf.rsqrt(tf.cast(v.shape[-1].value, w.dtype))\n\n # mask attention weights\n w = mask_attention_weights(w)\n w = softmax_with_reduce_max(w)\n out = tf.matmul(w, v)\n return out\n\n with tf.variable_scope(scope):\n c = conv1d(inp, 'convolutional_attention', e_dim * 3)\n q, k, v = map(split_heads, tf.split(c, 3, axis=2))\n present = tf.stack([k, v], axis=1)\n if past is not None:\n # there is a stack below it\n pk, pv = tf.unstack(past, axis=1)\n k = tf.concat([pk, k], axis=2)\n v = tf.concat([pv, v], axis=2)\n\n attn = multihead_attention(q, k, v)\n attn = merge_heads(attn)\n\n out = conv1d(attn, 'convolutional_projection', e_dim)\n return out, present", "def attention(query, key, value, adj_matrix, mask=None,dropout=None):\n # query.shape = (batch, h, max_length, d_e)\n # key.shape = (batch, h, max_length, max_length, d_e)\n # value.shape = (batch, h, max_length, d_e)\n # out_scores.shape = (batch, h, max_length, max_length)\n # in_scores.shape = (batch, h, max_length, max_length)\n\n d_e = query.size(-1)\n out_scores = torch.einsum('bhmd,bhmnd->bhmn', query, key) / math.sqrt(d_e)\n in_scores = torch.einsum('bhnd,bhmnd->bhnm', query, key) / math.sqrt(d_e)\n\n if mask is not None:\n mask = mask.unsqueeze(1).repeat(1, query.shape[1], query.shape[2], 1)\n out_scores = out_scores.masked_fill(mask == 0, -np.inf)\n in_scores = in_scores.masked_fill(mask == 0, -np.inf)\n\n out_attn = F.softmax(out_scores, dim=-1)\n in_attn = F.softmax(in_scores, dim=-1)\n diag_attn = torch.diag_embed(torch.diagonal(out_attn, dim1=-2, dim2=-1), dim1=-2, dim2=-1)\n\n message = out_attn + in_attn - diag_attn\n\n # add the diffusion caused by distance\n message = message * adj_matrix.unsqueeze(1)\n\n if dropout is not None:\n message = dropout(message)\n\n # message.shape = (batch, h, max_length, max_length), value.shape = (batch, h, max_length, d_k)\n node_hidden = torch.einsum('bhmn,bhnd->bhmd', message, value)\n edge_hidden = message.unsqueeze(-1) * key\n\n return node_hidden, edge_hidden, message", "def forward(self, query, key, value, key_padding_mask=None,\n need_weights=True, attn_mask=None):\n # type: (Tensor, Tensor, Tensor, Optional[Tensor], bool, Optional[Tensor]) -> Tuple[Tensor, Optional[Tensor]]\n if not self._qkv_same_embed_dim:\n return _linear_multi_head_attention_forward(\n query, key, value, self.embed_dim, self.num_heads,\n self.in_proj_weight, self.in_proj_bias,\n self.bias_k, self.bias_v, self.bias_e, self.bias_f, self.add_zero_attn,\n self.dropout, self.out_proj.weight, self.out_proj.bias,\n training=self.training,\n key_padding_mask=key_padding_mask, need_weights=need_weights,\n attn_mask=attn_mask, use_separate_proj_weight=True,\n q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight,\n v_proj_weight=self.v_proj_weight, e_proj_weight=self.e_proj_weight,\n 
f_proj_weight=self.f_proj_weight)\n else:\n return _linear_multi_head_attention_forward(\n query, key, value, self.embed_dim, self.num_heads,\n self.in_proj_weight, self.in_proj_bias,\n self.bias_k, self.bias_v, self.bias_e, self.bias_f, self.add_zero_attn,\n self.dropout, self.out_proj.weight, self.out_proj.bias,\n training=self.training,\n key_padding_mask=key_padding_mask, need_weights=need_weights,\n attn_mask=attn_mask, e_proj_weight=self.e_proj_weight,\n f_proj_weight=self.f_proj_weight)", "def _compute_attention(\n self, query_tensor, key_tensor, value_tensor, attention_mask=None\n ):\n # Take the dot product between \"query\" and \"key\" to get the raw\n # attention scores.\n attention_scores = tf.einsum( # pragma: no cover\n self._dot_product_equation, key_tensor, query_tensor\n )\n attention_scores = tf.multiply( # pragma: no cover\n attention_scores, 1.0 / math.sqrt(float(self._key_size))\n )\n\n # Normalize the attention scores to probabilities.\n # `attention_scores` = [B, N, T, S]\n attention_scores = self._masked_softmax(\n attention_scores, attention_mask\n ) # pragma: no cover\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_scores_dropout = self._dropout_layer(\n attention_scores\n ) # pragma: no cover\n\n # `context_layer` = [B, T, N, H]\n attention_output = tf.einsum(\n self._combine_equation, attention_scores_dropout, value_tensor\n ) # pragma: no cover\n return attention_output, attention_scores # pragma: no cover", "def forward(self, queries, keys, mask=None, attn_prior=None, speaker_embed=None):\n if speaker_embed is not None:\n keys = keys + self.key_spk_proj(speaker_embed.unsqueeze(1).expand(\n -1, keys.shape[-1], -1\n )).transpose(1, 2)\n queries = queries + self.query_spk_proj(speaker_embed.unsqueeze(1).expand(\n -1, queries.shape[-1], -1\n )).transpose(1, 2)\n keys_enc = self.key_proj(keys) # B x n_attn_dims x T2\n queries_enc = self.query_proj(queries)\n\n # Simplistic Gaussian Isotopic Attention\n attn = (queries_enc[:, :, :, None] - keys_enc[:, :, None]) ** 2 # B x n_attn_dims x T1 x T2\n attn = -self.temperature * attn.sum(1, keepdim=True)\n\n if attn_prior is not None:\n #print(f\"AlignmentEncoder \\t| mel: {queries.shape} phone: {keys.shape} mask: {mask.shape} attn: {attn.shape} attn_prior: {attn_prior.shape}\")\n attn = self.log_softmax(attn) + torch.log(attn_prior[:, None] + 1e-8)\n #print(f\"AlignmentEncoder \\t| After prior sum attn: {attn.shape}\")\n\n attn_logprob = attn.clone()\n\n if mask is not None:\n attn.data.masked_fill_(mask.permute(0, 2, 1).unsqueeze(2), -float(\"inf\"))\n\n attn = self.softmax(attn) # softmax along T2\n return attn, attn_logprob", "def apply_attention(inputs,\n attention_mode=None,\n attention_in=None,\n use_5d_mode=False,\n data_format='channels_last'):\n assert data_format == 'channels_last'\n\n h_ch_loc = 2 if use_5d_mode else 1\n\n if attention_mode == 'peer':\n attn = softmax_merge_peer_attentions(attention_in, data_format)\n else:\n attn = tf.reduce_mean(inputs, [h_ch_loc, h_ch_loc+1])\n attn = tf.layers.dense(\n inputs=attn,\n units=inputs.shape[-1],\n kernel_initializer=tf.random_normal_initializer(stddev=.01))\n attn = tf.math.sigmoid(attn)\n channel_attn = tf.expand_dims(tf.expand_dims(attn, h_ch_loc), h_ch_loc)\n\n inputs = tf.multiply(inputs, channel_attn)\n\n return inputs", "def compute_attention(t1, t2):\n dim = t1.shape.as_list()[2]\n init = tf.constant_initializer(1.0 / dim)\n\n t1_logits = 
ops.last_dim_weighted_sum(t1, \"t1_w\")\n t2_logits = ops.last_dim_weighted_sum(t2, \"t2_w\")\n\n dot_w = tf.get_variable(\n \"dot_w\", shape=dim, initializer=init, dtype=tf.float32)\n # Compute x * dot_weights first, then batch mult with x\n dots = t1 * tf.expand_dims(tf.expand_dims(dot_w, 0), 0)\n dot_logits = tf.matmul(dots, t2, transpose_b=True)\n\n return dot_logits + \\\n tf.expand_dims(t1_logits, 2) + \\\n tf.expand_dims(t2_logits, 1)", "def transformer_model(input_tensor,\n attention_mask=None,\n hidden_size=768,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n intermediate_act_fn=gelu,\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n initializer_range=0.02,\n do_return_all_layers=False,\n conv_kernel_size=3,\n head_ratio=2,\n conv_type=\"noconv\",\n **kargs):\n if hidden_size % num_attention_heads != 0:\n raise ValueError(\n \"The hidden size (%d) is not a multiple of the number of attention \"\n \"heads (%d)\" % (hidden_size, num_attention_heads))\n\n attention_head_size = int(hidden_size / num_attention_heads)\n input_shape = bert_utils.get_shape_list(input_tensor, expected_rank=3)\n batch_size = input_shape[0]\n seq_length = input_shape[1]\n input_width = input_shape[2]\n\n # The Transformer performs sum residuals on all layers so the input needs\n # to be the same as the hidden size.\n if input_width != hidden_size:\n raise ValueError(\"The width of the input tensor (%d) != hidden size (%d)\" %\n (input_width, hidden_size))\n\n # We keep the representation as a 2D tensor to avoid re-shaping it back and\n # forth from a 3D tensor to a 2D tensor. Re-shapes are normally free on\n # the GPU/CPU but may not be free on the TPU, so we want to minimize them to\n # help the optimizer.\n prev_output = bert_utils.reshape_to_matrix(input_tensor)\n\n attn_maps = []\n all_layer_outputs = []\n all_value_outputs = []\n for layer_idx in range(num_hidden_layers):\n with tf.variable_scope(\"layer_%d\" % layer_idx):\n with tf.variable_scope(\"attention\"):\n attention_heads = []\n with tf.variable_scope(\"self\"):\n attention_head, probs, value_layer = attention_layer(\n from_tensor=prev_output,\n to_tensor=prev_output,\n attention_mask=attention_mask,\n num_attention_heads=num_attention_heads,\n size_per_head=attention_head_size,\n attention_probs_dropout_prob=attention_probs_dropout_prob,\n initializer_range=initializer_range,\n do_return_2d_tensor=True,\n batch_size=batch_size,\n from_seq_length=seq_length,\n to_seq_length=seq_length,\n conv_kernel_size=conv_kernel_size,\n head_ratio=head_ratio,\n conv_type=conv_type,\n from_tensor_mask=kargs.get('from_tensor_mask', None),\n to_tensor_mask=kargs.get('to_tensor_mask', None),\n conv_method=kargs.get('conv_method', \"dot\"))\n attention_heads.append(attention_head)\n attn_maps.append(probs)\n all_value_outputs.append(value_layer)\n\n attention_output = None\n if len(attention_heads) == 1:\n attention_output = attention_heads[0]\n else:\n # In the case where we have other sequences, we just concatenate\n # them to the self-attention head before the projection.\n attention_output = tf.concat(attention_heads, axis=-1)\n\n with tf.variable_scope(\"output\"):\n # Run a linear projection of `hidden_size` then add a residual\n # with `layer_input`.\n attention_output = tf.layers.dense(\n attention_output,\n hidden_size,\n kernel_initializer=create_initializer(initializer_range))\n \n\n attention_output = dropout(attention_output, hidden_dropout_prob)\n attention_output = layer_norm(attention_output + 
prev_output)\n\n # The activation is only applied to the \"intermediate\" hidden layer.\n with tf.variable_scope(\"intermediate\"):\n intermediate_output = tf.layers.dense(\n attention_output,\n intermediate_size,\n activation=intermediate_act_fn,\n kernel_initializer=create_initializer(initializer_range))\n # Down-project back to `hidden_size` then add the residual.\n with tf.variable_scope(\"output\"):\n prev_output = tf.layers.dense(\n intermediate_output,\n hidden_size,\n kernel_initializer=create_initializer(initializer_range))\n\n prev_output = dropout(prev_output, hidden_dropout_prob)\n prev_output = layer_norm(prev_output + attention_output)\n all_layer_outputs.append(prev_output)\n\n attn_maps = tf.stack(attn_maps, 0)\n if do_return_all_layers:\n final_outputs = []\n for layer_output in all_layer_outputs:\n final_output = bert_utils.reshape_from_matrix(layer_output, input_shape)\n final_outputs.append(final_output)\n return final_outputs, attn_maps, all_value_outputs\n else:\n final_output = bert_utils.reshape_from_matrix(prev_output, input_shape)\n return final_output, attn_maps, all_value_outputs", "def call(self, inputs):\n (input_tensor, attention_mask) = tf_utils.unpack_inputs(inputs)\n with tf.name_scope('attention'):\n attention_output, attention_scores = self.attention_layer(\n from_tensor=input_tensor,\n to_tensor=input_tensor,\n attention_mask=attention_mask)\n with tf.name_scope('output'):\n attention_output = self.attention_output_dense(attention_output)\n attention_output = self.attention_dropout(attention_output)\n # Use float32 in keras layer norm and the gelu activation in the\n # intermediate dense layer for numeric stability\n attention_output = self.attention_layer_norm(input_tensor +\n attention_output)\n if self.float_type == tf.float16:\n attention_output = tf.cast(attention_output, tf.float16)\n\n with tf.name_scope('intermediate'):\n intermediate_output = self.intermediate_dense(attention_output)\n if self.float_type == tf.float16:\n intermediate_output = tf.cast(intermediate_output, tf.float16)\n\n with tf.name_scope('output'):\n layer_output = self.output_dense(intermediate_output)\n layer_output = self.output_dropout(layer_output)\n # Use float32 in keras layer norm for numeric stability\n layer_output = self.output_layer_norm(layer_output + attention_output)\n if self.float_type == tf.float16:\n layer_output = tf.cast(layer_output, tf.float16)\n return layer_output, attention_scores", "def dot_attention(query, memory, mem_mask, hidden_size,\n ln=False, num_heads=1, cache=None, dropout=None,\n out_map=True, scope=None, count_mask=None):\n with tf.variable_scope(scope or \"dot_attention\", reuse=tf.AUTO_REUSE,\n dtype=tf.as_dtype(dtype.floatx())):\n if memory is None:\n # suppose self-attention from queries alone\n h = func.linear(query, hidden_size * 3, ln=ln, scope=\"qkv_map\")\n q, k, v = tf.split(h, 3, -1)\n\n if cache is not None:\n k = tf.concat([cache['k'], k], axis=1)\n v = tf.concat([cache['v'], v], axis=1)\n cache = {\n 'k': k,\n 'v': v,\n }\n else:\n q = func.linear(query, hidden_size, ln=ln, scope=\"q_map\")\n if cache is not None and ('mk' in cache and 'mv' in cache):\n k, v = cache['mk'], cache['mv']\n else:\n k = func.linear(memory, hidden_size, ln=ln, scope=\"k_map\")\n v = func.linear(memory, hidden_size, ln=ln, scope=\"v_map\")\n\n if cache is not None:\n cache['mk'] = k\n cache['mv'] = v\n\n q = func.split_heads(q, num_heads)\n k = func.split_heads(k, num_heads)\n v = func.split_heads(v, num_heads)\n\n q *= (hidden_size // num_heads) ** 
(-0.5)\n\n # q * k => attention weights\n logits = tf.matmul(q, k, transpose_b=True)\n\n if mem_mask is not None:\n logits += mem_mask\n\n # modifying 'weights = tf.nn.softmax(logits)' to include the counting information.\n # --------\n logits = logits - tf.reduce_max(logits, -1, keepdims=True)\n exp_logits = tf.exp(logits)\n\n # basically, the count considers how many states are dropped (i.e. gate value 0s)\n if count_mask is not None:\n exp_logits *= count_mask\n\n exp_sum_logits = tf.reduce_sum(exp_logits, -1, keepdims=True)\n weights = exp_logits / exp_sum_logits\n # --------\n\n dweights = util.valid_apply_dropout(weights, dropout)\n\n # weights * v => attention vectors\n o = tf.matmul(dweights, v)\n o = func.combine_heads(o)\n\n if out_map:\n o = func.linear(o, hidden_size, ln=ln, scope=\"o_map\")\n\n results = {\n 'weights': weights,\n 'output': o,\n 'cache': cache\n }\n\n return results", "def forward(self,\n node_states,\n from_idx,\n to_idx,\n graph_idx,\n n_graphs,\n similarity='dotproduct',\n edge_features=None,\n node_features=None):\n aggregated_messages = self._compute_aggregated_messages(\n node_states, from_idx, to_idx, edge_features=edge_features)\n\n cross_graph_attention = batch_block_pair_attention(\n node_states, graph_idx, n_graphs, similarity=similarity)\n attention_input = node_states - cross_graph_attention\n\n return self._compute_node_update(node_states,\n [aggregated_messages, attention_input],\n node_features=node_features)", "def forward(self, queries, keys, mask=None, attn_prior=None, speaker_embed=None):\n if speaker_embed is not None:\n keys = keys + self.key_spk_proj(speaker_embed.unsqueeze(1).expand(-1, keys.shape[-1], -1)).transpose(1, 2)\n queries = queries + self.query_spk_proj(speaker_embed.unsqueeze(1).expand(-1, queries.shape[-1], -1)).transpose(1, 2)\n keys_enc = self.key_proj(keys)\n queries_enc = self.query_proj(queries)\n attn = (queries_enc[:, :, :, None] - keys_enc[:, :, None]) ** 2\n attn = -self.temperature * attn.sum(1, keepdim=True)\n if attn_prior is not None:\n attn = self.log_softmax(attn) + torch.log(attn_prior[:, None] + 1e-08)\n attn_logprob = attn.clone()\n if mask is not None:\n attn.data.masked_fill_(mask.permute(0, 2, 1).unsqueeze(2), -float('inf'))\n attn = self.softmax(attn)\n return attn, attn_logprob", "def attention(query, use_attention=False):\n attn_weights = []\n ds = [] # Results of attention reads will be stored here.\n for i in xrange(num_heads):\n with variable_scope.variable_scope(\"Attention_%d\" % i):\n y = rnn_cell._linear(query, attention_vec_size, True)\n y = array_ops.reshape(y, [-1, 1, 1, attention_vec_size])\n # Attention mask is a softmax of v^T * tanh(...).\n s = math_ops.reduce_sum(\n v[i] * math_ops.tanh(hidden_features[i] + y), [2, 3])\n if use_attention is False: # apply mean pooling\n weights = tf.tile(sequence_length, tf.pack([attn_length]))\n weights = array_ops.reshape(weights, tf.shape(s))\n a = array_ops.ones(tf.shape(s), dtype=dtype) / math_ops.to_float(weights)\n # a = array_ops.ones(tf.shape(s), dtype=dtype) / math_ops.to_float(tf.shape(s)[1])\n else:\n a = nn_ops.softmax(s)\n attn_weights.append(a)\n # Now calculate the attention-weighted vector d.\n d = math_ops.reduce_sum(\n array_ops.reshape(a, [-1, attn_length, 1, 1]) * hidden,\n [1, 2])\n ds.append(array_ops.reshape(d, [-1, attn_size]))\n return attn_weights, ds", "def task_specific_attention(inputs, output_size, sequence_lengths,\n initializer=layers.xavier_initializer(),\n activation_fn=tf.tanh, scope=None):\n assert 
len(inputs.get_shape()) == 3 and inputs.get_shape()[-1].value is not None\n\n with tf.variable_scope(scope or 'attention') as scope:\n attention_context_vector = tf.get_variable(name='attention_context_vector',\n shape=[output_size],\n initializer=initializer,\n dtype=tf.float32)\n \n input_projection = layers.fully_connected(inputs, output_size,\n activation_fn=activation_fn,\n scope=scope)\n\n vector_attn = tf.reduce_sum(tf.multiply(input_projection, attention_context_vector), axis=2) \n mask = tf.sequence_mask(sequence_lengths, dtype=tf.float32) \n attention_weights = tf.nn.softmax(vector_attn, axis=1)\n attention_weights = attention_weights*mask\n norms = tf.reduce_sum(attention_weights, axis = 1, keepdims = True) + 1e-6 \n attention_weights = attention_weights / norms\n attention_weights = tf.expand_dims(attention_weights, axis = 2) \n \n weighted_projection = inputs*attention_weights\n outputs = tf.reduce_sum(weighted_projection, axis=1)\n\n return outputs", "def call(self, inputs, attention_mask=None):\n inputs_len = len(inputs) # pragma: no cover\n if inputs_len > 3 or inputs_len < 2: # pragma: no cover\n raise ValueError( # pragma: no cover\n \"Expects inputs list of length 2 or 3, namely [query, value] or \"\n \"[query, value, key]. \"\n \"Given length: %d\" % inputs_len\n )\n query = inputs[0] # pragma: no cover\n value = inputs[1] # pragma: no cover\n key = inputs[2] if inputs_len == 3 else value # pragma: no cover\n\n # N = `num_attention_heads`\n # H = `size_per_head`\n # `query_tensor` = [B, T, N ,H]\n query_tensor = self._query_dense(query) # pragma: no cover\n\n # `key_tensor` = [B, S, N, H]\n key_tensor = self._key_dense(key) # pragma: no cover\n\n # `value_tensor` = [B, S, N, H]\n value_tensor = self._value_dense(value) # pragma: no cover\n\n attention_output, attention_scores = self._compute_attention(\n query_tensor, key_tensor, value_tensor, attention_mask\n ) # pragma: no cover\n attention_output = self._output_dense(attention_output) # pragma: no cover\n\n if self._return_attention_scores: # pragma: no cover\n return attention_output, attention_scores # pragma: no cover\n return attention_output # pragma: no cover", "def Anatomical_attention_gate(featureMap1,featureMap2):\n ndims = len(featureMap1.get_shape()) - 2\n assert ndims in [1, 2, 3], \"ndims should be one of 1, 2, or 3. 
found: %d\" % ndims\n# input_channels = featureMap1.get_shape().as_list()[-1]\n# batch_size1 = tf.shape(down_in)[0]\n# nf = tf.min(batch_size0,batch_size1)\n featureMap = concatenate([featureMap1, featureMap2])\n Conv = getattr(KL, 'Conv%dD' % ndims)\n tensorweight1 = Conv(1, kernel_size=1, padding='same',\n kernel_initializer='he_normal', use_bias = True, bias_initializer='zeros',strides=1,activation='sigmoid')(featureMap)\n# tensorweight1 = Activation('relu')(tensorweight1)\n w_featureMap1 = Multiply()([featureMap1,tensorweight1])\n tensorweight2 = Conv(1, kernel_size=1, padding='same',\n kernel_initializer='he_normal', use_bias = True, bias_initializer='zeros',strides=1,activation='sigmoid')(featureMap)\n# tensorweight2 = Activation('relu')(tensorweight2)\n w_featureMap2 = Multiply()([featureMap2,tensorweight2])\n w_featureMap = Add()([w_featureMap1,w_featureMap2])\n return w_featureMap", "def fusion_with_peer_attention(inputs,\n index=None,\n attention_mode=None,\n attention_in=None,\n use_5d_mode=False,\n data_format='channels_last'):\n assert data_format == 'channels_last'\n\n if FLAGS.precision == 'bfloat16':\n dtype = tf.bfloat16\n else:\n dtype = tf.float32\n\n if use_5d_mode:\n h_channel_loc = 2\n conv_function = asn.conv3d_same_padding\n else:\n h_channel_loc = 1\n conv_function = asn.conv2d_fixed_padding\n\n # If only 1 input. Apply peer-attention to the connection when used.\n if len(inputs) == 1:\n if attention_mode:\n inputs[0] = apply_attention(inputs[0],\n attention_mode,\n attention_in,\n use_5d_mode,\n data_format)\n return inputs[0]\n\n # get smallest spatial size and largest channels\n sm_size = [10000, 10000]\n lg_channel = 0\n for inp in inputs:\n # assume batch X height x width x channels\n sm_size[0] = min(sm_size[0], inp.shape[h_channel_loc])\n sm_size[1] = min(sm_size[1], inp.shape[h_channel_loc+1])\n # Note that, when using object inputs, object channel sizes are usually big.\n # Since we do not want the object channel size to increase the number of\n # parameters for every fusion, we exclude it when computing lg_channel.\n if inp.shape[-1] > lg_channel and inp.shape[-1] != FLAGS.num_object_classes: # pylint: disable=line-too-long\n lg_channel = inp.shape[3]\n\n # loads or creates weight variables to fuse multiple inputs\n weights_shape = [len(inputs)]\n if index is None or FLAGS.model_edge_weights == '[]':\n initial_weight_values = lambda: tf.random.truncated_normal( # pylint: disable=g-long-lambda\n weights_shape,\n mean=0.0,\n stddev=0.01,\n dtype=tf.float32)\n weights = tf.Variable(\n initial_value=initial_weight_values, trainable=True,\n name='agg_weights', dtype=tf.float32)\n else:\n model_edge_weights = json.loads(FLAGS.model_edge_weights)\n initial_weights_after_sigmoid = np.asarray(\n model_edge_weights[index][0]).astype('float32')\n # Initial_weights_after_sigmoid is never 0, as the initial weights are\n # based the results of a successful connectivity search.\n initial_weights = -np.log(1. / initial_weights_after_sigmoid - 1.)\n\n weights = tf.Variable(\n initial_value=initial_weights, trainable=False,\n name='agg_weights', dtype=tf.float32)\n weights = tf.math.sigmoid(tf.cast(weights, dtype))\n\n # Compute weighted inputs. 
We group inputs with the same channels.\n per_channel_inps = dict({0: []})\n for i, inp in enumerate(inputs):\n input_shape = inp.shape\n if input_shape[h_channel_loc] != sm_size[0] or input_shape[h_channel_loc+1] != sm_size[1]: # pylint: disable=line-too-long\n assert sm_size[0] != 0\n ratio = (input_shape[h_channel_loc] + 1) // sm_size[0]\n if use_5d_mode:\n inp = tf.layers.max_pooling3d(\n inp, [1, ratio, ratio], [1, ratio, ratio],\n padding='same', data_format=data_format)\n else:\n inp = tf.layers.max_pooling2d(\n inp, [ratio, ratio], ratio, padding='same', data_format=data_format)\n\n if input_shape[-1] in per_channel_inps:\n per_channel_inps[input_shape[-1]].append(weights[i] * inp)\n else:\n per_channel_inps.update({input_shape[-1]: [weights[i] * inp]})\n\n # Implementation of connectivity with peer-attention\n if attention_mode:\n for key, channel_inps in per_channel_inps.items():\n for idx in range(len(channel_inps)):\n with tf.variable_scope('Connection_' + str(key) + '_' + str(idx)):\n channel_inps[idx] = apply_attention(channel_inps[idx],\n attention_mode,\n attention_in,\n use_5d_mode,\n data_format)\n\n # Adding 1x1 conv layers (to match channel size) and fusing all inputs.\n # We add inputs with the same channels first before applying 1x1 conv to save\n # memory.\n inps = []\n for key, channel_inps in per_channel_inps.items():\n if len(channel_inps) < 1:\n continue\n if len(channel_inps) == 1:\n if key == lg_channel:\n inp = channel_inps[0]\n else:\n inp = conv_function(\n channel_inps[0],\n lg_channel,\n kernel_size=1,\n strides=1,\n data_format=data_format)\n inps.append(inp)\n else:\n if key == lg_channel:\n inp = tf.add_n(channel_inps)\n else:\n inp = conv_function(\n tf.add_n(channel_inps),\n lg_channel,\n kernel_size=1,\n strides=1,\n data_format=data_format)\n inps.append(inp)\n\n return tf.add_n(inps)", "def forward(self, query, key=None, value=None, attn_mask=None, cache=None):\n if attn_mask is not None:\n # Support bool or int mask\n attn_mask = _convert_attention_mask(attn_mask, query.dtype)\n\n out = incubate_f.fused_multi_head_attention(\n x=query,\n qkv_weight=self.qkv_weight,\n linear_weight=self.linear_weight,\n pre_layer_norm=self.normalize_before,\n pre_ln_scale=self.pre_ln_scale,\n pre_ln_bias=self.pre_ln_bias,\n ln_scale=self.ln_scale,\n ln_bias=self.ln_bias,\n pre_ln_epsilon=self._epsilon,\n qkv_bias=self.qkv_bias,\n linear_bias=self.linear_bias,\n cache_kv=cache,\n attn_mask=attn_mask,\n dropout_rate=self.dropout_rate,\n attn_dropout_rate=self.attn_dropout_rate,\n ln_epsilon=self._epsilon,\n training=self.training,\n ring_id=self._ring_id,\n num_heads=self.num_heads,\n transpose_qkv_wb=self.transpose_qkv_wb,\n name=self.name,\n )\n return out", "def multi_head_attention(queries, keys, values, attn_bias, d_key, d_value, d_model, pos_enc,\n n_head=1, dropout_rate=0., cache=None, static_kv=False):\n keys = queries if keys is None else keys\n values = keys if values is None else values\n if not (len(queries.shape) == len(keys.shape) == len(values.shape) == 3):\n raise ValueError(\n \"Inputs: quries, keys and values should all be 3-D tensors.\"\n )\n\n def __compute_qkv(queries, keys, values, n_head, d_key, d_value):\n \"\"\"\n Add linear projection to queries, keys, and values.\n \"\"\"\n q = layers.fc(input=queries, size=d_key * n_head,\n bias_attr=False, num_flatten_dims=2)\n fc_layer = wrap_layer_with_block(\n layers.fc, fluid.default_main_program().current_block().parent_idx\n ) if cache is not None and static_kv else layers.fc\n k = 
fc_layer(input=keys, size=d_key * n_head,\n bias_attr=False, num_flatten_dims=2)\n v = fc_layer(input=values, size=d_value * n_head,\n bias_attr=False, num_flatten_dims=2)\n return q, k, v\n\n def __split_heads_qkv(queries, keys, values, n_head, d_key, d_value)\n \"\"\"\n Reshape input tensors at the last dimension to split multi-heads\n and then transpose. Specifically, transform the input tensor with shape\n [bs, max_sequence_length, n_head * hidden_dim] to the output tensor\n with shape [bs, n_head, max_sequence_length, hidden_dim].\n \"\"\"\n # The value 0 in shape attr means copying the corresponding dimension\n # size of the input as the output dimension size.\n reshaped_q = layers.reshape(\n x=queries, shape=[0, 0, n_head, d_key], inplace=True)\n # permute the dimensions into:\n # [batch_size, n_head, max_sequence_len, hidden_size_per_head]\n q = layers.transpose(x=reshaped_q, perm=[0, 2, 1, 3])\n # For encoder-decoder attention in inference, insert the ops and vars\n # into global block to use as cache among beam search.\n reshape_layer = wrap_layer_with_block(\n layers.reshape,\n fluid.default_main_program().current_block(\n ).parent_idx) if cache is not None and static_kv else layers.reshape\n transpose_layer = wrap_layer_with_block(\n layers.transpose,\n fluid.default_main_program().current_block().\n parent_idx) if cache is not None and static_kv else layers.transpose\n reshaped_k = reshape_layer(\n x=keys, shape=[0, 0, n_head, d_key], inplace=True)\n k = transpose_layer(x=reshaped_k, perm=[0, 2, 1, 3])\n reshaped_v = reshape_layer(\n x=values, shape=[0, 0, n_head, d_value], inplace=True)\n v = transpose_layer(x=reshaped_v, perm=[0, 2, 1, 3])\n\n if cache is not None: # only for faster inference\n cache_, i = cache\n if static_kv: # For encoder-decoder attention in inference\n cache_k, cache_v = cache_[\"static_k\"], cache_[\"static_v\"]\n # To init the static_k and static_v in global block.\n static_cache_init = wrap_layer_with_block(\n layers.assign,\n fluid.default_main_program().current_block().parent_idx)\n static_cache_init(\n k,\n fluid.default_main_program().global_block().var(\n \"static_k_%d\" % i))\n static_cache_init(\n v,\n fluid.default_main_program().global_block().var(\n \"static_v_%d\" % i))\n k, v = cache_k, cache_v\n else: # For decoder self-attention in inference\n # use cache and concat time steps.\n cache_k, cache_v = cache_[\"k\"], cache_[\"v\"]\n k = layers.concat([cache_k, k], axis=2)\n v = layers.concat([cache_v, v], axis=2)\n cache_[\"k\"], cache_[\"v\"] = (k, v)\n return q, k, v\n\n def __combine_heads(x):\n \"\"\"\n Transpose and then reshape the last two dimensions of inpunt tensor x\n so that it becomes one dimension, which is reverse to __split_heads.\n \"\"\"\n if len(x.shape) != 4:\n raise ValueError(\"Input(x) should be a 4-D Tensor.\")\n\n trans_x = layers.transpose(x, perm=[0, 2, 1, 3])\n # The value 0 in shape attr means copying the corresponding dimension\n # size of the input as the output dimension size.\n return layers.reshape(\n x=trans_x,\n shape=[0, 0, trans_x.shape[2] * trans_x.shape[3]],\n inplace=True)\n \n def _shift(BD):\n \"\"\"\n -3 -2 -1 0 1 2\n -3 -2 -1 0 1 2\n -3 -2 -1 0 1 2\n\n to\n 0 1 2\n -1 0 1\n -2 -1 0\n\n :param BD: batch_size x n_head x max_len x 2max_len\n :return: batch_size x n_head x max_len x max_len\n \"\"\"\n bsz, n_head, max_len, _ = BD.size()\n zero_pad = layers.zeros(shape=(bsz, n_head, max_len, 1))\n BD = layers.reshape(x=layers.concat([BD, zero_pad], axis=-1),\n shape=(bsz, n_head, -1, max_len))\n BD = 
layers.reshape(x=BD[:, :, :-1], shape=(bsz, n_head, max_len, -1))\n BD = BD[:, :, :, max_len:]\n return BD\n\n def _transpose_shift(E):\n \"\"\"\n -3 -2 -1 0 1 2\n -30 -20 -10 00 10 20\n -300 -200 -100 000 100 200\n\n to\n 0 -10 -200\n 1 00 -100\n 2 10 000\n\n\n :param E: batch_size x n_head x max_len x 2max_len\n :return: batch_size x n_head x max_len x max_len\n \"\"\"\n bsz, n_head, max_len, _ = E.size()\n zero_pad = layers.zeros(shape=(bsz, n_head, max_len, 1))\n E = layers.reshape(x=layers.concat([E, zero_pad], axis=-1),\n shape=(bsz, n_head, -1, max_len))\n indice = layers.arange(start=0, end=max_len, dtype=int)\n E = layers.index_select(input=E, index=indice, dim=-2)\n E = layers.transpose(E, perm=[0, 1, 3, 2])\n return E\n\n def scaled_dot_product_attention(q, k, v, pos_enc, attn_bias, d_key, dropout_rate):\n \"\"\"\n Scaled Dot-Product Attention\n\n Change:\n - Different from the original one.\n We will remove the scale factor math: \\sqrt{d_k} according to the paper.\n - Bias for attention and position encoding are added.\n \n \"\"\"\n # product = layers.matmul(x=q, y=k, transpose_y=True, alpha=d_key**-0.5)\n\n # now q, k should be shaped like\n # [batch_size, n_head, max_sequence_len, hidden_size_per_head]\n # pos_enc should be shaped like [2 X l, head_dim], and head_dim = d_key\n max_sequence_len = q.shape[2]\n \n r_r_bias = layers.create_parameter(shape=(n_head, d_key)) # [n_head, head_dim]\n r_w_bias = layers.create_parameter(shape=(n_head, d_key)) # [n_head, head_dim]\n rw_head_q = q + r_r_bias[:, None] # [batch, n_head, max_sequence_len, head_dim]\n AC = layers.matmul(x=rw_head_q, y=k, transpose_y=True) # [batch, n_head, max_sequence_len, max_seqence_len]\n \n # position bias for each head, shaped like [n_head, 2 X max_sequence_len].\n # Then add two dimensions at `batch` and `maxlen`.\n D_ = layers.matmul(x=r_w_bias, y=pos_enc, transpose_y=True)[None, :, None]\n # position bias for each query, shaped like [batch, n_head, max_len, 2 X max_len]\n B_ = layers.matmul(x=q, y=pos_enc, transpose_y=True)\n # bias for each key, shaped like [batch, n_head, max_len, 2 X max_len]\n E_ = layers.matmul(x=k, y=pos_enc, transpose_y=True)\n \n # shaped like [batch, n_head, max_len, 2 X max_len]\n # change it to [batch, n_head, max_len, max_len]\n BD = B_ + D_\n BDE = _shift(BD) + _transpose_shift(E_)\n product = AC + BDE\n\n # product = layers.matmul(x=q, y=k, transposed_y=True, alpha=1.0) + \\\n # layers.matmul(x=q, y=pos_enc, transposed_y=True) +\\\n # layers.transpose(x=last_two, perm=[0, 1, 3, 2])\n if attn_bias:\n product += attn_bias\n weights = layers.softmax(product)\n if dropout_rate:\n weights = layers.dropout(\n weights,\n dropout_prob=dropout_rate,\n seed=dropout_seed,\n is_test=False)\n out = layers.matmul(weights, v)\n return out\n\n q, k, v = __compute_qkv(queries, keys, values, n_head, d_key, d_value)\n q, k, v = __split_heads_qkv(q, k, v, n_head, d_key, d_value)\n\n ctx_multiheads = scaled_dot_product_attention(q, k, v, pos_enc, attn_bias, d_key,\n dropout_rate)\n\n out = __combine_heads(ctx_multiheads)\n\n # Project back to the model size.\n proj_out = layers.fc(input=out,\n size=d_model,\n bias_attr=False,\n num_flatten_dims=2)\n return proj_out", "def inter_weighted_attention(sentence, # [batch_size, timestep, embed_size]\n other_sentence_vec, # [batch_size, 1, embed_size]\n reuse=None):\n bs, timestep, embed_size1 = sentence.get_shape().as_list()\n bs, ts, embed_size2 = other_sentence_vec.get_shape().as_list()\n assert ts == 1\n embed_size = embed_size1 + 
embed_size2\n\n with tf.variable_scope('inter_weighted_attention', reuse=reuse):\n inputs = tf.reshape(\n tf.concat([sentence, tf.tile(other_sentence_vec, [1, timestep, 1])], axis=2),\n [-1, embed_size]) # [batch_size * timestep, embed_size1 + embed_size2]\n hidden_size = embed_size\n w1 = tf.get_variable('weight1', [embed_size, hidden_size],\n initializer=tf.contrib.layers.xavier_initializer())\n w2 = tf.get_variable('weight2', [hidden_size, 1],\n initializer=tf.contrib.layers.xavier_initializer())\n hidden_output = tf.tanh(tf.matmul(inputs, w1)) # [batch_size * timestep, hidden_size]\n attention_output = tf.nn.softmax(tf.matmul(hidden_output, w2)) # [batch_size * timestep, 1]\n attention_output = tf.reshape(attention_output, [-1, timestep, 1])\n # attention = tf.squeeze(attention_output, axis=2)\n\n return attention_output", "def forward(self, key, value, query, mask=None, enc_attn_cache=None, self_attn_cache=None):\n\n batch_size = key.size(0)\n dim_per_head = self.dim_per_head\n head_count = self.head_count\n\n # 1) Project key, value, and query.\n if enc_attn_cache is not None:\n key_up, value_up = enc_attn_cache\n else:\n key_up = self._split_heads(self.linear_keys(key)) # [batch_size, num_head, seq_len, dim_head]\n value_up = self._split_heads(self.linear_values(value))\n\n if self_attn_cache is not None:\n key_up_prev, value_up_prev = self_attn_cache\n # Append current key and value to the cache\n key_up = torch.cat([key_up_prev, key_up], dim=2)\n value_up = torch.cat([value_up_prev, value_up], dim=2)\n\n query_up = self._split_heads(self.linear_query(query))\n\n key_len = key_up.size(2)\n query_len = query_up.size(2)\n\n # 2) Calculate and scale scores.\n query_up = query_up / math.sqrt(dim_per_head)\n scores = torch.matmul(query_up, key_up.transpose(2, 3))\n\n if mask is not None:\n mask = mask.unsqueeze(1).expand_as(scores)\n scores = scores.masked_fill(mask, -1e18)\n\n # 3) Apply attention dropout and compute context vectors.\n attn = self.sm(scores)\n drop_attn = self.dropout(attn)\n context = self._combine_heads(torch.matmul(drop_attn, value_up))\n\n output = self.final_linear(context)\n\n # Return one attn\n top_attn = attn \\\n .view(batch_size, head_count,\n query_len, key_len)[:, 0, :, :] \\\n .contiguous()\n # END CHECK\n return output, top_attn, [key_up, value_up]", "def forward(self, key, value, query, mask=None, enc_attn_cache=None, self_attn_cache=None):\n\n batch_size = key.size(0)\n dim_per_head = self.dim_per_head\n head_count = self.head_count\n\n # 1) Project key, value, and query.\n if enc_attn_cache is not None:\n key_up, value_up = enc_attn_cache\n else:\n key_up = self._split_heads(self.linear_keys(key)) # [batch_size, num_head, seq_len, dim_head]\n value_up = self._split_heads(self.linear_values(value))\n\n if self_attn_cache is not None:\n key_up_prev, value_up_prev = self_attn_cache\n # Append current key and value to the cache\n key_up = torch.cat([key_up_prev, key_up], dim=2)\n value_up = torch.cat([value_up_prev, value_up], dim=2)\n\n query_up = self._split_heads(self.linear_query(query))\n\n key_len = key_up.size(2)\n query_len = query_up.size(2)\n\n # 2) Calculate and scale scores.\n query_up = query_up / math.sqrt(dim_per_head)\n scores = torch.matmul(query_up, key_up.transpose(2, 3))\n\n if mask is not None:\n mask = mask.unsqueeze(1).expand_as(scores)\n scores = scores.masked_fill(mask, -1e18)\n\n # 3) Apply attention dropout and compute context vectors.\n attn = self.sm(scores)\n drop_attn = self.dropout(attn)\n context = 
self._combine_heads(torch.matmul(drop_attn, value_up))\n\n output = self.final_linear(context)\n\n # Return one attn\n top_attn = attn \\\n .view(batch_size, head_count,\n query_len, key_len)[:, 0, :, :] \\\n .contiguous()\n # END CHECK\n return output, top_attn, [key_up, value_up]", "def forward(self, query, key, value, mask, relation_bias_matrix, output=None):\n batch_size, seq_len, d_model = query.size()\n\n # 1) Do all the linear projections in batch, reshape from d_model => num_heads x d_k\n # query, key, value have shapes [N, num_heads, S, dk]\n query, key, value = [l(x).view(batch_size, -1, self.num_heads, self.d_k).transpose(1, 2)\n for l, x in zip(self.linear_layers, (query, key, value))]\n\n # 2) Apply attention on all the projected vectors in batch.\n # first attention term, [N, num_heads, S, dk] * [N, num_heads, dk, S] -> [N, num_heads, S, S]\n score1 = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(self.d_k)\n\n # second attention term (relative attention)\n # S parallel multiplications of [N*num_heads, dk] and [dk, S] matrices\n # [S, N*num_heads, dk] * [S, dk, S] -> [S, N*num_heads, S], transpose to [N, num_heads, S, S]\n query = query.view(batch_size*self.num_heads, -1, -1).transpose(0, 1)\n score2 = torch.matmul(query, relation_bias_matrix.transpose(-2, -1)).transpose(0, 1)\n score2 = score2.view(batch_size, self.num_heads, -1, -1)\n\n scores = (score1 + score2) / math.sqrt(self.d_k)\n\n scores = scores.masked_fill(mask == 0, -1e9)\n\n p_attn = F.softmax(scores, dim=-1)\n\n if self.dropout is not None:\n p_attn = self.dropout(p_attn)\n\n x = torch.matmul(p_attn, value)\n\n # 3) \"Concat\" using a view and apply a final linear.\n x = x.transpose(1, 2).contiguous().view(batch_size, -1, self.num_heads * self.d_k)\n\n if output is not None:\n output['attention'].append(p_attn)\n\n return self.output_linear(x)", "def compute_attention(self,\n query,\n key,\n value,\n position,\n content_attention_bias,\n positional_attention_bias,\n segment_matrix=None,\n segment_encoding=None,\n segment_attention_bias=None,\n attention_mask=None):\n content_attention = tf.einsum(self._dot_product_equation,\n key,\n query + content_attention_bias)\n positional_attention = tf.einsum(self._dot_product_equation,\n position,\n query + positional_attention_bias)\n positional_attention = _rel_shift(\n positional_attention, klen=tf.shape(content_attention)[3])\n\n if segment_matrix is not None:\n segment_attention = tf.einsum(\"bind,snd->bnis\",\n query + segment_attention_bias,\n segment_encoding)\n target_shape = tf.shape(positional_attention)\n segment_attention = tf.where(\n tf.broadcast_to(tf.expand_dims(segment_matrix, 1), target_shape),\n tf.broadcast_to(segment_attention[:, :, :, 1:], target_shape),\n tf.broadcast_to(segment_attention[:, :, :, :1], target_shape))\n attention_sum = (\n content_attention + positional_attention + segment_attention)\n else:\n attention_sum = content_attention + positional_attention\n\n attention_scores = tf.multiply(\n attention_sum, 1.0 / math.sqrt(float(self._key_dim)))\n\n # `attention_scores`: `[B, N, S, S + M]`\n if attention_mask is not None:\n attention_scores += (_large_compatible_negative(attention_scores.dtype)\n * attention_mask)\n\n attention_scores = tf.nn.softmax(attention_scores, 3)\n attention_output = self._dropout_layer(attention_scores)\n\n attention_output = tf.einsum(self._combine_equation,\n attention_output,\n value)\n return attention_output", "def create_attention_node(self, mask_index: str, q_matmul: NodeProto, k_matmul: 
NodeProto, v_matmul: NodeProto,\n q_add: NodeProto, k_add: NodeProto, v_add: NodeProto, num_heads: int, hidden_size: int,\n input: str, output: str) -> Union[NodeProto, None]:\n assert num_heads > 0 and hidden_size > 0 and (hidden_size % num_heads) == 0\n\n q_weight = self.model.get_initializer(q_matmul.input[1])\n k_weight = self.model.get_initializer(k_matmul.input[1])\n v_weight = self.model.get_initializer(v_matmul.input[1])\n q_bias = self.model.get_initializer(q_add.input[1]) or self.model.get_initializer(q_add.input[0])\n k_bias = self.model.get_initializer(k_add.input[1]) or self.model.get_initializer(k_add.input[0])\n v_bias = self.model.get_initializer(v_add.input[1]) or self.model.get_initializer(v_add.input[0])\n\n if q_weight is None:\n print(f\"{q_matmul.input[1]} is not initializer. Please set do_constant_folding=True in torch.onnx.export\")\n return None\n if not (k_weight and v_weight and q_bias and k_bias):\n return None\n qw = NumpyHelper.to_array(q_weight)\n kw = NumpyHelper.to_array(k_weight)\n vw = NumpyHelper.to_array(v_weight)\n\n # Check if all matrices have the same shape\n assert qw.shape == kw.shape == vw.shape\n\n # All the matrices have the same shape. For 2d weights, the shapes would be [in_size, out_size].\n # For 3d weights, shape would be [in_size, a, b] where a*b = out_size\n in_size = qw.shape[0]\n out_size = np.prod(qw.shape[1:])\n\n qkv_weight = np.stack((qw, kw, vw), axis=1)\n\n qb = NumpyHelper.to_array(q_bias)\n kb = NumpyHelper.to_array(k_bias)\n vb = NumpyHelper.to_array(v_bias)\n\n # 1d bias shape: [outsize,]. 2d bias shape: [a, b] where a*b = out_size\n assert qb.shape == kb.shape == vb.shape\n assert np.prod(qb.shape) == out_size\n\n if out_size != hidden_size:\n logger.debug(\n f\"Shape for weights of Q is {in_size, out_size}, which does not match hidden_size={hidden_size}\")\n return None\n\n qkv_bias = np.stack((qb, kb, vb), axis=0)\n attention_node_name = self.model.create_node_name('Attention')\n\n weight = helper.make_tensor(name=attention_node_name + '_qkv_weight',\n data_type=TensorProto.FLOAT,\n dims=[in_size, 3 * out_size],\n vals=qkv_weight.flatten().tolist())\n\n # Sometimes weights and bias are stored in fp16\n if q_weight.data_type == 10:\n weight.CopyFrom(numpy_helper.from_array(NumpyHelper.to_array(weight).astype(np.float16), weight.name))\n self.model.add_initializer(weight, self.this_graph_name)\n\n bias = helper.make_tensor(name=attention_node_name + '_qkv_bias',\n data_type=TensorProto.FLOAT,\n dims=[3 * out_size],\n vals=qkv_bias.flatten().tolist())\n if q_bias.data_type == 10:\n bias.CopyFrom(numpy_helper.from_array(NumpyHelper.to_array(bias).astype(np.float16), bias.name))\n self.model.add_initializer(bias, self.this_graph_name)\n\n attention_inputs = [input, attention_node_name + '_qkv_weight', attention_node_name + '_qkv_bias']\n if mask_index is not None:\n attention_inputs.append(mask_index)\n\n attention_node = helper.make_node('Attention',\n inputs=attention_inputs,\n outputs=[output],\n name=attention_node_name)\n attention_node.domain = \"com.microsoft\"\n attention_node.attribute.extend([helper.make_attribute(\"num_heads\", num_heads)])\n\n return attention_node", "def forward(self, query, key_txt, value_txt, mask_txt,\n key_img, value_img, mask_img=None):\n residual = query\n query = self.apply_pre_norm_if_needed(query)\n if key_txt is None:\n key_txt = query\n if value_txt is None:\n value_txt = query\n\n combined_mask = self._generate_combined_mask(\n key_img, mask_img, mask_txt)\n\n multimodal_key = 
torch.cat((key_img, key_txt), dim=0)\n multimodal_value = torch.cat((value_img, value_txt), dim=0)\n attn_multimodal, attn_weights = self.multimodal_attn(\n (query, multimodal_key, multimodal_value, combined_mask))\n\n out = self.apply_residual(residual, attn_multimodal)\n out = self.apply_post_norm_if_needed(out)\n return out, attn_weights" ]
[ "0.78216404", "0.65913695", "0.5948683", "0.58981717", "0.55256927", "0.54728514", "0.53279114", "0.529244", "0.5275274", "0.52608335", "0.5222576", "0.51994085", "0.5185228", "0.5127895", "0.5086893", "0.505606", "0.5044171", "0.49941444", "0.49187762", "0.48961437", "0.48906064", "0.4888328", "0.4852728", "0.4819556", "0.4798512", "0.4798512", "0.47970098", "0.47764245", "0.47655272", "0.47629902" ]
0.7887788
0
Computes and prints moments 0-2 to stdout.
def moments(path):
    g = from_file(path)
    h = 1.0 - g
    m1 = bgy3d.moments1(h)
    # Get the center of distribution
    center = m1[1:4] / m1[0]
    # Use center to compute 2nd momenta
    m2 = bgy3d.moments2nd(h, center)
    print "Moments from", path
    print "<1> = ", m1[0]
    print "<x> = ", m1[1] / m1[0]
    print "<y> = ", m1[2] / m1[0]
    print "<z> = ", m1[3] / m1[0]
    print "<xy> = ", m2[0] / m1[0]
    print "<yz> = ", m2[1] / m1[0]
    print "<zx> = ", m2[2] / m1[0]
    print "<z^2 - 1/3 * r^2> = ", m2[3] / m1[0]
    print "<x^2 - y^2> = ", m2[4] / m1[0]
    print "<r^2> = ", m2[5] / m1[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def moments(self):", "def moments(infile):\n \n version = 0.0\n #ruler75###########################################\n # ALL THE IMPORT STUFF \n import read_sex\n import string\n from flags import addflag, allflags,isflagon\n from momsource import momsource, momcat\n from time import time\n import os, sys\n import pyfits\n from CommonTools.loadascii import loadascii\n import momlib\n quitpath = momlib.quitpath\n secs_to_dhms = momlib.secs_to_dhms\n from Moments.latex import LaTeX\n from copy import copy\n from algorithms import get_stat\n # END IMPORT STUFF\n \n isthere = os.path.exists\n \n t1 = time()\n \n execpars = momlib.read_inputs(infile) # execution parameters\n\n #ruler75############################################\n \n # READOUT OF CATALOGUE\n \n father_cat = momcat(execpars) # initialize input catalog\n father_cat.read_catsex() # read input catalog\n \n for key in father_cat.flags:\n if father_cat.flags[key] == False :\n print \"\"\"SOME REQUESTED PARAMETER NOT IN CATALOGUE: %s\"\"\" %\\\n key\n stop()\n del key\n \n # READOUT OF SEX FILE\n # dictionary with keywords:values\n # father_sex = read_sex.read_filesex(execpars['father_sex_name'])\n # Not used by now.\n \n # Filtering of catalog using an external file which selects which objects\n # in the SEXTRACTOR catalog are to be analyzed.\n \n if father_cat.execpars['NUMBERs_f'][0] != 'None' :\n NUMBERs_f = father_cat.execpars['NUMBERs_f'][0]\n cols = ['NUMBER']\n formats = 'i'\n load = loadascii(NUMBERs_f,cols,formats,separator='blank')\n NUMBERs = load['NUMBER']\n indxs = []\n for number in NUMBERs:\n indxs.append(num.where(cat['NUMBER'] == number)[0])\n indxs = (num.array(indxs),)\n for key in father_cat:\n father_cat[key] = father_cat[key][indxs]\n \n elif 'DO' in father_cat and father_cat.execpars['useDo'][0] == 1:\n indxs = num.where(father_cat['DO'] == 1)\n NUMBERs = father_cat['NUMBER'][indxs]\n for key in father_cat:\n father_cat[key] = father_cat[key][indxs]\n \n if father_cat.execpars['delay'][0] != 0:\n delay = father_cat.execpars['delay'][0]\n for key in father_cat:\n father_cat[key] = father_cat[key][delay:]\n \n nobj_out = len(father_cat[father_cat.keys()[0]]) # number of objects to \n # analyze\n \n father_cat.initialize() # initialize output catalog\n \n # LOADING IMAGES:\n \n extension = 0 # Image extension. Not ready to handle higer extensions.\n \n # ARE IMAGES TOO BIG? 
THEN USE FITSCUT TO MAKE STAMPS.\n stampmode = execpars['cutmode'][0]\n \n if 'SEGIMAGE' not in father_cat and 'IMAGE' not in father_cat:\n \n if stampmode == 'pyfits':\n father_img = pyfits.getdata(execpars['father_img_name'],\\\n ext=extension).astype('Float32')\n father_seg = pyfits.getdata(execpars['father_seg_name'],\\\n ext=extension).astype('Int32')\n try: father_mask = pyfits.getdata(execpars['father_mask_name'],\\\n ext = extension).astype('Int32')\n except KeyError : pass\n father_img_dim = father_img.shape\n elif stampmode == 'fitscut' or stampmode == 'pyraf':\n father_img_name = execpars['father_img_name']\n father_seg_name = execpars['father_seg_name']\n try : \n father_mask_name = execpars['father_mask_name']\n except KeyError:\n pass\n father_img_dim = pyfits.getdata(father_img_name,\\\n ext=extension).shape\n \n # LOOP OVER SOURCES AND SOURCE OBJECT CREATION\n \n father_img_name_prev = ''\n \n try: Ndump = father_cat.execpars['Ndump'][0]\n except KeyError: Ndump = 0\n \n # MAIN LOOP\n \n for counter in range(nobj_out):\n \n if 'SEGIMAGE' in father_cat and 'IMAGE' in father_cat:\n \n if father_cat['IMAGE'][counter] != father_img_name_prev:\n father_img_name = father_cat['IMAGE'][counter]\n father_seg_name = father_cat['SEGIMAGE'][counter]\n try: father_mask_name = father_cat['MASK'][counter]\n except KeyError: father_mask_name = 'None'\n \n if stampmode == 'pyfits':\n father_img = pyfits.getdata(father_img_name,\\\n ext=extension).astype('Float32')\n father_img_dim = father_img.shape\n father_seg = pyfits.getdata(father_seg_name,\\\n ext=extension).astype('Int32')\n if isthere(father_mask_name):\n father_mask = pyfits.getdata(father_mask_name,\\\n ext=extension).astype('Int32')\n else : pass\n \n else:\n father_img_dim = pyfits.getdata(father_img_name,\\\n ext= extension).shape\n \n father_img_name_prev = father_img_name\n \n else : pass\n \n # TIME CONTROL\n \n t3 = time()\n print '\\nprocessing object %i of a total of %i\\n' % \\\n (counter+1,nobj_out)\n talready = t3-t1\n talready_fr = secs_to_dhms(talready)\n if talready_fr[0]>1:\n print '... %i days, %i hours, %i minutes, %f seconds since start' % \\\n talready_fr[0:]\n else:\n print '...%i hours, %i minutes, %f seconds since start' % \\\n talready_fr[1:]\n \n tahead = ((t3-t1)/(counter+1))* (nobj_out-counter+1)\n tahead_fr = secs_to_dhms(tahead)\n if tahead_fr[0] > 0 :\n print '%i days, %i hours, %.2f minutes to finish...' % \\\n tahead_fr[0:3]\n else:\n print '%i hours, %.2f minutes to finish...' 
% tahead_fr[1:3]\n \n source = momsource(execpars) # inherits from dict class.\n for col in father_cat : source[col] = father_cat[col][counter]\t\t \n del col\n\t\n if source.execpars['useMANBACK'][0] == 1:\n source['BACKGROUND'] = source.execpars['MANBACK'][0]\n \n # WINDOW DEFINITION\n \n source.getwindow(father_img_dim)\n \n # MAKING STAMPS\n extension = 0 # default by now\n \n\t\n if stampmode == 'pyfits':\n source.make_stamp(imgname=None,img=father_img,\\\n name='STAMP',extension=extension,mode=stampmode)\n source.make_stamp(imgname=None,img=father_seg,\\\n name='SEGSTAMP',extension=extension,mode=stampmode)\n try: source.make_stamp(imgname=None,img=father_mask,\\\n name='EXTMASK',extension=extension,mode=stampmode)\n except NameError : source['EXTMASK'] = None\n \n elif stampmode == 'fitscut' or stampmode=='pyraf':\n \n source.make_stamp(imgname=father_img_name,img=None,\\\n name='STAMP',extension=extension,mode=stampmode)\n source.make_stamp(imgname=father_seg_name,img=None,\\\n name='SEGSTAMP',extension=extension,mode=stampmode)\n if father_mask_name != 'None':\n source.make_stamp(imgname=father_mask_name,img=None,\\\n name='EXTMASK',extension=extension,mode=stampmode)\n else : source['EXTMASK'] = None\n \n # \"MASQUERADE...\"\n \n # 1 is masked, 0 is non masked\n \n source.make_mask(source['SEGSTAMP'],name='SEXMASK',\\\n mask_in=None,mode='nosky') # array.\n source.make_mask(source['SEGSTAMP'],name='SEXMASKOTHER',\\\n mask_in=None,mode='withsky') # array.\n\n\n if source['EXTMASK'] != None : \n source['MASK'] = momlib.mergemasks((source['SEXMASK'],\\\n source['EXTMASK']))\n source['MASKOTHER'] = momlib.mergemasks((source['SEXMASKOTHER'],\\\n source['EXTMASK']))\n source['SKYMASK'] = momlib.mergemasks((1-source['SEXMASK'],\\\n source['SEXMASKOTHER'],source['EXTMASK']))\n \n else : \n source['MASK'] = source['SEXMASK']\n source['MASKOTHER'] = source['SEXMASKOTHER']\n source['SKYMASK'] = momlib.mergemasks((1-source['SEXMASK'],\\\n source['SEXMASKOTHER']))\n \n source.execpars[\"version\"] = version\n if bool(source.execpars['makefits'][0]):\n source.wrap2mef()\n \n # If there's no pixel different from 0 in object, flag as \"BLANK\".\n nselected = len(num.where(source['MASK']==0)[0])\n \n nonblankprocent = 100.0 * len(num.where(((1-source['MASK']) * \\\n source['STAMP']) != 0.)[0])/ nselected\n print '\\nnonblankprocent = %.1f\\n' % nonblankprocent\n \n if nonblankprocent < 90.:\t \n source['flags'] = addflag(source['flags'],allflags['BLANK'])\n isblank = True\n print '\\n BLANK OBJECT!\\n'\t \n else: isblank = False\n \n # Do verbose Graphical Output?\n \n dograph = execpars['dograph'][0] == 1\n if dograph: \n source['figures'] = {}\n source['figcomms'] = {}\n \n # MANDATORY\n \n # mandatory: radial profile, petrosian radius, petrosian mask, \n # 1st & 2nd order moments\n # radial returns radial profile (within MASK)\n \n source['BOXY'] = 0 # Elliptical apertures always.\n \n # Measure sky sigma: THIS SAVES SOME TIME, AS IT IS ALREADY\n # REQUIRED BY SEVERAL TASKS.\n \n sky_sigma = source.execpars['sigma_sky'][0]\n \n source.getsky()\n\t\n if source['SKY_MEDIAN'] != None and \\\n source.execpars['useMANBACK'][0] == -1:\n print '\\nMeasured background : %.2e\\n' % source['SKY_MEDIAN']\n source['BACKGROUND'] = source['SKY_MEDIAN']\n \n if int(sky_sigma) == -1:\n sky_sigma = source['SKY_SIGMA']\n print 'sky_sigma = %.2e' % sky_sigma\n \n if sky_sigma != None : source.execpars['sigma_sky'] = [sky_sigma]\n del sky_sigma\n \n # RADIAL PROFILE\n tRAD_1 = time()\n if not isblank:\n \n if 
'M_RADIAL' in execpars['toexec']: \n if execpars['doFineRadial'][0]==0:\n source.radial_v1(dograph=dograph)\n else:\n source.radial_v3(dograph=dograph)\n \n tRAD_2 = time()\n print '%f seconds in making RADIAL profile' % (tRAD_2-tRAD_1,)\n \n # SAVE RADIAL PROFILE\n tRADSAVE_1 = time()\n if 'M_RADIAL' in execpars['toexec'] and \\\n bool(source.execpars['saveradial'][0]) :\n try: imgid = father_img_name\n except NameError: imgid = \\\n quitpath(source.execpars['father_img_name'])\n imgid = imgid[0:string.rfind(imgid,'.')]\n id = '%s' % source['name']\n radialfile = '%s_%s_RADIAL.txt' % (id,imgid)\n source.SaveRadial(radialfile)\n tRADSAVE_2 = time()\n print '%f seconds in saving RADIAL profile' % \\\n (tRADSAVE_2-tRADSAVE_1,)\n \n # PETROSIAN returns petrosian radius, intensity and \n # flux (within MASK)\n if 'M_PETRO' in execpars['toexec']: \n #source.petrosian()\n\t\tsource.petrosian2()\n \n # petromsk returns PETROSIAN MASK!\n if 'M_PETROMSK' in execpars['toexec']:\n source.petromsk()\n \n # Average Signal to Noise ratio\n if 'SNR' in execpars['toexec']:\n source.snr()\n \n # ellipticity parameters are computed before the mask may be\n # updated to Petrosian mask.\n # ellipse updates A, B, THETA, ELONGATION, ELLIP\n # (within MASK^SEGMAP)\n \n tELL_1 = time()\n if 'M_ELLIP' in execpars['toexec']:\n source.ellipse()\n tELL_2 = time()\n print '%f seconds in running ellipse' % (tELL_2-tELL_1,)\n \n tMOM_1 = time()\n if 'M_MOM' in execpars['toexec']:\n source.getmoments()\n tMOM_2 = time()\n print '%f seconds in running moments' % (tMOM_2-tMOM_1,)\n else:\n # RADIAL\n radiusflags = 0L\n radiusflags = addflag(radiusflags,allflags['NORADIAL'])\n source['M_RADIAL'] = {'radii':None,'cumulflx':None,\\\n 'intens':None,'npix':None,'npixout':None,\\\n 'radiusflags':radiusflags}\n # SAVE RADIAL\n source['radial_file'] = '0'\n # PETROSIAN\n source['R_PETRO'] = -99.0 ; source['I_PETRO'] = -99.0\n source['F_PETRO'] = -99.0\n # PETROMSK\n source['M_PETROMSK'] = None\n # SNR\n source['SNR'] = -99.\n # ELLIPSE\n source['M_A'] = -99.0 ; source['M_B'] = -99.0 \n source['M_THETA'] = -99.0 ; source['M_ELONG'] = -99.0 \n source['M_ELLIP'] = -99.0\n # Moments\n source['M_X'] = -99.0 ; source['M_Y'] = -99.0\n source['M_X2'] = -99.0 ; source['M_Y2'] = -99.0\n source['M_XY'] = -99.0\n \n # END MANDATORY\n \n # Use of petrosian mask\n # \n if source.execpars['usepetro'][0] == 1 and \\\n source['M_PETROMSK'] != None:\n if source['EXTMASK'] != None : \n bunch_of_masks = (source['M_PETROMSK'],\\\n source['SEXMASKOTHER'],source['EXTMASK'])\n else : \n bunch_of_masks = (source['M_PETROMSK'],source['SEXMASKOTHER'])\n \n source['MASK'] = momlib.mergemasks(bunch_of_masks)\n\t source['flags'] = addflag(source['flags'],allflags['USEPETROMSK'])\n \n # WARNING: 'THE PETROSIAN MASK MAY BE UNNOTICEDLY\n # TRUNCATED BY THE WINDOW!!'\n \n # OPTIONAL PARAMETERS\n \n if not isblank:\n \n # 'Basics' gets area, average intensity and total flux of object \n # within MASK.\n \n if 'BASICS' in execpars['toexec']:\n source.Basics()\n \n # SECOND ORDER MOMENT (ALTERNATIVE TAKE)\n if 'M2' in execpars['toexec']:\n source.M2()\n \n # RADII WHICH CONTAIN SEVERAL FLUX RATIOS\n \n if 'RADII' in execpars['toexec']:\n #source.Radii()\n\t\tsource.Radii2()\n \n # FLUX INSIDE A GIVEN RADIUS (APFLXRADIUS)\n \n if 'APFLX' in execpars['toexec'] and 'APFLXRADIUS' in source:\n source.ApFlx()\n \n # COORDINATES OF PEAK EMISSION\n \n # peak updates peak center (within MASK^SEGMAP)\n if 'M_PEAK' in execpars['toexec']: \n source.peak() # Minimum boxwidth = 3 
pix\n \n # GINI\n tG_1 = time()\n if 'M_GINI' in execpars['toexec'] : \n source.gini(dograph)\n tG_2 = time()\n print '%f seconds in running Gini' % (tG_2-tG_1,)\n \n # ASYMMETRY\n \n tA_1 = time()\n if 'ASYM' in execpars['toexec'] : \n source.asymmetry(dograph)\n tA_2 = time()\n print '%f seconds in running asymmetry' % (tA_2-tA_1,)\n \n # ANGULAR CONTRAST\n \n tAC_1 = time()\n if 'AC' in execpars['toexec']:\n source.AC(dograph)\n tAC_2 = time()\n print '%f seconds in running AC' % (tAC_2-tAC_1,)\n \n # CONCENTRATION\n \n tC_1 = time()\n if 'CONCENT' in execpars['toexec']:\n source.concent(dograph)\n tC_2 = time()\n print '%f seconds in running concent' % (tC_2-tC_1,)\n \n # FIND PEAKS (TO BE DROP SOON...)\n \n tNP_1 = time()\n if 'NPEAKS' in execpars['toexec']:\n source.FindPeaksII(dograph) # ON TESTS\n tNP_2 = time()\n print '%f seconds in running FindPeaks' % (tNP_2 - tNP_1,)\n \n # CLUMPS STATISTICS\n \n tNC_1 = time()\n if 'NCLUMPS' in execpars['toexec']:\n source.FindClumps(dograph) # ON TESTS\n tNC_2 = time()\n print '%f seconds in running FindClumps' % \\\n (tNC_2-tNC_1,)\n \n # CLUMPINESS\n \n tCL_1 = time()\n if 'CLUMPY' in execpars['toexec'] and source['R_PETRO'] != -99:\n source.clumpy(dograph) # Minimum boxwidth = 3 pix\n else:\n source['M_S'] = -99.0 ; source['M_S_SKY'] = -99.0\n tCL_2 = time()\n print '%f seconds in running clumpy' % (tCL_2 - tCL_1,)\n \n # AXIS ASYMETRY (MAJOR AXIS)\n \n tMAXAXIS_1 = time()\n if 'MAJOR_SIM' in execpars['toexec']:\n source.axis_asymmetry(axis='major',dograph=dograph)\n tMAXAXIS_2 = time()\n print '%f seconds in MAJOR_SIM' % (tMAXAXIS_2-tMAXAXIS_1,)\n \n # AXIS ASYMMETRY (MINOR AXIS)\n \n tMINAXIS_1 = time()\n if 'MINOR_SIM' in execpars['toexec']:\n source.axis_asymmetry(axis='minor',dograph=dograph)\n tMINAXIS_2 = time()\n print '%f seconds in running MINOR_SIM' % \\\n (tMINAXIS_2-tMINAXIS_1,)\n \n # M20\n \n tM20_1 = time()\n if 'M20' in execpars['toexec'] : \n if execpars['M20mode'][0] == 'Lotz':\n source.M20Lotz(dograph)\n elif execpars['M20mode'][0] == 'Azzo':\n source.M20Azzo(dograph)\n tM20_2 = time() \n print '%f seconds in running M20 (%s)' % (tM20_2-tM20_1,\\\n execpars['M20mode'][0])\n \n # EXCENTRICITY\n \n tE_1 = time()\n if 'EXCENTRICITY' in execpars['toexec']:\n source.Excentricity(dograph)\n tE_2 = time()\n print '%f seconds in running Excentricity' % (tE_2-tE_1,)\n \n # FILLING FACTOR\n \n tFF_1 = time()\n if 'FFACTOR' in execpars['toexec']:\n source.FFactor(dograph)\n tFF_2 = time()\n print '%f seconds in running FFactor' % (tFF_2-tFF_1)\n \n # VISITORS\n ttr_1 = time()\n if 'TRUNC' in execpars['toexec']:\n scale = 0.03\n zeroT = 24.84315\n source.trunc(scale,zeroT,dograph)\n ttr_2 = time()\n print '%f seconds in running trunc' % (ttr_2-ttr_1)\n \n else : \n # BASICS\n source['M_NPIX'] = -99 ; source['M_FLUX'] = -99.\n source['M_AVINT'] = -99.\n # M2\n source['M_M2'] = -99.\n # RADII\n source['M_R20'] = -99. ; source['M_R50'] = -99. ; \n source['M_R80'] = -99. ;\n # APFLX\n source['M_APFLX'] = -99.\n # PEAK\n source['M_XPEAK'] = -99. ; source['M_YPEAK'] = -99.\n # GINI\n source['M_GINI'] = -99.\n # ASYMMETRY\n source['M_AS_X'] = -99. ; source['M_AS_Y'] = -99.\n source['M_AS'] = -99. ; source['M_AS_SKY'] = -99.\n # AC\n source['MAC8'] = -99. ; source['MAC8M'] = -99.\n source['MAC4'] = -99.\n # CONCENT\n source['M_C'] = -99.0\n # NPEAKS\n source['M_NPEAKS'] = -99\n # NCLUMPS\n source['M_NUM_CL'] = -99 \n source['M_MAX_CL'] = -99. ; source['M_MIN_CL'] = -99.\n source['M_ACC_CL'] = -99. 
; source['M_FAR_CL'] = -99.\n # CLUMPY\n source['M_S'] = -99. ; source['M_S_SKY'] = -99. \n # MAJOR_SIM\n source['M_AXS_MAJ'] = -99. ; source['M_AXS_SKY_MAJ'] = -99.\n # MINOR_SIM\n source['M_AXS_MIN'] = -99. ; source['M_AXS_SKY_MIN'] = -99.\n # getmoments\n source['M_X'] = -99. ; source['M_Y'] = -99.\n source['M_X2'] = -99. ; source['M_Y2'] = -99. ; source['M_XY'] = -99.\n # M20\n source['M20'] = -99.\n # EXCENTRICITY\n source['M_E'] = -99.\n # FFACTOR\n source['M_FF'] = -99.\n \n # SOME OTHER OPTIONAL GRAPHIC OUTPUTS\n \n if dograph:\n source.stamp_graph('STAMP','stamp')\n source.stamp_graph('MASK','mask')\n if 'SKYSTAMP' in source : \n source.stamp_graph('SKYSTAMP','sky')\n source.petropeakcenter_graph()\n \n pdfid = source._getGraphId()\n latexfile = '%s.tex' % pdfid\n pdffile = '%s.pdf' % pdfid\n psfile = '%s.ps' % pdfid\n latex = LaTeX()\n figures = copy(source['figures'])\n figcomms = copy(source['figcomms'])\n header = '%s' % source['name']\n latex.DoBody(header,figures,figcomms)\n latex.Write(latexfile)\n latex.Compile(latexfile,cleanafter=True,\\\n figures=copy(source['figures']))\n latex.Ps2Pdf(psfile,pdffile,cleanafter=True)\n \n source['PDF'] = pdffile\n \n # PACKAGING OF REMAINING DATA TO THE OUTPUT OBJECT\n for key in execpars['to_output']: \n try: \n father_cat[key][counter] = source[key]\n except : stop()\n \n del source # free memory\n \n #sys.exit()\n t4 = time()\n lapsus = t4 - t3\n print '%i seconds in analyzing 1 object\\n\\n\\n' % lapsus\n \n \n # DUMPING OUTPUT to a FILE\n # it writes outputs to a file in sextractor fashion.\n # outfile_name = dumpcat(outfile_name,output,to_output)\n if (Ndump >=1) and (counter % Ndump == 0):\n if isthere(execpars['outfile_name']):\n os.system('rm %s' % execpars['outfile_name'])\n father_cat.dumpcat()\n \n if Ndump > 1:\n if isthere(execpars['outfile_name']):\n os.system('rm %s' % execpars['outfile_name'])\n \n father_cat.dumpcat()\n \n t2 = time()\n \n lapsus = t2 - t1\n \n lapsus_fr = secs_to_dhms(lapsus)\n \n if lapsus_fr[0]>0.:\n print \"\"\"\\n\\n\\n'Only' %i days, %i hours, %i minutes, %f seconds \n in analyzing %i objects\\n\\n\\n\"\"\" % \\\n (lapsus_fr[0],lapsus_fr[1],lapsus_fr[2],lapsus_fr[3],nobj_out)\n else: \n print \"\"\"'Only' %i hours, %i minutes, %f seconds in analyzing \n %i objects\\n\\n\\n\"\"\" % (lapsus_fr[1],lapsus_fr[2],lapsus_fr[3],nobj_out)\n \n return None", "def moments_display(Nstar=1,seeing=[0.9,0.,0.],npix=npix,zenith=0,filter='r', theta=0., phi=0,corrector='corrector',x=None,y=None,z=None,regular=False,noise=False,exptime=100,mag=16.,sigma=4.):\n hdu = genImgVallCCD(Nstar=Nstar,seeing=seeing,npix=npix,zenith=zenith,filter=filter, theta=theta,phi=phi, corrector=corrector,x=x,y=y,z=z,regular=regular)\n nn = len(hdu)\n data = []\n colnames = ['x','y','M20','M22','M31','M33']\n for hdui in hdu[1:]:\n Nobj = hdui.data.shape[0]\n M20=np.zeros(Nobj)\n M22=np.zeros(Nobj).astype(complex)\n M31=np.zeros(Nobj).astype(complex)\n M33=np.zeros(Nobj).astype(complex)\n for i in range(Nobj):\n psf = rebin(hdui.data[i][4:].reshape(npix,npix),(40,40))\n if noise == True:\n gain = 0.21 # convert electrons to ADU\n zeropoint = 26.794176 # r band, from Nikolay\n objectphoton = exptime*10**(0.4*(zeropoint - mag))\n skyphoton = 8.460140*exptime\n bkg = skyphoton*gain\n img = (psf * objectphoton + skyphoton)*gain\n img = img + add_imageNoise(img) - bkg\n else:\n img = psf\n M20[i],M22[i],M31[i],M33[i]=complexMoments(data=img,sigma=sigma)\n x=hdui.header['ccdXcen']\n y=hdui.header['ccdYcen']\n data.append([x,y,np.median(M20), 
np.median(M22), np.median(M31), np.median(M33)])\n data=np.array(data)\n datam = data.copy()\n data = subMeanAll(data) # remove the mean of all moments except M20\n pl.figure(figsize=(11,11))\n pl.subplot(2,2,1)\n phi22 = 0.5*np.arctan2(data[:,3].imag,data[:,3].real)\n x = data[:,0].real\n y = data[:,1].real\n #phi22[x<0] = phi22+np.deg2rad(180)\n u = np.abs(data[:,3])*np.cos(phi22)\n v = np.abs(data[:,3])*np.sin(phi22)\n qvr = pl.quiver(x,y,u,v,width = 0.004, color='r',pivot='middle',headwidth=0.,headlength=0.,headaxislength=0.,scale_units='width')\n qk = pl.quiverkey(qvr, -150,-240,np.max(np.sqrt(u**2+v**2)),str(round(np.max(np.sqrt(u**2+v**2)),3))+' pix^2',coordinates='data',color='blue')\n pl.plot(x,y,'b,')\n pl.xlim(-250,250)\n pl.ylim(-250,250)\n pl.grid(color='g')\n pl.xlabel('X [mm] (WEST)')\n pl.ylabel('Y [mm] (NORTH)')\n pl.title('M22')\n pl.subplot(2,2,2)\n phi31 = np.arctan2(data[:,4].imag,data[:,4].real)\n u = np.abs(data[:,4])*np.cos(phi31)\n v = np.abs(data[:,4])*np.sin(phi31)\n qvr=pl.quiver(x,y,u,v,width=0.003,color='r',pivot='middle',headwidth=4)\n qk = pl.quiverkey(qvr, -150,-240,np.max(np.sqrt(u**2+v**2)),str(round(np.max(np.sqrt(u**2+v**2)),3))+' pix^3',coordinates='data',color='blue')\n pl.plot(x,y,'b,')\n pl.xlim(-250,250)\n pl.ylim(-250,250)\n pl.grid(color='g')\n pl.xlabel('X [mm] (WEST)')\n pl.ylabel('Y [mm] (NORTH)')\n pl.title('M31')\n pl.subplot(2,2,3)\n phi33 = np.arctan2(data[:,5].imag,data[:,5].real)/3.\n u = np.abs(data[:,5])*np.cos(phi33)\n v = np.abs(data[:,5])*np.sin(phi33)\n pl.quiver(x,y,u,v,width=0.003,color='r',headwidth=4)\n u = np.abs(data[:,5])*np.cos(phi33+np.deg2rad(120))\n v = np.abs(data[:,5])*np.sin(phi33+np.deg2rad(120))\n pl.quiver(x,y,u,v,width=0.003,color='r',headwidth=4)\n u = np.abs(data[:,5])*np.cos(phi33+np.deg2rad(240))\n v = np.abs(data[:,5])*np.sin(phi33+np.deg2rad(240))\n qvr=pl.quiver(x,y,u,v,width=0.003,color='r',headwidth=4)\n qk = pl.quiverkey(qvr, -150,-240,np.max(np.sqrt(u**2+v**2)),str(round(np.max(np.sqrt(u**2+v**2)),3))+' pix^3',coordinates='data',color='blue')\n pl.plot(x,y,'b,')\n pl.xlim(-250,250)\n pl.ylim(-250,250)\n pl.grid(color='g')\n pl.xlabel('X [mm] (WEST)')\n pl.ylabel('Y [mm] (NORTH)')\n pl.title('M33')\n pl.subplot(2,2,4)\n m20sqr = np.sqrt(data[:,2].real)\n x = data[:,0].real\n y = data[:,1].real\n m20sqr_med = np.median(m20sqr)\n m20sqr_diff = m20sqr - m20sqr_med\n m20sqr_diff_absmed = np.median(np.abs(m20sqr_diff))\n plotScale = 1./m20sqr_diff_absmed*100\n pos = m20sqr_diff >=0\n neg = m20sqr_diff < 0\n pl.scatter(x[pos],y[pos],s=m20sqr_diff[pos]*plotScale,c='r',alpha=0.5)\n pl.scatter(x[neg],y[neg],s=-m20sqr_diff[neg]*plotScale,c='b',alpha=0.5)\n pl.scatter(-230,-210,s=m20sqr_diff_absmed*plotScale,c='b',alpha=0.5)\n pl.text(-200,-215,'-'+str(round(m20sqr_diff_absmed,6))+' pix')\n pl.scatter(-230,-230,s=m20sqr_diff_absmed*plotScale,c='r',alpha=0.5)\n pl.text(-200,-235,str(round(m20sqr_diff_absmed,6))+' pix')\n pl.plot(x,y,'y,')\n pl.grid(color='g')\n pl.xlim(-250,250)\n pl.ylim(-250,250)\n pl.xlabel('X [mm] (WEST)')\n pl.ylabel('Y [mm] (NORTH)')\n pl.title('median '+r'$\\sqrt{M20}$: '+str(round(scale*4*m20sqr_med,3))+' [arcsec]')\n return datam", "def second_moment(self, mass, z=None):\n return 1.0", "def moments(values):\n\n meanValue = numpy.mean(values)\n return (meanValue,\n numpy.sqrt(moment(values, meanValue, 2)),\n moment(values, meanValue, 3),\n moment(values, meanValue, 4))", "def moments(cnt):\n\treturn cv2.moments(cnt)", "def moments(cnt):\n\treturn cv2.moments(cnt)", "def 
_moments_match_numerical(self,obs,tau,v):\r\n #Compute first integral for zeroth moment.\r\n #NOTE constant np.sqrt(2*pi/tau) added at the end of the function\r\n mu = v/tau\r\n def int_1(f):\r\n return self.pdf(f, obs)*np.exp(-0.5*tau*np.square(mu-f))\r\n z_scaled, accuracy = quad(int_1, -np.inf, np.inf)\r\n\r\n #Compute second integral for first moment\r\n def int_2(f):\r\n return f*self.pdf(f, obs)*np.exp(-0.5*tau*np.square(mu-f))\r\n mean, accuracy = quad(int_2, -np.inf, np.inf)\r\n mean /= z_scaled\r\n\r\n #Compute integral for variance\r\n def int_3(f):\r\n return (f**2)*self.pdf(f, obs)*np.exp(-0.5*tau*np.square(mu-f))\r\n Ef2, accuracy = quad(int_3, -np.inf, np.inf)\r\n Ef2 /= z_scaled\r\n variance = Ef2 - mean**2\r\n\r\n #Add constant to the zeroth moment\r\n #NOTE: this constant is not needed in the other moments because it cancells out.\r\n z = z_scaled/np.sqrt(2*np.pi/tau)\r\n\r\n return z, mean, variance", "def simple_moments(x):\n\n mean = x.mean()\n std = x.std()\n sterr = std / np.sqrt(len(x))\n\n return mean, std, sterr", "def _logging_smm(self, stats_obs, stats_sim):\n fname = 'monitoring.estimagic.smm.info'\n if self.num_evals == 1 and os.path.exists(fname):\n os.unlink(fname)\n\n with open(fname, 'a+') as outfile:\n\n fmt_ = '\\n\\n{:>8}{:>15}\\n\\n'\n outfile.write(fmt_.format('EVALUATION', self.num_evals))\n\n fmt_ = '{:>8}' + '{:>15}' * 4 + '\\n\\n'\n info = ['Moment', 'Observed', 'Simulated', 'Difference', 'Weight']\n outfile.write(fmt_.format(*info))\n\n for i, moment in enumerate(stats_obs):\n\n stat_obs, stat_sim = stats_obs[i], stats_sim[i]\n info = [i, stat_obs, stat_sim, abs(stat_obs - stat_sim), self.weighing_matrix[i, i]]\n\n fmt_ = '{:>8}' + '{:15.5f}' * 4 + '\\n'\n outfile.write(fmt_.format(*info))", "def moment(self, n, mu, sigma):\n return scipy_norm.moment(n, mu, sigma)", "def print_masses(names, weights):\n unique = set(names) # all simulated atom types, e.g. C, H\n s = \"%BLOCK SPECIES_MASS\\n\"\n s += \"\\n\".join([\"%s\\t%f\" % (atom, weights[atom]) for atom in unique])\n s += \"\\n%ENDBLOCK SPECIES_MASS\"\n return s", "def do_normal():\n\n tracking = get_tracking()\n for unit in (\"ppm\", \"sec\"):\n\ttunit = unit\n\tif unit == \"sec\":\n\t tunit = \"seconds\"\n\tprint \"multigraph chrony_%s\" % unit\n\tfor key in tracking[tunit].keys():\n\t value = tracking[tunit][key][\"value\"]\n\t print \"%s.value %f\" % (key, float(value))\n\tprint\n return 0", "def _construct_mom_stuff(self):\n a = self.mom_mix_rate\n dist_mean = self.GN.dist_mean\n dist_cov = self.GN.dist_cov\n # Get the generated sample observations for this batch, transformed\n # linearly into the desired space for moment matching...\n X_b = T.dot(self.GN.output, self.mom_match_proj)\n # Get their mean\n batch_mean = T.mean(X_b, axis=0)\n # Get the updated generator distribution mean\n new_mean = ((1.0 - a[0]) * self.GN.dist_mean) + (a[0] * batch_mean)\n # Use the mean to get the updated generator distribution covariance\n X_b_minus_mean = X_b - new_mean\n # Whelp, I guess this line needs the cast... 
for some reason...\n batch_cov = T.dot(X_b_minus_mean.T, X_b_minus_mean) / T.cast(X_b.shape[0], 'floatX')\n new_cov = ((1.0 - a[0]) * self.GN.dist_cov) + (a[0] * batch_cov)\n # Get the cost for deviation from the target distribution's moments\n mean_err = new_mean - self.target_mean\n cov_err = (new_cov - self.target_cov)\n mm_cost = self.mom_match_weight[0] * \\\n (T.sum(mean_err**2.0) + T.sum(cov_err**2.0))\n # Construct the updates for the running estimates of the generator\n # distribution's first and second-order moments.\n mom_updates = OrderedDict()\n mom_updates[self.GN.dist_mean] = new_mean\n mom_updates[self.GN.dist_cov] = new_cov\n return [mm_cost, mom_updates]", "def printPosteriors(posteriors):\n for i,p in enumerate(posteriors):\n print(u\" > Bin {0}: {1:.2f} \\u00B1 {2:.2f}\".format(i,np.mean(p),np.std(p)))\n # \\u00B1 is the same as $\\pm$ in unicode\n\n return", "def moments(data):\n# =============================================================================\n# total = data.sum()\n# X, Y = np.indices(data.shape)\n# x = (X*data).sum()/total\n# y = (Y*data).sum()/total\n# col = data[:, int(y)]\n# \n# width_x = np.sqrt(np.abs((np.arange(col.size)-y)**2*col).sum()/col.sum())\n# \n# row = data[int(x), :]\n# width_y = np.sqrt(np.abs((np.arange(row.size)-x)**2*row).sum()/row.sum())\n# height = data.max()\n# height1 = height\n# =============================================================================\n return(1, 15, 14, 3, 3, 1, 14, 16, 3, 2)", "def escaped_momentum(self):\r\n position, velocity,escaped_particles,impact,collision,mom = self.box_collision_info()\r\n\r\n for i in xrange(1,self.n):\r\n velocity[np.logical_not(impact)] = velocity[np.logical_not(\r\n impact)]\r\n momentum = self.m*velocity\r\n abs_momentum = np.sum(np.sqrt(momentum[:,0]**2 + momentum[:,1]**2\r\n + momentum[:,2]**2))/2\r\n force = abs_momentum/self.dt\r\n\r\n return abs_momentum, force", "def showm():\n def show1(i):\n coeff=[]\n for m in range(5):\n a=SAC.queryDouble('carma.Ovro%d.Drive.Point.Constants.m%d' % (i+1,m+1) ,qmax_)\n coeff.append(a)\n for o in range(3):\n a=SAC.queryDouble('carma.Ovro%d.Drive.Point.Constants.o%d' % (i+1,o+1) ,qmax_)\n coeff.append(a)\n return coeff\n print ' ant m1 m2 m3 m4 m5 o1 o2 o3'\n for i in range(6):\n m = show1(i)\n print ' 00%d %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f' % (i+1,m[0],m[1],m[2],m[3],m[4],m[5],m[6],m[7])", "def test_2_normal(self):\n print(\"test 2: normal distributions\")\n\n mean = self.means[0]\n dispersion = self.dispersions[0]\n\n for i, x in enumerate(self.X):\n print(i+1, normal(x, mean, dispersion), sep=' : ')", "def calc_moments(distribution):\n x = torch.linspace(2, 22, 31)\n d_mean = torch.sum(x * distribution)\n d_var = torch.sum(distribution * (x - d_mean) ** 2) \n \n return d_mean, torch.sqrt(d_var)", "def phast_cmmd(self):\n temp = '{prog} -R {rho} -C {ecov} -E {elen} -N {chrom} -i MAF {maf} {model} > {wig}\\n'.format(**self.dict)\n return temp.format(fnum=self.fnum)", "def complex2ndMoments(data=None,sigma=None):\n nrow,ncol=data.shape\n Isum = data.sum()\n Icol = data.sum(axis=0) # sum over all rows\n Irow = data.sum(axis=1) # sum over all cols\n colgrid = np.arange(ncol)\n rowgrid = np.arange(nrow)\n rowmean=np.sum(rowgrid*Irow)/Isum\n colmean=np.sum(colgrid*Icol)/Isum\n ROW,COL=np.indices((nrow,ncol))\n maxItr = 50\n EP = 0.0001\n for i in range(maxItr):\n wrmat = wr(ROW,COL,rowmean,colmean,sigma)\n IWmat = data*wrmat\n IWcol = IWmat.sum(axis=0)\n IWrow = IWmat.sum(axis=1)\n IWsum = IWmat.sum()\n drowmean = 
np.sum((rowgrid-rowmean)*IWrow)/IWsum\n dcolmean = np.sum((colgrid-colmean)*IWcol)/IWsum\n rowmean = rowmean+2.*drowmean\n colmean = colmean+2.*dcolmean\n if drowmean**2+dcolmean**2 <= EP:\n break\n rowgrid = rowgrid - rowmean # centered\n colgrid = colgrid - colmean\n Mr = np.sum(rowgrid*IWrow)/IWsum\n Mc = np.sum(colgrid*IWcol)/IWsum\n Mrr = np.sum(rowgrid**2*IWrow)/IWsum\n Mcc = np.sum(colgrid**2*IWcol)/IWsum\n Mrc = np.sum(np.outer(rowgrid,colgrid)*IWmat)/IWsum\n Cm = np.matrix([[Mcc,Mrc],[Mrc,Mrr]])\n Cw = np.matrix([[sigma**2,0.],[0.,sigma**2]])\n Cimg = (Cm.I - Cw.I).I\n Mcc = Cimg[0,0]\n Mrr = Cimg[1,1]\n Mrc = Cimg[0,1]\n #M20 = Mrr + Mcc\n #M22 = complex(Mcc - Mrr,2*Mrc)\n return Mcc, Mrr, Mrc", "def atm_print():\n\n # Initialize file\n metric_filename = \"stdatmos_si.txt\"\n with open(metric_filename, 'w') as output_handle:\n\n # Create header\n output_handle.write(\"Geometric Geopotential Speed of\\n\")\n output_handle.write(\"Altitude Altitude Temperature Pressure Density Sound \\n\")\n output_handle.write(\" (m) (m) (K) (N/m**2) (kg/m**3) (m/s) \\n\")\n output_handle.write(\"-----------------------------------------------------------------------\\n\")\n\n # Loop through altitudes\n for i in range(51):\n\n # Calculate properties\n h = i*2000.0\n z, t, p, d = statsi(h)\n a = np.sqrt(1.4*287.0528*t)\n\n # Write to file\n write_string = \"{0:<10}{1:<13.5f}{2:<13.5f}{3:<14.5e}{4:<13.5e}{5:<8.4f}\\n\".format(h, z, t, p, d, a)\n output_handle.write(write_string)\n\n # Initialize file\n english_filename = \"stdatmos_ee.txt\"\n with open(english_filename, 'w') as output_handle:\n\n # Create header\n output_handle.write(\"Geometric Geopotential Speed of\\n\")\n output_handle.write(\"Altitude Altitude Temperature Pressure Density Sound \\n\")\n output_handle.write(\" (ft) (ft) (R) (lbf/ft^2) (slugs/ft^3) (ft/s) \\n\")\n output_handle.write(\"------------------------------------------------------------------------\\n\")\n\n # Loop through altitudes\n for i in range(51):\n\n # Calculate properties\n h = i*5000.0\n z, t, p, d = statee(h)\n a = np.sqrt(1.4*287.0528*t/1.8)/0.3048\n\n # Write to file\n write_string = \"{0:<10}{1:<13.5f}{2:<13.5f}{3:<14.5e}{4:<13.5e}{5:<8.4f}\\n\".format(h, z, t, p, d, a)\n output_handle.write(write_string)", "def mom(x):\n with mp.extradps(5):\n x = _validate_x_bounds(x, low=0, high=1,\n strict_low=True, strict_high=True)\n M1 = _mean(x)\n M2 = _mean([t**2 for t in x])\n c = (M1 - M2) / (M2 - M1**2)\n a = M1*c\n b = (1 - M1)*c\n return a, b", "def get_magmom_string():\n\n magmoms = []\n poscar_lines = open('POSCAR').readlines()\n elements = poscar_lines[5].split()\n amounts = poscar_lines[6].split()\n for i in range(len(elements)):\n if Element(elements[i]).is_transition_metal:\n magmoms.append('{}*6.0'.format(amounts[i]))\n else:\n magmoms.append('{}*0.5'.format(amounts[i]))\n return ' '.join(magmoms)", "def _compute_moments(self, u):\n\n # Get the moments from the parent Gaussian Markov Chain\n #u = self.parents[0].get_moments() #message_to_child()\n\n # Send only moments <X(n)> and <X(n)X(n)> but not <X(n-1)X(n)>\n return u[:2]", "def print_mean_loss(self):\n print(f'Moyenne {self.list_name} : {np.mean(np.array(self.min_list[:,0]))}')", "def complexMoments(data=None,sigma=None):\n nrow,ncol=data.shape\n Isum = data.sum()\n Icol = data.sum(axis=0) # sum over all rows\n Irow = data.sum(axis=1) # sum over all cols\n colgrid = np.arange(ncol)\n rowgrid = np.arange(nrow)\n rowmean=np.sum(rowgrid*Irow)/Isum\n colmean=np.sum(colgrid*Icol)/Isum\n 
ROW,COL=np.indices((nrow,ncol))\n maxItr = 50\n EP = 0.0001\n for i in range(maxItr):\n wrmat = wr(ROW,COL,rowmean,colmean,sigma)\n IWmat = data*wrmat\n IWcol = IWmat.sum(axis=0)\n IWrow = IWmat.sum(axis=1)\n IWsum = IWmat.sum()\n drowmean = np.sum((rowgrid-rowmean)*IWrow)/IWsum\n dcolmean = np.sum((colgrid-colmean)*IWcol)/IWsum\n rowmean = rowmean+2.*drowmean\n colmean = colmean+2.*dcolmean\n if drowmean**2+dcolmean**2 <= EP:\n break\n rowgrid = rowgrid - rowmean # centered\n colgrid = colgrid - colmean\n Mr = np.sum(rowgrid*IWrow)/IWsum\n Mc = np.sum(colgrid*IWcol)/IWsum\n Mrr = np.sum(rowgrid**2*IWrow)/IWsum\n Mcc = np.sum(colgrid**2*IWcol)/IWsum\n Mrc = np.sum(np.outer(rowgrid,colgrid)*IWmat)/IWsum\n Mrrr = np.sum(rowgrid**3*IWrow)/IWsum\n Mccc = np.sum(colgrid**3*IWcol)/IWsum\n Mrrc = np.sum(np.outer(rowgrid**2,colgrid)*IWmat)/IWsum\n Mrcc = np.sum(np.outer(rowgrid,colgrid**2)*IWmat)/IWsum\n M20 = Mrr + Mcc\n M22 = complex(Mcc - Mrr,2*Mrc)\n M31 = complex(3*Mc - (Mccc+Mrrc)/sigma**2, 3*Mr - (Mrcc + Mrrr)/sigma**2)\n M33 = complex(Mccc-3*Mrrc, 3.*Mrcc - Mrrr)\n return M20, M22, M31, M33", "def disp_annotation(self):\r\n print('Iterat #Fevals function value axis ratio sigma minstd maxstd min:sec')\r\n sys.stdout.flush()", "def disp(self, modulo=None): # TODO: rather assign opt['verb_disp'] as default?\r\n if modulo is None:\r\n modulo = self.opts['verb_disp']\r\n\r\n # console display\r\n if modulo:\r\n if (self.countiter-1) % (10 * modulo) < 1:\r\n self.disp_annotation()\r\n if self.countiter > 0 and (self.stop() or self.countiter < 4\r\n or self.countiter % modulo < 1):\r\n if self.opts['verb_time']:\r\n toc = self.elapsed_time()\r\n stime = str(int(toc//60))+':'+str(round(toc%60,1))\r\n else:\r\n stime = ''\r\n print(' '.join((repr(self.countiter).rjust(5),\r\n repr(self.countevals).rjust(7),\r\n '%.15e' % (min(self.fit.fit)),\r\n '%4.1e' % (self.D.max()/self.D.min()),\r\n '%6.2e' % self.sigma,\r\n '%6.0e' % (self.sigma * sqrt(min(self.dC))),\r\n '%6.0e' % (self.sigma * sqrt(max(self.dC))),\r\n stime)))\r\n # if self.countiter < 4:\r\n sys.stdout.flush()" ]
[ "0.67292076", "0.5867294", "0.55727154", "0.55620307", "0.54837894", "0.5482262", "0.5482262", "0.5463738", "0.5439306", "0.54296654", "0.54218966", "0.54009044", "0.53692555", "0.53471076", "0.5339355", "0.5326092", "0.527676", "0.52694386", "0.5255655", "0.52355766", "0.5233068", "0.5195651", "0.5190669", "0.5190062", "0.5169542", "0.5130079", "0.51091033", "0.5093371", "0.50752026", "0.5059267" ]
0.5959959
1
Function to create the network module from provided model fn and flags
def create_model(self):
    model = self.model_fn(self.flags)
    print(model)
    return model
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def model_fn(model_dir):\n\n net = gluon.nn.SymbolBlock.imports('%s/model.json' % model_dir,\n ['data'], \n param_file='%s/model.params' % model_dir,\n ctx=mx.cpu())\n\n return net", "def model_fn(model_dir):\n \n sym, arg_params, aux_params = mx.model.load_checkpoint('%s/102flowers' % model_dir, 0)\n mod = mx.mod.Module(symbol=sym, context=mx.cpu(), label_names=None)\n mod.bind(for_training=False, data_shapes=[('data', (1,3,224,224))], label_shapes=mod._label_shapes)\n mod.set_params(arg_params, aux_params, allow_missing=True)\n return mod", "def build_model_fn(self):\n # Define the model_fn we want to return\n def model_fn(features, labels, mode):\n with tf.variable_scope(self.variable_scope):\n # 1. Define the input placeholder\n if len(self.input_shape) == 2: # Reshape if necessary\n new_shape = [-1] + list(self.input_shape) + [1]\n net_input = tf.reshape(\n tensor=features[\"x\"],\n shape=new_shape,\n name=\"L0_RESHAPE\"\n )\n else:\n net_input = features[\"x\"]\n\n # 2. Simply call the network\n self.tf_partial_network = sequence_to_net(\n sequence=self.encoded_network,\n input_tensor=net_input\n )\n\n # 3. Call here the functions for flops & density to avoid more\n # elements. The check is done because for some reason, the\n # number of FLOPS changes during training.\n if self.flops is None:\n self.flops = compute_network_flops(\n graph=tf.get_default_graph(),\n collection_name=self.variable_scope,\n logdir=self.log_path\n )\n\n if self.density is None:\n self.density = compute_network_density(\n graph=tf.get_default_graph(),\n collection_name=self.variable_scope\n )\n\n # 4. Build the fully-connected layer after the block\n with tf.name_scope(\"L_FC\"):\n # Flatten and connect to the Dense Layer\n ll_flat = tf.layers.flatten(\n inputs=self.tf_partial_network,\n name=\"Flatten\"\n )\n dense_layer = tf.layers.dense(\n inputs=ll_flat,\n units=1024,\n activation=tf.nn.relu,\n name=\"DENSE\"\n )\n dropout_layer = tf.layers.dropout(\n inputs=dense_layer,\n rate=0.4,\n # pylint: disable=no-member\n training=mode == tf.estimator.ModeKeys.TRAIN,\n name=\"DROPOUT\"\n )\n\n # 5. 
Build the prediction layer, based on a softmax\n with tf.name_scope(\"L_PRED\"):\n # Logits layer\n logits_layer = tf.layers.dense(\n inputs=dropout_layer,\n units=self.n_clases,\n name=\"PL_Logits\"\n )\n\n predictions = {\n \"classes\": tf.argmax(\n input=logits_layer,\n axis=1,\n name=\"PL_Classes\"\n ),\n \"probabilities\": tf.nn.softmax(\n logits=logits_layer,\n name=\"PL_Softmax\"\n )\n }\n\n # If we are asked for prediction only, we return the\n # prediction and stop adding nodes to the graph.\n # pylint: disable=no-member\n if mode == tf.estimator.ModeKeys.PREDICT:\n return tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=predictions\n )\n\n # Build the training nodes\n with tf.name_scope(\"L_TRAIN\"):\n # Loss\n loss_layer = tf.losses.sparse_softmax_cross_entropy(\n labels=labels,\n logits=logits_layer\n )\n\n # Training Op\n # pylint: disable=no-member\n if mode == tf.estimator.ModeKeys.TRAIN:\n # The optimizer via Gradient Descent (we can change it)\n optimizer = tf.train.AdamOptimizer(\n learning_rate=0.001,\n beta1=0.9,\n beta2=0.999,\n epsilon=10e-08,\n name=\"OPT\"\n )\n # We say that we want to optimize the loss layer using\n # the optimizer.\n train_op = optimizer.minimize(\n loss=loss_layer,\n global_step=tf.train.get_global_step(),\n name=\"OPT_MIN\"\n )\n # And return\n # pylint: disable=no-member\n return tf.estimator.EstimatorSpec(\n mode=mode,\n loss=loss_layer,\n train_op=train_op\n )\n\n # Build the evaluation nodes (regular accuracy).\n with tf.name_scope(\"L_EVAL\"):\n # Evaluation metric is accuracy\n eval_metric_ops = {\n \"accuracy\": tf.metrics.accuracy(\n labels=labels,\n predictions=predictions[\"classes\"],\n name=\"ACC\"\n )\n }\n\n # pylint: disable=no-member\n return tf.estimator.EstimatorSpec(\n mode=mode,\n loss=loss_layer,\n eval_metric_ops=eval_metric_ops\n )\n\n # Return the model_fn function\n return model_fn", "def build_model_fn(self):", "def create_model(self, input_shape, num_actions, mode, args, model_name='q_network'):\n assert (mode in (\"linear\", \"duel\", \"dqn\"))\n with tf.variable_scope(model_name):\n input_data = Input(shape=input_shape, name=\"input\")\n if mode == \"linear\":\n # #version 4 elu:\n # flatten_hidden = Flatten(name=\"flatten\")(input_data)\n # FC_1 = Dense(512, activation='elu', name='FC1-elu')(flatten_hidden)\n # FC_2 = Dense(512, activation='elu', name='FC2-elu')(FC_1)\n # FC_3 = Dense(512, activation='elu', name='FC3-elu')(FC_2)\n # FC_4 = Dense(512, activation='elu', name='FC4-elu')(FC_3)\n # output = Dense(num_actions, activation='elu', name=\"output\")(FC_4)\n\n #version 4 elu:\n flatten_hidden = Flatten(name=\"flatten\")(input_data)\n FC_1 = Dense(1024, activation='elu', name='FC1-elu')(flatten_hidden)\n FC_2 = Dense(1024, activation='elu', name='FC2-elu')(FC_1)\n FC_3 = Dense(1024, activation='elu', name='FC3-elu')(FC_2)\n FC_4 = Dense(512, activation='elu', name='FC4-elu')(FC_3)\n output = Dense(num_actions, activation='elu', name=\"output\")(FC_4)\n\n else:\n if not (args.recurrent):\n # # # version 1:\n # h1 = Convolution2D(32, (8, 8), strides=4, activation=\"relu\", name=\"conv1\")(input_data)\n # h2 = Convolution2D(64, (4, 4), strides=2, activation=\"relu\", name=\"conv2\")(h1)\n # h3 = Convolution2D(64, (3, 3), strides=1, activation=\"relu\", name=\"conv3\")(h2)\n # context = Flatten(name=\"flatten\")(h3)\n\n # # version 2:\n # conv1 = Convolution2D(1, (5, 5), strides=1, activation=\"elu\", name=\"conv1\")(input_data)\n # flatten = Flatten(name=\"flatten\")(conv1)\n # FC_2 = Dense(512, 
activation='elu', name='FC2-elu')(flatten)\n # context = Dense(512, activation='elu', name='FC4-elu')(FC_2)\n\n # version 3:\n conv1 = Convolution2D(32, (2, 2), strides=1, activation=\"relu\", name=\"conv1\")(input_data)\n flatten = Flatten(name=\"flatten\")(conv1)\n FC_2 = Dense(128, activation='relu', name='FC2-relu')(flatten)\n FC_3 = Dense(128, activation='relu', name='FC3-relu')(FC_2)\n context = Dense(128, activation='elu', name='FC4-elu')(FC_3)\n\n\n\n # else:\n # print('>>>> Defining Recurrent Modules...')\n # input_data_expanded = Reshape((input_shape[0], input_shape[1], input_shape[2], 1),\n # input_shape=input_shape)(input_data)\n # input_data_TimeDistributed = Permute((3, 1, 2, 4), input_shape=input_shape)(input_data_expanded)\n # h1 = TimeDistributed(Convolution2D(32, (8, 8), strides=4, activation=\"relu\", name=\"conv1\"), \\\n # input_shape=(args.num_frames, input_shape[0], input_shape[1], 1))(\n # input_data_TimeDistributed)\n # h2 = TimeDistributed(Convolution2D(64, (4, 4), strides=2, activation=\"relu\", name=\"conv2\"))(h1)\n # h3 = TimeDistributed(Convolution2D(64, (2, 2), strides=1, activation=\"relu\", name=\"conv3\"))(h2)\n # flatten_hidden = TimeDistributed(Flatten())(h3)\n # hidden_input = TimeDistributed(Dense(512, activation='relu', name='flat_to_512'))(flatten_hidden)\n # if not (args.a_t):\n # context = LSTM(512, return_sequences=False, stateful=False, input_shape=(args.num_frames, 512))(\n # hidden_input)\n # else:\n # if args.bidir:\n # hidden_input = Bidirectional(\n # LSTM(512, return_sequences=True, stateful=False, input_shape=(args.num_frames, 512)),\n # merge_mode='sum')(hidden_input)\n # all_outs = Bidirectional(\n # LSTM(512, return_sequences=True, stateful=False, input_shape=(args.num_frames, 512)),\n # merge_mode='sum')(hidden_input)\n # else:\n # all_outs = LSTM(512, return_sequences=True, stateful=False,\n # input_shape=(args.num_frames, 512))(hidden_input)\n # # attention\n # attention = TimeDistributed(Dense(1, activation='tanh'))(all_outs)\n # # print(attention.shape)\n # attention = Flatten()(attention)\n # attention = Activation('softmax')(attention)\n # attention = RepeatVector(512)(attention)\n # attention = Permute([2, 1])(attention)\n # sent_representation = merge([all_outs, attention], mode='mul')\n # context = Lambda(lambda xin: K.sum(xin, axis=-2), output_shape=(512,))(sent_representation)\n # # print(context.shape)\n\n if mode == \"dqn\":\n h4 = Dense(512, activation='elu', name=\"fc\")(context)\n output = Dense(num_actions, name=\"output\")(h4)\n # elif mode == \"duel\":\n # value_hidden = Dense(512, activation='relu', name='value_fc')(context)\n # value = Dense(1, name=\"value\")(value_hidden)\n # action_hidden = Dense(512, activation='relu', name='action_fc')(context)\n # action = Dense(num_actions, name=\"action\")(action_hidden)\n # action_mean = Lambda(lambda x: tf.reduce_mean(x, axis=1, keep_dims=True), name='action_mean')(\n # action)\n # output = Lambda(lambda x: x[0] + x[1] - x[2], name='output')([action, value, action_mean])\n model = Model(inputs=input_data, outputs=output)\n print(model.summary())\n return model", "def model_fn(model_dir):\n ctx = mx.cpu()\n net = unet.Unet()\n print (\"Loading\", model_dir)\n if path.exists(model_dir+\"/unet_RGB.params\"):\n print (\"Loading RGB Model\")\n net.load_params(model_dir+\"/unet_RGB.params\", ctx)\n print (\"RGB Model Loaded\")\n \n elif path.exists(model_dir+\"/unet_ALL_BANDS.params\"):\n print (\"Loading ALL_BANDS Model\")\n 
net.load_params(model_dir+\"/unet_ALL_BANDS.params\", ctx)\n print (\"ALL_BANDS Model Loaded\")\n \n else:\n print (\"Model Missing\")\n net=None\n return (net)", "def build_model_fn(self):\n # Define the model_fn we want to return\n def model_fn(features, labels, mode):\n with tf.variable_scope(self.variable_scope):\n # 1. Define the input placeholder\n if len(self.input_shape) == 2:\n net_input = tf.reshape(\n tensor=features[\"x\"],\n shape=[-1] + list(self.input_shape) + [1],\n name=\"L0_RESHAPE\"\n )\n else:\n net_input = features[\"x\"]\n\n # 2. Simply call the network\n self.tf_partial_network = sequence_to_net(\n sequence=self.encoded_network,\n input_tensor=net_input\n )\n\n # 3. Build the Fully-Connected layers after block.\n with tf.name_scope(\"L_FC\"):\n # Flatten and connect to the Dense Layer\n ll_flat = tf.layers.flatten(\n inputs=self.tf_partial_network,\n name=\"Flatten\"\n )\n dense_layer = tf.layers.dense(\n inputs=ll_flat,\n units=1024,\n activation=tf.nn.relu,\n name=\"DENSE\"\n )\n dropout_layer = tf.layers.dropout(\n inputs=dense_layer,\n rate=0.4,\n # pylint: disable=no-member\n training=mode == tf.estimator.ModeKeys.TRAIN,\n name=\"DROPOUT\"\n )\n\n # 4. Build the Prediction Layer based on a Softmax\n with tf.name_scope(\"L_PRED\"):\n # Logits layer\n logits_layer = tf.layers.dense(\n inputs=dropout_layer,\n units=self.n_clases,\n name=\"PL_Logits\"\n )\n\n predictions = {\n \"classes\": tf.argmax(\n input=logits_layer,\n axis=1,\n name=\"PL_Classes\"\n ),\n \"probabilities\": tf.nn.softmax(\n logits=logits_layer,\n name=\"PL_Softmax\"\n )\n }\n\n # If we are asked for prediction only, we return the\n # prediction and stop adding nodes to the graph.\n # pylint: disable=no-member\n if mode == tf.estimator.ModeKeys.PREDICT:\n return tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=predictions\n )\n\n # 4. Build the training nodes\n with tf.name_scope(\"L_TRAIN\"):\n # Loss\n loss_layer = tf.losses.sparse_softmax_cross_entropy(\n labels=labels,\n logits=logits_layer\n )\n\n # Training Op\n # pylint: disable=no-member\n if mode == tf.estimator.ModeKeys.TRAIN:\n # The optimizer via Gradient Descent (we can change it)\n optimizer = tf.train.AdamOptimizer(\n learning_rate=0.001,\n beta1=0.9,\n beta2=0.999,\n epsilon=10e-08,\n name=\"OPT\"\n )\n # We say that we want to optimize the loss layer using\n # the optimizer.\n train_op = optimizer.minimize(\n loss=loss_layer,\n global_step=tf.train.get_global_step(),\n name=\"OPT_MIN\"\n )\n # And return\n # pylint: disable=no-member\n return tf.estimator.EstimatorSpec(\n mode=mode,\n loss=loss_layer,\n train_op=train_op\n )\n\n # 5. 
Build the evaluation nodes.\n with tf.name_scope(\"L_EVAL\"):\n # Evaluation metric is accuracy\n eval_metric_ops = {\n \"accuracy\": tf.metrics.accuracy(\n labels=labels,\n predictions=predictions[\"classes\"],\n name=\"ACC\"\n )\n }\n\n # pylint: disable=no-member\n return tf.estimator.EstimatorSpec(\n mode=mode,\n loss=loss_layer,\n eval_metric_ops=eval_metric_ops\n )\n # End of tf.variable_scope()\n\n # Return the model_fn function\n return model_fn", "def build_model(self) -> nn.Module:\n pass", "def make_model():\n m = model_class(*argv[2:-1])\n modelobj[\"model\"] = m", "def create_module(sbml_model_file, model_name, model_output_dir, condition_df,\n observable_df):\n\n from amici.petab_import import import_model\n import_model(sbml_model=sbml_model_file, observable_table=observable_df,\n model_name=model_name, model_output_dir=model_output_dir,\n verbose=True, condition_table=condition_df)", "def create_model(self, fun, kwargs=None, compile=True):\n if kwargs is None:\n kwargs = {}\n\n self.model = fun(self.config.inputs, self.config.output, **kwargs)\n if compile:\n self.model.compile(\n loss=self.config.get_loss(self.modeldir),\n optimizer=\"adam\", metrics=[\"accuracy\"])", "def load(self, name=\"\"):\n\n self.constructed = True\n if name == \"\":\n name = \"/home/unai/Escritorio/MultiNetwork/model/model\"\n\n network_descriptors = {\"Generic\": GenericDescriptor, \"Decoder\": DecoderDescriptor, \"Discrete\": DiscreteDescriptor, \"Convolution\": ConvolutionDescriptor}\n\n if not os.path.isfile(name):\n print(\"Error at loading the model\")\n return None\n\n f = open(name, \"r+\")\n\n lines = f.readlines()\n\n i = 0\n while lines[i] != \"\\n\": # Each component is stored in a line\n ident, n_inp, kind, n_hidden, layers, init, act, cond_rand, taking, producing, depth, reachable, belows = lines[i][:-1].split(\"_\")\n kwargs = {}\n if int(ident[1:]) > self.last_net:\n self.last_net = int(ident[1:])\n\n self.reachable[ident] = reachable.split(\",\")\n self.comps_below[ident] = belows.split(\",\")\n\n if \"onv\" in kind: # Not working right now\n filters, sizes, layers, strides = layers.split(\"*\")\n sizes = sizes.split(\",\")\n s = np.array([[int(sz) for sz in szs.split(\"/\")] for szs in sizes])\n desc = network_descriptors[kind](int(inp), int(outp), int(n_inp), layers.split(\",\"), filters.split(\",\"), [int(x) for x in strides.split(\",\")], s, [int(x) for x in act.split(\",\")], [int(x) for x in init.split(\",\")], kwargs)\n else:\n if len(kwargs) > 0: # Not working right now\n kwargs = kwargs.split(\"-\")\n kwargs[0] = [int(x) for x in kwargs[0].split(\".\") if len(x) > 0]\n kwargs[1] = [int(x) for x in kwargs[1].split(\".\") if len(x) > 0]\n if len(cond_rand) > 0:\n cond_rand = cond_rand.split(\"-\")\n cond_rand[0] = [int(x) for x in cond_rand[0].split(\",\") if len(x) > 0]\n cond_rand[1] = [int(x) for x in cond_rand[1].split(\",\") if len(x) > 0]\n kwargs[\"conds\"] = cond_rand\n desc = network_descriptors[kind](int(taking.split(\",\")[0]), int(producing.split(\",\")[0]), int(n_inp), int(n_hidden), [int(x) for x in layers.split(\",\") if x != \"-1\"], init_functions[[int(x) for x in init.split(\",\") if x != \"-1\"]],\n act_functions[[int(x) for x in act.split(\",\") if x != \"-1\"]], **kwargs)\n\n # print(\"ident\", ident, \"n_inp\", n_inp, \"kind\", kind, \"inp\", inp, \"outp\", outp, \"layers\", layers, \"init\", init, \"act\", act, \"taking\", taking, \"producing\", producing, \"depth\", depth, \"kwargs\", kwargs)\n net = NetworkComp(desc, 
InOut(size=int(taking.split(\",\")[0]), data_type=taking.split(\",\")[1]), InOut(data_type=producing.split(\",\")[1], size=int(producing.split(\",\")[0])), int(depth))\n\n self.add_net(net, ident)\n i += 1\n\n i += 1\n\n while lines[i] != \"\\n\": # Inputs\n\n ident, size, kind, depth = lines[i].split(\"_\")\n\n self.inputs[ident] = ModelComponent(None, InOut(size=int(size), data_type=kind), int(depth))\n i += 1\n\n i += 1\n\n while lines[i] != \"\\n\": # Outputs\n\n ident, size, kind, depth, belows = lines[i].split(\"_\")\n\n self.outputs[ident] = ModelComponent(InOut(size=int(size), data_type=kind), None, int(depth))\n self.comps_below[ident] = belows.split(\",\")\n i += 1\n\n i += 1\n\n while i < len(lines): # Connections\n name, inp, outp, kind, size = lines[i].split(\"_\")\n\n if int(name[1:]) > self.last_con:\n self.last_con = int(name[1:])\n\n self.connections[name] = Connection(inp, outp, InOut(kind, int(size)), name)\n i += 1\n self.update_below()", "def create_model(mode: str, path_to_checkpoint = None) -> LightningModule:\n\n assert mode != None and mode != ''\n\n if mode == 'scratch':\n if path_to_checkpoint != None:\n model = DogsBreedClassifier.load_from_checkpoint(path_to_checkpoint)\n else:\n model = DogsBreedClassifier()\n elif mode == 'densenet':\n if path_to_checkpoint != None:\n model = DogsBreedClassifierDenseNet.load_from_checkpoint(path_to_checkpoint)\n else:\n model = DogsBreedClassifierDenseNet()\n else:\n if path_to_checkpoint != None:\n model = DogsBreedClassifierEfficientNet.load_from_checkpoint(path_to_checkpoint)\n else:\n model = DogsBreedClassifierEfficientNet()\n\n return model", "def model_fn(self, features, labels, mode, params, config):\n raise NotImplementedError()", "def model_fn(model_dir):\n \n model = resnet18Basic(num_classes=10)\n net.load_params('%s/model.params' % model_dir, ctx=mx.cpu())\n return net", "def create_model():\n\n class Net(nn.Cell):\n def construct(self, x, y):\n return x\n\n net = Net()\n model_simple = Model(net)\n\n return model_simple", "def model_fn(model_dir):\n model = models.resnet50(pretrained=True)\n\n _ = model.eval()\n\n modules=list(model.children())[:-1]\n model=nn.Sequential(*modules)\n for p in model.parameters():\n p.requires_grad = False\n\n device = torch.device('cuda:0' if torch.cuda.is_available() else \"cpu\")\n\n model = model.to(device)\n\n return model", "def build_model():", "def create_model(sess, FLAGS, mode):\n if FLAGS.model == \"vallina\":\n model = LinearModel(FLAGS, mode)\n model.build()\n else:\n pass\n # other model \n\n # create task file\n model_path = os.path.join(FLAGS.logdir, FLAGS.task_name)\n if not os.path.exists(model_path):\n os.makedirs(model_path)\n print (\"Save model to {}\".format(model_path))\n elif (FLAGS.reset):\n shutil.rmtree(model_path)\n os.makedirs(model_path)\n print (\"Remove existing model at {} and restart.\".format(model_path))\n else:\n raise ValueError(\"Fail to create the new model.\")\n\n # Save the current configurations\n config = dict(FLAGS.__flags.items())\n with open(\"/\".join([model_path, \"config.json\"]), \"w\") as file:\n json.dump(config, file)\n\n # initialize variables\n sess.run(tf.global_variables_initializer())\n\n return model", "def _parse_model(model: str, num_classes: int) -> Callable[[], tf.keras.Model]:\n if model == 'cnn':\n keras_model_builder = functools.partial(\n create_conv_dropout_model, num_classes=num_classes)\n elif model in ['resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152']:\n keras_model_builder = functools.partial(\n 
getattr(resnet_models, f'create_{model}'),\n input_shape=(28, 28, 1),\n num_classes=num_classes)\n else:\n raise ValueError(\n 'Cannot handle model flag [{!s}], must be one of {!s}.'.format(\n model, _EMNIST_MODELS))\n return keras_model_builder", "def create(fpath):\n model_info = json.load(open(fpath))\n\n model_shape = model_info['model']\n model_settings = model_info['config']\n dropout_chance = model_info['config']['dropout_chance']\n\n nn = NeuralNetwork(model_shape, model_settings, dropout_probability=dropout_chance)\n return nn", "def create_model(hparams, mode):\n\n graph = tf.Graph()\n\n with graph.as_default():\n with tf.name_scope(\"input_pipe\"):\n dataset = create_dataset(hparams, mode)\n iterator = dataset.make_initializable_iterator()\n model = LMandBDRNNModel(hparams=hparams,\n iterator=iterator,\n mode=mode)\n\n sess = tf.Session(graph=graph)\n\n modeltuple = ModelTuple(graph=graph, iterator=iterator,\n model=model, session=sess)\n\n return modeltuple", "def build_model(\n self,\n cfg: Config,\n fp16: bool = False,\n **kwargs,\n ) -> torch.nn.Module:\n model_builder = getattr(self, \"model_builder\", build_segmentor)\n model = model_builder(cfg, **kwargs)\n if bool(fp16):\n wrap_fp16_model(model)\n return model", "def model_fn_builder(model_config,\n train_params):\n def model_fn(features, labels, mode, params):\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n masked_lm_positions = features[\"masked_lm_positions\"]\n masked_lm_ids = features[\"masked_lm_ids\"]\n masked_lm_weights = features[\"masked_lm_weights\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n model = getattr(models, model_config.model_name)(config=model_config,\n is_training=is_training)\n _ = model(input_ids, input_mask=input_mask, token_type_ids=segment_ids)\n\n # TODO (@zhaoshenjian.01): check conditional_jit_scope\n # split loss calculation across batch\n batch_splits = train_params.get(\"batch_splits\", 1)\n if batch_splits == 1:\n # sparse_softmax_cross_entropy_with_logits\n masked_lm_output_dict = get_masked_lm_output(model_config,\n model.get_sequence_output(),\n model.get_embedding_table(),\n masked_lm_positions,\n masked_lm_ids,\n masked_lm_weights)\n else:\n # use for large vocab\n masked_lm_output_dict = get_masked_lm_output_split_batch(\n model_config,\n model.get_sequence_output(),\n model.get_embedding_table(),\n masked_lm_positions,\n masked_lm_ids,\n masked_lm_weights,\n batch_splits=batch_splits)\n\n masked_lm_loss = masked_lm_output_dict[\"loss\"]\n\n use_nsp = train_params.get(\"use_nsp\", True)\n if use_nsp:\n next_sentence_labels = features[\"next_sentence_labels\"]\n next_sentence_output_dict = get_next_sentence_output(\n model_config, model.get_pooled_output(), next_sentence_labels)\n next_sentence_loss = next_sentence_output_dict[\"loss\"]\n else:\n next_sentence_loss = 0\n\n total_loss = masked_lm_loss + next_sentence_loss\n\n tvars = tf.compat.v1.trainable_variables()\n # run init\n init_checkpoint = train_params.get(\"init_checkpoint\")\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map,\n initialized_variable_names) = get_assignment_map_from_checkpoint(\n tvars, init_checkpoint)\n tf.compat.v1.train.init_from_checkpoint(init_checkpoint, 
assignment_map)\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint,\n assignment_map)\n return tf.train.Scaffold()\n scaffold_fn = tpu_scaffold\n logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n logging.info(\" name = {}, shape = {} {}\".format(var.name, var.shape,\n init_string))\n\n # default `bert_decay` lr_scheduler\n lr_params = train_params.get(\n 'lr_scheduler', {\n 'name': 'bert_decay',\n 'learning_rate': 1e-4,\n 'warmup_steps': 10000,\n 'num_train_steps': 1000000\n })\n if mode == tf.estimator.ModeKeys.TRAIN:\n train_op, _ = optimizers.create_optimizer(\n loss=total_loss,\n init_lr=lr_params['learning_rate'],\n num_train_steps=lr_params['num_train_steps'],\n num_warmup_steps=lr_params['warmup_steps'])\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n return output_spec\n raise NotImplementedError\n\n return model_fn", "def onnx_compiler(func):\n\n assert isinstance(func, tvm.relay.function.Function)\n name = str(func.attrs.global_symbol)\n model = to_onnx(func, {}, name)\n const_vars = [const.name for const in model.graph.initializer]\n name_bytes = bytes(name, \"utf-8\")\n name_size = struct.pack(\"I\", len(name_bytes))\n model_serialized = model.SerializeToString()\n model_size = struct.pack(\"I\", model.ByteSize())\n data = b\"\" + name_size + name_bytes + model_size + model_serialized\n\n runtime_func = \"runtime.ONNXModuleCreate\"\n fcreate = tvm._ffi.get_global_func(runtime_func)\n return fcreate(data.hex(), name, const_vars)", "def create_model():\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('--DISC_LR', type=float, default=1e-4)\r\n parser.add_argument('--GEN_LR', type=float, default=1e-3)\r\n parser.add_argument('--GEN_BETA1', type=float, default=0.9)\r\n parser.add_argument('--GEN_BETA2', type=float, default=0.999)\r\n parser.add_argument('--IMAGE_SIZE', type=int, default=None)\r\n parser.add_argument('--QUES_SIZE', type=int, default=None)\r\n parser.add_argument('--QUES_EMBED_SIZE', type=int, default=2048)\r\n parser.add_argument('--WORD_EMBED_SIZE', type=int, default=512)\r\n parser.add_argument('--VOCAB_SIZE', type=int, default=1004)\r\n args, task_args = parser.parse_known_args()\r\n override_if_not_in_args('--max_steps', '1000', task_args)\r\n override_if_not_in_args('--batch_size', '64', task_args)\r\n override_if_not_in_args('--eval_set_size', '370', task_args)\r\n override_if_not_in_args('--eval_interval_secs', '2', task_args)\r\n override_if_not_in_args('--log_interval_secs', '2', task_args)\r\n override_if_not_in_args('--min_train_eval_rate', '2', task_args)\r\n\r\n return Model(args.DISC_LR, args.GEN_LR, args.GEN_BETA1, args.GEN_BETA2,\r\n args.IMAGE_SIZE, args.QUES_EMBED_SIZE, args.WORD_EMBED_SIZE,\r\n args.QUES_SIZE, args.VOCAB_SIZE), task_args", "def model_creator(config):\n return nn.Linear(1, 1)", "def createProtoSpecification(model, filename):\n net = network_pb2.Network()\n num_layers = 0\n\n # Iterate through the layers\n for layer in model.children():\n for child in layer.modules():\n if isinstance(child, nn.Conv2d):\n # Make the conv message\n convLayer = net.layers.add()\n makeConv2DMessage(child, convLayer)\n num_layers += 1\n\n elif isinstance(child, nn.MaxPool2d):\n # Make the pool message\n poolLayer = net.layers.add()\n makePool2DMessage(child, poolLayer)\n num_layers += 1\n \n elif isinstance(child, 
nn.AvgPool2d):\n # Make the pool message\n poolLayer = net.layers.add()\n makePool2DMessage(child, poolLayer, avg=True)\n num_layers += 1\n\n elif isinstance(child, nn.AdaptiveAvgPool2d):\n # Make the adaptive pool message\n apoolLayer = net.layers.add()\n makePool2DMessage(child, apoolLayer, avg=True, adaptive=True)\n num_layers += 1\n\n elif isinstance(child, nn.ReLU):\n # Make the activation message\n reluact = net.layers.add()\n makeReLUMessage(reluact)\n num_layers += 1\n\n elif isinstance(child, nn.Sigmoid):\n # Make the activation message\n sigact = net.layers.add()\n makeSigmoidMessage(sigact)\n num_layers += 1\n\n elif isinstance(child, nn.Linear):\n # Make the linear layer message\n linearLayer = net.layers.add()\n makeFCMessage(child, linearLayer)\n num_layers += 1\n\n elif isinstance(child, nn.Dropout):\n # Make the DropOut layer message\n dropLayer = net.layers.add()\n makeDropoutMessage(child, dropLayer)\n num_layers += 1\n\n net.num_layers = num_layers\n\n # Store in Pre-trained Models\n filename = PRE_TRAINED_DIR + filename\n f = open(filename, \"wb\")\n f.write(net.SerializeToString())\n f.close()", "def build_model(cls, args, task):\n global PAD_IDX, EOS_IDX\n # make sure all arguments are present in older models\n w2v_lm_architecture2(args)\n\n if not hasattr(args, \"max_source_positions\"):\n args.max_source_positions = 2048\n if not hasattr(args, \"max_target_positions\"):\n args.max_target_positions = 2048\n\n tgt_dict = task.target_dictionary\n PAD_IDX = tgt_dict.pad()\n EOS_IDX = tgt_dict.eos()\n\n encoder = cls.build_encoder(args)\n assigner = cls.build_assigner(args, encoder.d)\n lm = cls.build_lm(args, task)\n\n return cls(args, encoder, assigner, lm)", "def initialize_model(model_name, num_classes, feature_extract, verbose=False):\n\n model_ft = None\n\n if model_name == \"resnet\":\n \"\"\" Resnet18\n \"\"\"\n model_ft = models.resnet18(pretrained=True)\n set_parameter_requires_grad(model_ft, feature_extract)\n num_ftrs = model_ft.fc.in_features\n model_ft.fc = nn.Linear(num_ftrs, num_classes)\n\n elif model_name == \"alexnet\":\n \"\"\" Alexnet\n \"\"\"\n model_ft = models.alexnet(pretrained=True)\n set_parameter_requires_grad(model_ft, feature_extract)\n num_ftrs = model_ft.classifier[6].in_features\n model_ft.classifier[6] = nn.Linear(num_ftrs, num_classes)\n\n elif model_name == \"vgg\":\n \"\"\" VGG11_bn\n \"\"\"\n model_ft = models.vgg11_bn(pretrained=True)\n set_parameter_requires_grad(model_ft, feature_extract)\n num_ftrs = model_ft.classifier[6].in_features\n model_ft.classifier[6] = nn.Linear(num_ftrs, num_classes)\n\n elif model_name == \"squeezenet\":\n \"\"\" Squeezenet\n \"\"\"\n with warnings.catch_warnings(): # temporarily suppress warnings about deprecated functions\n warnings.simplefilter(\"ignore\")\n model_ft = models.squeezenet1_0(pretrained=True)\n set_parameter_requires_grad(model_ft, feature_extract)\n model_ft.classifier[1] = nn.Conv2d(512, num_classes, kernel_size=(1, 1), stride=(1, 1))\n model_ft.num_classes = num_classes\n\n elif model_name == \"densenet\":\n \"\"\" Densenet\n \"\"\"\n model_ft = models.densenet121(pretrained=True)\n set_parameter_requires_grad(model_ft, feature_extract)\n num_ftrs = model_ft.classifier.in_features\n model_ft.classifier = nn.Linear(num_ftrs, num_classes)\n\n elif model_name == \"inception\":\n \"\"\" Inception v3\n Be careful, expects (299,299) sized images and has auxiliary output\n \"\"\"\n model_ft = models.inception_v3(pretrained=True)\n set_parameter_requires_grad(model_ft, feature_extract)\n # 
Handle the auxilary net\n num_ftrs = model_ft.AuxLogits.fc.in_features\n model_ft.AuxLogits.fc = nn.Linear(num_ftrs, num_classes)\n # Handle the primary net\n num_ftrs = model_ft.fc.in_features\n model_ft.fc = nn.Linear(num_ftrs, num_classes)\n\n else: # Unreachable\n exit()\n\n # Gather the parameters to be optimized\n params_to_update = list(filter(lambda p: p.requires_grad, model_ft.parameters()))\n\n # Print model info\n if verbose:\n print()\n print(model_ft)\n print()\n print(\"Params to learn:\")\n for name, param in model_ft.named_parameters():\n if param.requires_grad:\n print('\\t', name)\n\n return model_ft, params_to_update" ]
[ "0.671422", "0.6646007", "0.63070667", "0.6304503", "0.6272501", "0.62541854", "0.62258416", "0.6205152", "0.61878914", "0.6128737", "0.6079335", "0.6076014", "0.606094", "0.60404265", "0.5982481", "0.5978807", "0.59529245", "0.59429884", "0.59308094", "0.5929518", "0.5910746", "0.5909759", "0.59001386", "0.5876519", "0.5870974", "0.5847783", "0.583508", "0.5827585", "0.58218503", "0.579558" ]
0.6740473
0
Make the corresponding optimizer from the flags. Only the optimizers below are allowed; feel free to add more.
def make_optimizer(self): # parameters = [self.encoder.parameters(), self.decoder.parameters(), self.spec_enc.parameters()] if self.flags.optim == 'Adam': op = torch.optim.Adam(self.model.parameters(), lr=self.flags.lr, weight_decay=self.flags.reg_scale) elif self.flags.optim == 'RMSprop': op = torch.optim.RMSprop(self.model.parameters(), lr=self.flags.lr, weight_decay=self.flags.reg_scale) elif self.flags.optim == 'SGD': op = torch.optim.SGD(self.model.parameters(), lr=self.flags.lr, weight_decay=self.flags.reg_scale) else: raise Exception("Your Optimizer is neither Adam, RMSprop or SGD, please change in param or contact Ben") return op
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup_optimizers(self, *args, **kwargs):\n\n # self.optimizers.append(...)\n # self.loss.append(...)\n pass", "def _configure_optimizer(learning_rate):\n if FLAGS.optimizer == 'adadelta':\n optimizer = tf.train.AdadeltaOptimizer(\n learning_rate,\n rho=FLAGS.adadelta_rho,\n epsilon=FLAGS.opt_epsilon)\n elif FLAGS.optimizer == 'adagrad':\n optimizer = tf.train.AdagradOptimizer(\n learning_rate,\n initial_accumulator_value=FLAGS.adagrad_initial_accumulator_value)\n elif FLAGS.optimizer == 'adam':\n optimizer = tf.train.AdamOptimizer(\n learning_rate,\n beta1=FLAGS.adam_beta1,\n beta2=FLAGS.adam_beta2,\n epsilon=FLAGS.opt_epsilon)\n elif FLAGS.optimizer == 'ftrl':\n optimizer = tf.train.FtrlOptimizer(\n learning_rate,\n learning_rate_power=FLAGS.ftrl_learning_rate_power,\n initial_accumulator_value=FLAGS.ftrl_initial_accumulator_value,\n l1_regularization_strength=FLAGS.ftrl_l1,\n l2_regularization_strength=FLAGS.ftrl_l2)\n elif FLAGS.optimizer == 'momentum':\n optimizer = tf.train.MomentumOptimizer(\n learning_rate,\n momentum=FLAGS.momentum,\n name='Momentum')\n elif FLAGS.optimizer == 'rmsprop':\n optimizer = tf.train.RMSPropOptimizer(\n learning_rate,\n decay=FLAGS.rmsprop_decay,\n momentum=FLAGS.rmsprop_momentum,\n epsilon=FLAGS.opt_epsilon)\n elif FLAGS.optimizer == 'sgd':\n optimizer = tf.train.GradientDescentOptimizer(learning_rate)\n else:\n raise ValueError('Optimizer [%s] was not recognized', FLAGS.optimizer)\n return optimizer", "def configure_optimizer(learning_rate):\n if FLAGS.optimizer == 'adadelta':\n optimizer = tf.train.AdadeltaOptimizer(\n learning_rate,\n rho=FLAGS.adadelta_rho,\n epsilon=FLAGS.opt_epsilon)\n elif FLAGS.optimizer == 'adagrad':\n optimizer = tf.train.AdagradOptimizer(\n learning_rate,\n initial_accumulator_value=FLAGS.adagrad_initial_accumulator_value)\n elif FLAGS.optimizer == 'adam':\n optimizer = tf.train.AdamOptimizer(\n learning_rate,\n beta1=FLAGS.adam_beta1,\n beta2=FLAGS.adam_beta2,\n epsilon=FLAGS.opt_epsilon)\n elif FLAGS.optimizer == 'ftrl':\n optimizer = tf.train.FtrlOptimizer(\n learning_rate,\n learning_rate_power=FLAGS.ftrl_learning_rate_power,\n initial_accumulator_value=FLAGS.ftrl_initial_accumulator_value,\n l1_regularization_strength=FLAGS.ftrl_l1,\n l2_regularization_strength=FLAGS.ftrl_l2)\n elif FLAGS.optimizer == 'momentum':\n optimizer = tf.train.MomentumOptimizer(\n learning_rate,\n momentum=FLAGS.momentum,\n name='Momentum')\n elif FLAGS.optimizer == 'rmsprop':\n optimizer = tf.train.RMSPropOptimizer(\n learning_rate,\n decay=FLAGS.rmsprop_decay,\n momentum=FLAGS.rmsprop_momentum,\n epsilon=FLAGS.opt_epsilon)\n elif FLAGS.optimizer == 'sgd':\n optimizer = tf.train.GradientDescentOptimizer(learning_rate)\n elif FLAGS.optimizer == \"adamweightdecay\":\n optimizer = AdamWeightDecayOptimizer(\n learning_rate=learning_rate,\n weight_decay_rate=0.01,\n beta_1=FLAGS.adam_beta1,\n beta_2=FLAGS.adam_beta2,\n epsilon=FLAGS.opt_epsilon,\n exclude_from_weight_decay=[\"LayerNorm\", \"layer_norm\", \"bias\"])\n else:\n raise ValueError('Optimizer [%s] was not recognized' % FLAGS.optimizer)\n return optimizer", "def _build_optimizer(self, optimizer_to_use=tf.train.AdamOptimizer, tpu_support=False):\n self.optimize_ops = []\n for loss in self.losses['train']: # TODO Create apropoiate external training scheme\n optimize_op = optimizer_to_use(\n learning_rate=self.learning_rate\n )\n if tpu_support:\n optimize_op = tpu.CrossShardOptimizer(optimize_op)\n optimize_op = optimize_op.minimize(\n loss=loss,\n 
global_step=tf.train.get_global_step()\n )\n self.optimize_ops.append(optimize_op)\n logging.info('Optimizers built')", "def _configure_optimizer(learning_rate):\n if FLAGS.optimizer == 'adadelta':\n optimizer = tf.train.AdadeltaOptimizer(\n learning_rate,\n rho=FLAGS.adadelta_rho,\n epsilon=FLAGS.opt_epsilon)\n elif FLAGS.optimizer == 'adagrad':\n optimizer = tf.train.AdagradOptimizer(\n learning_rate,\n initial_accumulator_value=FLAGS.adagrad_initial_accumulator_value)\n elif FLAGS.optimizer == 'adam':\n optimizer = tf.train.AdamOptimizer(\n learning_rate,\n beta1=FLAGS.adam_beta1,\n beta2=FLAGS.adam_beta2,\n epsilon=FLAGS.opt_epsilon)\n elif FLAGS.optimizer == 'ftrl':\n optimizer = tf.train.FtrlOptimizer(\n learning_rate,\n learning_rate_power=FLAGS.ftrl_learning_rate_power,\n initial_accumulator_value=FLAGS.ftrl_initial_accumulator_value,\n l1_regularization_strength=FLAGS.ftrl_l1,\n l2_regularization_strength=FLAGS.ftrl_l2)\n elif FLAGS.optimizer == 'momentum':\n optimizer = tf.train.MomentumOptimizer(\n learning_rate,\n momentum=FLAGS.momentum,\n use_nesterov=True,\n name='Momentum')\n elif FLAGS.optimizer == 'rmsprop':\n optimizer = tf.train.RMSPropOptimizer(\n learning_rate,\n decay=FLAGS.rmsprop_decay,\n momentum=FLAGS.momentum,\n epsilon=FLAGS.opt_epsilon)\n elif FLAGS.optimizer == 'sgd':\n optimizer = tf.train.GradientDescentOptimizer(learning_rate)\n else:\n raise ValueError('Optimizer [%s] was not recognized', FLAGS.optimizer)\n return optimizer", "def _configure_optimizer(learning_rate):\n if FLAGS.optimizer == 'adadelta':\n optimizer = tf.train.AdadeltaOptimizer(\n learning_rate,\n rho=FLAGS.adadelta_rho,\n epsilon=FLAGS.opt_epsilon)\n elif FLAGS.optimizer == 'adagrad':\n optimizer = tf.train.AdagradOptimizer(\n learning_rate,\n initial_accumulator_value=FLAGS.adagrad_initial_accumulator_value)\n elif FLAGS.optimizer == 'adam':\n optimizer = tf.train.AdamOptimizer(\n learning_rate,\n beta1=FLAGS.adam_beta1,\n beta2=FLAGS.adam_beta2,\n epsilon=FLAGS.opt_epsilon)\n elif FLAGS.optimizer == 'ftrl':\n optimizer = tf.train.FtrlOptimizer(\n learning_rate,\n learning_rate_power=FLAGS.ftrl_learning_rate_power,\n initial_accumulator_value=FLAGS.ftrl_initial_accumulator_value,\n l1_regularization_strength=FLAGS.ftrl_l1,\n l2_regularization_strength=FLAGS.ftrl_l2)\n elif FLAGS.optimizer == 'momentum':\n optimizer = tf.train.MomentumOptimizer(\n learning_rate,\n momentum=FLAGS.momentum,\n name='Momentum')\n elif FLAGS.optimizer == 'rmsprop':\n optimizer = tf.train.RMSPropOptimizer(\n learning_rate,\n decay=FLAGS.rmsprop_decay,\n momentum=FLAGS.rmsprop_momentum,\n epsilon=FLAGS.opt_epsilon)\n elif FLAGS.optimizer == 'sgd':\n optimizer = tf.train.GradientDescentOptimizer(learning_rate)\n else:\n raise ValueError('Optimizer [%s] was not recognized', FLAGS.optimizer)\n return optimizer", "def configure_optimizer(learning_rate):\r\n if FLAGS.optimizer == 'adadelta':\r\n optimizer = tf.train.AdadeltaOptimizer(learning_rate, \r\n rho=FLAGS.adadelta_rho,epsilon=FLAGS.opt_epsilon)\r\n elif FLAGS.optimizer == 'adagrad':\r\n optimizer = tf.train.AdagradOptimizer(learning_rate,\r\n initial_accumulator_value=FLAGS.adagrad_initial_accumulator_value)\r\n elif FLAGS.optimizer == 'adam':\r\n optimizer = tf.train.AdamOptimizer(learning_rate,\r\n beta1=FLAGS.adam_beta1,beta2=FLAGS.adam_beta2,epsilon=FLAGS.opt_epsilon)\r\n elif FLAGS.optimizer == 'ftrl':\r\n optimizer = tf.train.FtrlOptimizer(learning_rate,learning_rate_power=FLAGS.ftrl_learning_rate_power,\r\n 
initial_accumulator_value=FLAGS.ftrl_initial_accumulator_value,\r\n l1_regularization_strength=FLAGS.ftrl_l1,l2_regularization_strength=FLAGS.ftrl_l2)\r\n elif FLAGS.optimizer == 'momentum':\r\n optimizer = tf.train.MomentumOptimizer(learning_rate,\r\n momentum=FLAGS.momentum,name='Momentum')\r\n elif FLAGS.optimizer == 'rmsprop':\r\n optimizer = tf.train.RMSPropOptimizer(learning_rate,decay=FLAGS.rmsprop_decay,\r\n momentum=FLAGS.rmsprop_momentum,epsilon=FLAGS.opt_epsilon)\r\n elif FLAGS.optimizer == 'sgd':\r\n optimizer = tf.train.GradientDescentOptimizer(learning_rate=FLAGS.learning_rate)\r\n else:\r\n raise ValueError('Optimizer [%s] was not recognized', FLAGS.optimizer)\r\n return optimizer", "def add_optimizer(self):\n \n with tf.variable_scope(\"optimizer\"):\n\n # Define optimizer and minimize loss\n if self.OPTIM == \"RMSProp\":\n self.optimizer = tf.train.RMSPropOptimizer(self.LEARN_RATE).\\\n minimize(self.cost)\n \n elif self.OPTIM == \"GD\":\n self.optimizer = tf.train.GradientDescentOptimizer(self.LEARN_RATE).\\\n minimize(self.cost)\n \n elif self.OPTIM == \"Adam\":\n self.optimizer = tf.train.AdamOptimizer(self.LEARN_RATE).\\\n minimize(self.cost)\n\n # Merge all summaries for tensorboard\n #self.tbsummaries = tf.summary.merge_all()", "def retrieve_optimizer(flags):\n\n lrate = flags.learning_rate\n momentum = flags.momentum\n decay = lrate / flags.epochs\n if flags.optimizer == 'adam':\n optimizer = Adam(lr=lrate, decay=decay)\n logger.print_directly(flags, 'ADAM optimizer was configured')\n else:\n optimizer = SGD(lr=lrate, momentum=momentum, decay=decay, nesterov=False)\n logger.print_directly(flags, 'SGD optimizer was configured')\n\n return optimizer", "def get_adv_optimizer(self, mode: str) -> torch.optim.Optimizer:\n pass", "def create_optimizer(self, context, optimizer, host):\n pass", "def build_optimizer(opt_config, learning_rate):\n if opt_config.opt_method == 'SGD':\n print('Using SGD as the optimizer', file=sys.stderr)\n optimizer = tf.train.GradientDescentOptimizer(learning_rate)\n elif opt_config.opt_method == 'Adam':\n print('Using Adam as the optimizer', file=sys.stderr)\n optimizer = tf.train.AdamOptimizer(\n learning_rate, beta1=opt_config.adam_beta1,\n beta2=opt_config.adam_beta2, epsilon=opt_config.adam_epsilon\n )\n else:\n raise ValueError(\n 'Unknown optimization method {0}!'.format(opt_config.opt_method))\n return optimizer", "def configure_optimizer(learning_rate):\n\tif train_config['optimizer'] == 'adadelta':\n\t\toptimizer = tf.train.AdadeltaOptimizer(learning_rate,\n\t\t rho=train_config['adadelta_rho'],\n\t\t epsilon=train_config['opt_epsilon'])\n\telif train_config['optimizer'] == 'dadgrad':\n\t\toptimizer = tf.train.AdagradDAOptimizer(\n\t\t\tlearning_rate,\n\t\t\tinitial_gradient_squared_accumulator_value=train_config['adagrad_initial_accumulator_value'])\n\telif train_config['optimizer'] == 'adam':\n\t\toptimizer = tf.train.AdamOptimizer(\n\t\t\tlearning_rate,\n\t\t\tbeta1=train_config['adam_beta1'],\n\t\t\tbeta2=train_config['adam_beta2'],\n\t\t\tepsilon=train_config['opt_epsilon'])\n\telif train_config['optimizer'] == 'ftrl':\n\t\toptimizer = tf.train.FtrlOptimizer(\n\t\t\tlearning_rate,\n\t\t\tlearning_rate_power=train_config['ftrl_learning_rate_power'],\n\t\t\tinitial_accumulator_value=train_config['ftrl_initial_accumulator_value'],\n\t\t\tl1_regularization_strength=train_config['ftrl_l1'],\n\t\t\tl2_regularization_strength=train_config['ftrl_l2'])\n\telif train_config['optimizer'] == 'momentum':\n\t\toptimizer = 
tf.train.MomentumOptimizer(\n\t\t\tlearning_rate,\n\t\t\tmomentum=train_config['momentum'],\n\t\t\tname='Momentum')\n\telif train_config['optimizer'] == 'rmsprop':\n\t\toptimizer = tf.train.RMSPropOptimizer(\n\t\t\tlearning_rate,\n\t\t\tdecay=train_config['rmsprop_decay'],\n\t\t\tmomentum=train_config['rmsprop_momentum'],\n\t\t\tepsilon=train_config['opt_epsilon'])\n\telif train_config['optimizer'] == 'sgd':\n\t\toptimizer = tf.train.GradientDescentOptimizer(learning_rate)\n\telse:\n\t\traise ValueError('Optimizer [%s] was not recognized' % train_config['optimizer'])\n\treturn optimizer", "def add_optimizers_to_graph(self):\n with tf.device(self.params.device):\n with self.graph.as_default():\n with tf.compat.v1.variable_scope(\"optimizers\") as scope:\n self.grads_and_vars = list() # [sch_idx][weight_idx]\n self.apply_grads = list() # [sch_idx][weight_idx]\n self.learning_rates = list() # [sch_idx][weight_idx]\n if self.params.optimizer == \"lbfgsb\":\n self.minimizer = None\n #self.minimizer = tfp.optimizer.lbfgs_minimize(\n # value_and_gradients_function=self.loss_value_and_grad,#self.total_loss,\n # initial_position=self.w_init,#self.trainable_variables,\n # max_iterations=self.params.maxiter)\n #self.minimizer = tf.contrib.opt.ScipyOptimizerInterface(self.total_loss,\n # options={\"maxiter\":self.params.maxiter}) # Default method is L-BFGSB\n for schedule_idx, sch in enumerate(self.params.schedule):\n sch_grads_and_vars = list() # [weight_idx]\n sch_apply_grads = list() # [weight_idx]\n sch_lrs = list() # [weight_idx]\n #Construct weight ops\n weight_ops = [self.trainable_variables[weight] for weight in sch[\"weights\"]]\n for w_idx, weight in enumerate(sch[\"weights\"]):\n weight_name = weight.split(\"/\")[-1].split(\":\")[0]\n learning_rates = tf.compat.v1.train.exponential_decay(\n learning_rate=sch[\"weight_lr\"][w_idx],\n global_step=self.global_step,\n decay_steps=sch[\"decay_steps\"][w_idx],\n decay_rate=sch[\"decay_rate\"][w_idx],\n staircase=sch[\"staircase\"][w_idx],\n name=\"annealing_schedule_\"+weight_name)\n sch_lrs.append(learning_rates)\n if self.params.optimizer == \"sgd\":\n optimizer = tf.compat.v1.train.GradientDescentOptimizer(learning_rates,\n name=\"grad_optimizer_\"+weight_name)\n elif self.params.optimizer == \"adam\":\n optimizer = tf.compat.v1.train.AdamOptimizer(learning_rates, beta1=0.9, beta2=0.99,\n epsilon=1e-07, name=\"adam_optimizer_\"+weight_name)\n elif self.params.optimizer == \"adadelta\":\n optimizer = tf.compat.v1.train.AdadeltaOptimizer(learning_rates, epsilon=1e-07,\n name=\"adadelta_optimizer_\"+weight_name)\n elif self.params.optimizer == \"lbfgsb\":\n optimizer = None\n else:\n assert False, (\"Optimizer \"+self.params.optimizer+\" is not supported.\")\n weight_op = self.trainable_variables[weight]\n sch_grads_and_vars.append(self.compute_weight_gradients(optimizer, weight_op))\n gstep = self.global_step if w_idx == 0 else None # Only increment once\n if self.params.optimizer == \"lbfgsb\": # BFGS doesn't actually need the update op\n if w_idx == 0:\n sch_apply_grads.append(tf.compat.v1.assign_add(self.global_step, 1))\n else:\n sch_apply_grads.append(None)\n else:\n sch_apply_grads.append(optimizer.apply_gradients(sch_grads_and_vars[w_idx],\n global_step=gstep))\n self.learning_rates.append(sch_lrs)\n self.grads_and_vars.append(sch_grads_and_vars)\n self.apply_grads.append(sch_apply_grads)\n self.optimizers_added = True", "def test_optimizers(name: str) -> None:\n if any(x in name for x in [\"Chain\", \"SMAC\", \"BO\", \"AX\"]) and 
os.environ.get(\"CIRCLECI\", False):\n raise SkipTest(\"too slow for CircleCI!\")\n\n def doint(s): # Converting a string into an int.\n return 7 + sum([ord(c) * i for i, c in enumerate(s)])\n\n if doint(name) % 5 > 0:\n raise SkipTest(\"too many tests for CircleCI!\")\n if (\n sum([ord(c) for c in name]) % 4 > 0\n and name\n not in [\n \"DE\",\n \"CMA\",\n \"OnePlusOne\",\n \"Cobyla\",\n \"DiscreteLenglerOnePlusOne\",\n \"PSO\",\n ]\n or \"Tiny\" in name\n or \"Micro\" in name\n ) and os.environ.get(\"CIRCLECI\", False):\n raise SkipTest(\"Too expensive: we randomly skip 3/4 of these tests.\")\n if name in [\"CMAbounded\", \"NEWUOA\"]: # Not a general purpose optimization method.\n return\n if \"BO\" in name: # Bayesian Optimization is rarely good, let us save up time.\n return\n optimizer_cls = registry[name]\n if isinstance(optimizer_cls, base.ConfiguredOptimizer):\n assert any(\n hasattr(mod, name) for mod in (optlib, xpvariants)\n ) # make sure registration matches name in optlib/xpvariants\n assert (\n optimizer_cls.__class__(**optimizer_cls._config) == optimizer_cls\n ), \"Similar configuration are not equal\"\n # some classes of optimizer are eigher slow or not good with small budgets:\n nameparts = [\"Many\", \"Chain\", \"BO\", \"Discrete\", \"NLOPT\"] + [\"chain\"] # TODO remove chain when possible\n is_ngopt = inspect.isclass(optimizer_cls) and issubclass(optimizer_cls, NGOptBase) # type: ignore\n verify = (\n not optimizer_cls.one_shot\n and name not in SLOW\n and not any(x in name for x in nameparts)\n and not is_ngopt\n )\n budget = 300 if \"BO\" not in name and not is_ngopt else 4\n # the following context manager speeds up BO tests\n patched = partial(acq_max, n_warmup=10000, n_iter=2)\n with patch(\"bayes_opt.bayesian_optimization.acq_max\", patched):\n check_optimizer(optimizer_cls, budget=budget, verify_value=verify)", "def configure_optimizers(self):\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\n \"params\": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],\n \"weight_decay\": self.args.weight_decay,\n },\n {\n \"params\": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],\n \"weight_decay\": 0.0,\n },\n ]\n if self.optimizer == \"adamw\":\n optimizer = AdamW(optimizer_grouped_parameters,\n betas=(0.9, 0.98), # according to RoBERTa paper\n lr=self.args.lr,\n eps=self.args.adam_epsilon,)\n elif self.optimizer == \"torch.adam\":\n optimizer = torch.optim.AdamW(optimizer_grouped_parameters,\n lr=self.args.lr,\n eps=self.args.adam_epsilon,\n weight_decay=self.args.weight_decay)\n else:\n raise ValueError(\"Optimizer type does not exist.\")\n num_gpus = len([x for x in str(self.args.gpus).split(\",\") if x.strip()])\n t_total = (len(self.train_dataloader()) // (self.args.accumulate_grad_batches * num_gpus) + 1) * self.args.max_epochs\n warmup_steps = int(self.args.warmup_proportion * t_total)\n if self.args.lr_scheduler == \"onecycle\":\n scheduler = torch.optim.lr_scheduler.OneCycleLR(\n optimizer, max_lr=self.args.lr, pct_start=float(warmup_steps/t_total),\n final_div_factor=self.args.final_div_factor,\n total_steps=t_total, anneal_strategy='linear')\n elif self.args.lr_scheduler == \"linear\":\n scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total)\n elif self.args.lr_scheulder == \"polydecay\":\n if self.args.lr_mini == -1:\n lr_mini = self.args.lr / self.args.polydecay_ratio\n else:\n lr_mini = 
self.args.lr_mini\n scheduler = get_polynomial_decay_schedule_with_warmup(optimizer, warmup_steps, t_total, lr_end=lr_mini)\n else:\n raise ValueError\n return [optimizer], [{\"scheduler\": scheduler, \"interval\": \"step\"}]", "def compile_optimizer(self):\n optimizer = torch.optim.Adam(self.parameters(), lr=self.cfg.learning_rate)\n\n return optimizer", "def propose_optimize():\n pass", "def get_optimizers(args):\r\n\t# Create a generator which can map a latent vector size 8 to 72\r\n\tG = Generator(\r\n\t\tinput_size=args.g_input_size,\r\n\t\thidden_size=args.g_hidden_size,\r\n\t\toutput_size=args.g_output_size,\r\n\t\tp=args.p\r\n\t)\r\n\t# Create a discriminator which can turn 72-dimensional particle to Binary\r\n\t# prediction\r\n\tD = Discriminator(\r\n\t\tinput_size=args.d_input_size,\r\n\t\thidden_size=args.d_hidden_size,\r\n\t\toutput_size=args.d_output_size,\r\n\t\tp=args.p,\r\n\t\tdropout=args.dropout\r\n\t)\r\n\r\n\t# Choose an optimizer\r\n\tif args.optim == 'Adam':\r\n\t\td_optimizer = optim.Adam(D.parameters(), lr=args.d_learning_rate)\r\n\t\tg_optimizer = optim.Adam(G.parameters(), lr=args.g_learning_rate)\r\n\telse:\r\n\t\td_optimizer = optim.SGD(D.parameters(), lr=args.d_learning_rate)\r\n\t\tg_optimizer = optim.SGD(G.parameters(), lr=args.g_learning_rate, momentum=args.sgd_momentum)\r\n\treturn G, D, d_optimizer, g_optimizer", "def make_learnable_transformation(self, optimize_flags, chain_of_transforms=None):\n # reset transformation parameters\n if chain_of_transforms is None:\n chain_of_transforms = self.chain_of_transforms\n for flag, transform in zip(optimize_flags, chain_of_transforms):\n if flag:\n transform.train()", "def _make_optimizer(use_adam, learning_rate):\n if use_adam:\n ret = tf.train.AdamOptimizer(learning_rate=learning_rate)\n else:\n ret = tf.train.MomentumOptimizer(learning_rate=learning_rate,\n momentum=0.95,\n use_nesterov=True)\n return ret", "def __init__(self, optimizer):\n super(ShardedOptimizer, self).__init__(optimizer, name=\"ShardedOptimizer\")", "def optimizer_setup(model, params):\n if params.optimizer == 'adam':\n if params.freeze_backbone:\n optimizer = optimizer_handler.layer_specific_adam(model, params)\n else:\n optimizer = optimizer_handler.plain_adam(model, params)\n elif params.optimizer == 'sgd':\n if params.freeze_backbone:\n optimizer = optimizer_handler.layer_specific_sgd(model, params)\n else:\n optimizer = optimizer_handler.plain_sgd(model, params)\n\n if params.zero_bn_bias_decay:\n optimizer = zero_wdcay_bn_bias(optimizer)\n\n return optimizer", "def compile(self, gen_optimizer, disc_optimizer):\n self.gen_optimizer = gen_optimizer\n self.disc_optimizer = disc_optimizer", "def __configure_optimizer(self, learning_rate):\n if self.optimizer == 'adadelta':\n optimizer = tf.train.AdadeltaOptimizer(\n learning_rate,\n rho=self.adadelta_rho,\n epsilon=self.opt_epsilon)\n elif self.optimizer == 'adagrad':\n optimizer = tf.train.AdagradOptimizer(\n learning_rate,\n initial_accumulator_value=self.adagrad_initial_accumulator_value)\n elif self.optimizer == 'adam':\n optimizer = tf.train.AdamOptimizer(\n learning_rate,\n beta1=self.adam_beta1,\n beta2=self.adam_beta2,\n epsilon=self.opt_epsilon)\n elif self.optimizer == 'ftrl':\n optimizer = tf.train.FtrlOptimizer(\n learning_rate,\n learning_rate_power=self.ftrl_learning_rate_power,\n initial_accumulator_value=self.ftrl_initial_accumulator_value,\n l1_regularization_strength=self.ftrl_l1,\n l2_regularization_strength=self.ftrl_l2)\n elif self.optimizer == 'momentum':\n optimizer = 
tf.train.MomentumOptimizer(\n learning_rate,\n momentum=self.momentum,\n name='Momentum')\n elif self.optimizer == 'rmsprop':\n optimizer = tf.train.RMSPropOptimizer(\n learning_rate,\n decay=self.rmsprop_decay,\n momentum=self.rmsprop_momentum,\n epsilon=self.opt_epsilon)\n elif self.optimizer == 'sgd':\n optimizer = tf.train.GradientDescentOptimizer(learning_rate)\n else:\n raise ValueError('Optimizer [%s] was not recognized', self.optimizer)\n return optimizer", "def optimizer(self):\n return 'sgd'", "def configure_optimizers(self):\n optimizer = torch.optim.Adam(self.parameters(), lr=1e-3)\n return optimizer", "def _set_optimizer(self):\n\n if self.optimizer_name == 'Adam':\n self.optimizer = optim.Adam(self.net.parameters(),\n lr=self.learning_rate,\n betas=self.betas,\n eps=1e-8,\n weight_decay=self.weight_decay)\n elif self.optimizer_name == 'SGD':\n self.optimizer = optim.SGD(self.net.parameters(),\n lr=self.learning_rate,\n momentum=self.momentum,\n weight_decay=self.weight_decay)\n elif self.optimizer_name == 'SGD_Nesterov':\n self.optimizer = optim.SGD(self.net.parameters(),\n lr=self.learning_rate,\n momentum=self.momentum,\n weight_decay=self.weight_decay,\n nesterov=True)\n elif self.optimizer_name == 'RMSprop':\n self.optimizer = optim.Adagrad(self.net.parameters(),\n lr=self.learning_rate,\n momentum=self.momentum,\n weight_decay=self.weight_decay)\n elif self.optimizer_name == 'Adagrad':\n self.optimizer = optim.Adagrad(self.net.parameters(),\n lr=self.learning_rate,\n weight_decay=self.weight_decay)\n else:\n print(\"Optimizer '\" + self.optimizer_name + \"' not implemented.\")", "def configure_optimizers(self):\n optimizer = torch.optim.Adam(\n self.parameters(), lr=self.hparams[\"learning_rate\"]\n )\n return optimizer", "def _build_optimizers(self):\r\n self._optimize_ops = []\r\n all_trainable_variables = tf.trainable_variables()\r\n all_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\r\n all_reg_losses = tf.losses.get_regularization_losses()\r\n for spec in self._learning_schedule:\r\n optimize_ops = []\r\n update_ops = []\r\n loss_terms = spec['loss_terms_to_optimize']\r\n reg_losses = []\r\n assert isinstance(loss_terms, dict)\r\n for loss_term_key, prefixes in loss_terms.items():\r\n assert loss_term_key in self.loss_terms['train'].keys()\r\n variables_to_train = []\r\n for prefix in prefixes:\r\n variables_to_train += [\r\n v for v in all_trainable_variables\r\n if v.name.startswith(prefix)\r\n ]\r\n update_ops += [\r\n o for o in all_update_ops\r\n if o.name.startswith(prefix)\r\n ]\r\n reg_losses += [\r\n l for l in all_reg_losses\r\n if l.name.startswith(prefix)\r\n ]\r\n\r\n optimizer_class = tf.train.AdamOptimizer\r\n optimizer = optimizer_class(\r\n learning_rate=self.learning_rate_multiplier * spec['learning_rate'],\r\n # beta1=0.9,\r\n # beta2=0.999,\r\n )\r\n final_loss = self.loss_terms['train'][loss_term_key]\r\n if len(reg_losses) > 0:\r\n final_loss += tf.reduce_sum(reg_losses)\r\n with tf.control_dependencies(update_ops):\r\n gradients, variables = zip(*optimizer.compute_gradients(\r\n loss=final_loss,\r\n var_list=variables_to_train,\r\n aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N,\r\n ))\r\n # gradients, _ = tf.clip_by_global_norm(gradients, 5.0) # TODO: generalize\r\n optimize_op = optimizer.apply_gradients(zip(gradients, variables))\r\n optimize_ops.append(optimize_op)\r\n self._optimize_ops.append(optimize_ops)\r\n logger.info('Built optimizer for: %s' % ', '.join(loss_terms.keys()))" ]
[ "0.6734002", "0.6700126", "0.6692267", "0.6672637", "0.66392416", "0.6633101", "0.6613115", "0.6404118", "0.63944757", "0.6390522", "0.6347207", "0.6307785", "0.6301192", "0.6257054", "0.6233686", "0.6220153", "0.62087554", "0.6177149", "0.6150207", "0.6144948", "0.6144496", "0.61326593", "0.6130383", "0.61294335", "0.61051977", "0.60972255", "0.60898256", "0.60856545", "0.6060226", "0.60540414" ]
0.7149734
0
Gets the active backend for this casepro instance
def get_backend(): global _ACTIVE_BACKEND if not _ACTIVE_BACKEND: _ACTIVE_BACKEND = locate(settings.SITE_BACKEND)() return _ACTIVE_BACKEND
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_profile_backend(self, profile):\n return self._get_attribute(profile, 'backend')", "def get_backend(self):\n return self.analyze_db_task(constants.TRAIN_DB).backend", "def get_backend():\n return __SETTINGS__._BACKEND", "def get_backend():\n return _BACKEND", "def backend(self):\n # This never changes (so no read locking needed).\n return self._backend", "def backend(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"backend\")", "def getBackend(self):\n return self.header['BACKEND']", "def backend(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"backend\")", "def _backend(self) -> Backend:\n return self.__backend", "def backend(self) -> str:\n return self.__class__.BACKEND_NAME", "def backend(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"backend\")", "def get_backend():\n return Connection()", "def get_backend(name):\n return _DEFAULT_PROVIDER.get_backend(name)", "def backend_name(self) -> str:\n return self._db_data.backend", "def get_backend():\n\n return sys.modules[__name__]", "def get_backend():\n\n return sys.modules[__name__]", "def get_storage_backend(self):\n return self.client.info()['Driver']", "def get_backend():\n return sys.modules[__name__]", "def get_backend():\n return sys.modules[__name__]", "def find_backend(cls) -> IBackend:\n cls.Lock.acquire()\n try:\n return cls._load_backend()\n finally:\n cls.Lock.release()", "def _get_backend(args):\n if args.backend == 'gatttool':\n backend = GatttoolBackend\n elif args.backend == 'bluepy':\n backend = BluepyBackend\n elif args.backend == 'pygatt':\n backend = PygattBackend\n else:\n raise Exception('unknown backend: {}'.format(args.backend))\n return backend", "def get_tgis_backend():\n global tgis_backend\n return tgis_backend", "def get_default_backend():\n return __default_backend", "def get_backend(\n self,\n backend_id: str,\n ) -> Optional[Type[BaseCertificateStorageBackend]]:\n return self.get('backend_id', backend_id)", "def backend_object(self, id):\n return self.model.Suite.everything.get(id=id)", "def get_backend():\n backend_path = settings.CALENDAR_BACKEND\n\n try:\n backend_modulename, backend_classname = backend_path.rsplit('.', 1)\n except ValueError:\n raise ImproperlyConfigured('{0} isn\\'t a backend module'.format(backend_path))\n\n # See if the module has already been imported.\n try:\n backend_module = sys.modules[backend_modulename]\n except KeyError:\n # ok, then import it.\n try:\n backend_module = import_module(backend_modulename)\n except ImportError as e:\n raise ImproperlyConfigured('Error importing backend {0}: \"{1}\"'.format(backend_modulename, e))\n\n try:\n backend_class = getattr(backend_module, backend_classname)\n except AttributeError:\n raise ImproperlyConfigured(\n 'Backend module \"{0}\" does not define a \"{1}\" class'.format(backend_modulename, backend_classname)\n )\n\n backend_instance = backend_class()\n\n if not isinstance(backend_instance, BaseBackend):\n raise ImproperlyConfigured(\n 'Backend class \"{0}\" is not a subclass of \"django_calendar.backends.BaseBackend\"'.format(backend_classname)\n )\n\n return backend_instance", "def backend_protocol(self) -> Optional[pulumi.Input[Union[str, 'BackendProtocol']]]:\n return pulumi.get(self, \"backend_protocol\")", "def get_active_backend(\n prefer=default_parallel_config[\"prefer\"],\n require=default_parallel_config[\"require\"],\n verbose=default_parallel_config[\"verbose\"],\n):\n backend, config = _get_active_backend(prefer, require, verbose)\n n_jobs = _get_config_param(\n 
default_parallel_config['n_jobs'], config, \"n_jobs\"\n )\n return backend, n_jobs", "def backend_info_get(context, host):\n result = _backend_info_query(context, host)\n return result", "def get_backends(self) -> dict:\n return Config.get_backends()" ]
[ "0.75494736", "0.7513481", "0.7458784", "0.7404637", "0.73987234", "0.7319639", "0.7303815", "0.71876395", "0.7083139", "0.708233", "0.70689523", "0.7009284", "0.6922739", "0.69098556", "0.69017106", "0.69017106", "0.68886673", "0.6833785", "0.6833785", "0.6747233", "0.6669912", "0.6605696", "0.65344614", "0.6493684", "0.6418164", "0.64079666", "0.6369102", "0.6364748", "0.633597", "0.62663865" ]
0.7887652
0
Pulls contacts modified in the given time window
def pull_contacts(self, org, modified_after, modified_before, progress_callback=None): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_recent_contacts(user, limit=5, timespan_days=14) -> typing.List[Contact]:\n timespan_recent = datetime.now().astimezone() - timedelta(days=timespan_days)\n contacts_recent = (\n Contact.objects.filter(interactions__was_at__gt=timespan_recent)\n .filter(user=user)\n .annotate(count=Count(\"interactions\"))\n .order_by(\"-count\")[:limit]\n )\n return list(contacts_recent)", "def pull_messages(self, org, modified_after, modified_before, as_handled=False, progress_callback=None):\n pass", "def fetch_contact_messages(self, org, contact, created_after, created_before):\n pass", "def parse_feed(feed, last_update, entry, get_updated = lambda e: e.updated_parsed[:6]):\n\n entries = []\n for e in feed.entries:\n if datetime(*get_updated(e)) > last_update:\n new = entry(e)\n if new != None:\n entries.append(new)\n return entries", "async def recentchanges(self, ctx, limit=50):\r\n if await bMsg(ctx,ctx.message.author.name,client):\r\n return\r\n logger.info('Wiki.recentchanges: ' + str(limit), extra={'invoker': ctx.message.author.name})\r\n twenties, limit = divmod(limit, 20)\r\n async with ctx.channel.typing():\r\n result = ['']\r\n changes = []\r\n start = 'now'\r\n for i in [20 for j in range(twenties)] + [limit]:\r\n resp = await self.req({\r\n 'action': 'query',\r\n 'list': 'recentchanges',\r\n 'rcprop': 'user|timestamp|comment|title|sizes|flags',\r\n 'rctype': 'edit|new',\r\n 'rclimit': i,\r\n 'rcstart': start\r\n })\r\n changes.extend(resp['query']['recentchanges'])\r\n start = resp['query']['recentchanges'][-1]['timestamp']\r\n i = 0\r\n for ch in changes:\r\n change = '\\n'\r\n change += ch['timestamp']\r\n change += ': '\r\n change += ch['title']\r\n change += '; '\r\n sizechange = ch['newlen'] - ch['oldlen']\r\n if sizechange <= -500 or sizechange >= 500:\r\n change += '**'\r\n change += '('\r\n if sizechange <= 0:\r\n change += str(sizechange)\r\n if sizechange > 0:\r\n change += '+' + str(sizechange)\r\n change += ')'\r\n if sizechange <= -500 or sizechange >= 500:\r\n change += '**'\r\n change += ' . . 
'\r\n change += ch['user']\r\n change += ' _('\r\n change += ch['comment'].replace('*', '\\\\*').replace('_', '\\\\_').replace('`', '\\\\`')\r\n change += ')_'\r\n result[i] += change\r\n if len(result[i]) > 2000:\r\n result.append('')\r\n result[i], result[i+1] = result[i].rsplit('\\n', 1)\r\n i += 1\r\n for r in result:\r\n await ctx.send(r)", "def test_forums_filter_updated(self):\n post_updated_ds = datetime(2010, 5, 3, 12, 00)\n\n thread1 = ThreadFactory(title=u't1 audio')\n PostFactory(thread=thread1, created=post_updated_ds)\n\n thread2 = ThreadFactory(title=u't2 audio')\n PostFactory(thread=thread2, created=(post_updated_ds + timedelta(days=2)))\n\n self.refresh()\n\n qs = {'a': 1, 'w': 4, 'format': 'json',\n 'sortby': 1, 'updated_date': '05/04/2010'}\n\n qs['updated'] = constants.INTERVAL_BEFORE\n response = self.client.get(reverse('search.advanced'), qs)\n results = json.loads(response.content)['results']\n eq_([thread1.get_absolute_url()], [r['url'] for r in results])\n\n qs['updated'] = constants.INTERVAL_AFTER\n response = self.client.get(reverse('search.advanced'), qs)\n results = json.loads(response.content)['results']\n eq_([thread2.get_absolute_url()], [r['url'] for r in results])", "def recently_modified(request):\n pages = models.Page.all().order('modified').fetch(10)\n return utility.respond(request, 'admin/recently_modified', {'pages': pages})", "def _update_modified_since(self, timestamp):\n pass", "def get_list(update, context):\n chat = update.message.chat\n user_id = update.message.from_user.id\n if chat.id != user_id:\n msg = 'Управление Вашими напоминаниями доступно в личном диалоге'\n update.message.reply_text(msg)\n return\n\n try:\n handler = db_connector.DataBaseConnector()\n rems = handler.get_user_reminders(user_id)\n rems.sort(key=lambda x: x['datetime'])\n except (ValueError, ConnectionError, KeyError):\n update.message.reply_text(_ERR_MSG)\n _LOGGER.exception('Unable to fetch reminders')\n return\n\n if not rems:\n reps_text = 'У вас отсутствуют предстоящие напоминания!'\n update.message.bot.send_message(chat_id=chat.id, text=reps_text)\n return\n\n for rem in rems:\n try:\n resp_text, markup = _compile_rem(rem, show_dt=True)\n context.message = context.bot.send_message(\n chat_id=rem['user_id'], text=resp_text,\n reply_markup=markup, parse_mode=ParseMode.HTML)\n except (ValueError, ConnectionError, KeyError) as err:\n _LOGGER.exception('Unable to process reminder')", "def __time_update(user):\n\n feeds = Feed.objects.filter(user=user)\n\n for feed in feeds:\n # Last time updated more than 5 minutes ago\n if (datetime.now() - feed.time) > timedelta(0, 300, 0):\n __update_feed(feed)", "def get_filtered(self, collection, xmlFormat):\n\t\tstart = \"2012-05-01T00:00:00Z\"\n\t\tend = \"2012-05-20T00:00:00Z\"\n\t\tquery = '/text//itemRecord/metaMetadata/dateInfo/@lastModified:[%s TO %s]' % (start, end)\n\t\t\n\t\treturn {\n\t\t\t'q' : query,\n\t\t\t\"verb\": \"Search\",\n\t\t\t\"xmlFormat\": xmlFormat,\n\t\t\t\"ky\": collection,\n\t\t\t'sortDescending' : '/text//itemRecord/metaMetadata/dateInfo/@lastModified'\n\t\t\t}", "def get_recent_feed_elements(self, sq):\n return sq.sort('-created').query(query.MatchAll())", "def get_all_active_members(debug, contactsUrl):\n\n valid_date = str(datetime.date.today() - datetime.timedelta(days=7)) # 7 days ago in yyyy-mm-dd format\n\n #params = {'$filter': 'member eq true AND Status eq Active',\n # '$async': 'false'}\n params = {'$filter': \"member eq true AND ( Status eq Active OR ( Status eq PendingRenewal AND 'Renewal 
due' ge \" + valid_date + \"))\",\n '$async': 'false'}\n request_url = contactsUrl + '?' + urllib.parse.urlencode(params)\n if debug: print('Making api call to get contacts')\n return api.execute_request(request_url).Contacts", "def get_clients_to_be_reactivated(file=\"db.json\") -> List[Client]:\n with TinyDB(file) as db:\n query = Query()\n result = db.search(query[\"rem date\"].test(contact_now))\n output = []\n for client in result:\n output.append(Client(client[\"first name\"], client[\"last name\"],\n client[\"last visit\"], client[\"rem date\"],\n client[\"email\"]\n ))\n return output", "def update_contacts(self):\n self.contacts = self.db.list_contacts()\n return self.list_contacts()", "def maintainWindow(self,data,ts):\n\tfor (tstamp,text) in data:\n\t\tif self.compareTimestamp(tstamp,ts) > 60:\n\t\t\tdelTs, delTweet = data.pop(0)\n\t\t\tfor txt in data:\n\t\t\t\tif all(x in txt[1] for x in delTweet):\n\t\t\t\t\t return\n\t\t\tself.updateDict(text)\n\t\telse:\n\t\t\tbreak", "def feed_entries(self):\n date_format = \"%Y-%m-%dT%H:%M:%SZ\"\n entries = self.mapper.list_entries(limit=10)\n if entries:\n updated = max([e.updated for e in entries]).strftime(date_format)\n else:\n updated = datetime.utcnow().strftime(date_format)\n return {\"entries\": entries, \"updated\": updated}", "def get_tweets(which, hours):\n objects = tweepy.Cursor(\n twitter.list_timeline,list_id=which,\n include_rts=False,count=100\n ).items()\n time_objects = []\n cutoff = (\n datetime.utcnow() - timedelta(hours=hours)\n ).strftime('%b %d %H:%M:%S')\n for tweet in objects:\n data = tweet._json # isolate metadata\n raw_time = datetime.strptime(\n data['created_at'],\n '%a %b %d %H:%M:%S +0000 %Y'\n )\n time = raw_time.strftime('%b %d %H:%M:%S') # reformat to match cutoff for boolean\n if time > cutoff:\n time_objects.append(tweet)\n return time_objects", "def latest(self, username='', friends=False):\n if friends:\n entries = Entry.all().filter('subscribers_usernames = ',\n username).order('-updated')\n else:\n if username:\n user = User.all().filter('username = ', username).get()\n if user is None: return []\n else:\n entries = Entry.all().filter('owner = ', user).\\\n order('-updated')\n else:\n entries = Entry.all().order('-updated')\n return entries", "def get_last_modified_rec(bibrank_method_lastupdate):\n query = \"\"\"SELECT id FROM bibrec\n WHERE modification_date >= '%s' \"\"\" % bibrank_method_lastupdate\n query += \"order by id ASC\"\n ilist = run_sql(query)\n return ilist", "def populate_twitter_acct_tweets_by_date():\n api = twitter.Api(**settings.TWITTER_OAUTH, sleep_on_rate_limit=False)\n twitter_accts = CredibleUSTwitterAccount.objects.all()\n\n for acct in twitter_accts:\n results = api.GetSearch(raw_query=\"l=&q=from%3AReutersUS%20since%3A2017-12-01%20until%3A2017-12-02&src=typd\")", "def get_contacts_list(self):\n contacts = self.driver.find_elements_by_class_name(\"_1wjpf\")\n s= [contact.text for contact in contacts] #extracts chats and last messsages\n print (\"get contacts: \"+str(s)) #print only chat names\n return s[::2] #returns only chat names", "def search_email_by_time(M):\n print \"search mail by time\\n\"\n date = (datetime.date.today() - datetime.timedelta(1)).strftime(\"%d-%b-%Y\")\n rv, data = M.uid('search', None, '(SENTSINCE {date})'.format(date=date))\n if check_response(rv):\n return data\n else:\n return None", "def GetChangesSample():\n client = CreateClient()\n changes = client.GetChanges()\n for change in changes.entry:\n print change.title.text, 
change.changestamp.value", "def updates_after_timestamp(cls, timestamp):\n\n return Score.query.filter(Score.updated_on >= timestamp).order_by(desc(Score.updated_on)).all()", "def _find_update_docs_since(since: str):\n delta = since_to_delta(since)\n earliest_dt = datetime.now(timezone.utc) - delta\n query = get_db().collection_group(\"updates\").where(\"date\", \">\", earliest_dt)\n return (doc.to_dict() for doc in query.stream())", "def get_step_changes_after(\n project: 'projects.Project',\n timestamp: float,\n write_running: bool = False\n) -> typing.List[dict]:\n return [\n _get_step_changes(project, step, write_running)\n for step in project.steps\n if step.report.last_update_time >= timestamp\n or (step.last_modified or 0) >= timestamp\n ]", "def cull(self):\n now = time.time()\n self.lines = [line for line in self.lines if line.timestamp + self.timeout > now]", "def test_get_filter_with_date_contacts_e(self):\n data = {\"type_contact\": 1, \"date_start\": '2018-08-20',\n \"date_end\": '2018-08-25'}\n response = self.client.get(reverse('contacts-filter'), data)\n # import pdb; pdb.set_trace()\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data[\"count\"], 2)", "def getChanges():" ]
[ "0.6372907", "0.56030774", "0.53730536", "0.52406937", "0.51859474", "0.51588595", "0.51582783", "0.5062291", "0.49823055", "0.49673826", "0.49304265", "0.49119183", "0.4893238", "0.48634708", "0.48569855", "0.4847088", "0.4834561", "0.4832907", "0.4832073", "0.4830561", "0.4827244", "0.48224935", "0.47882828", "0.47407097", "0.4735655", "0.47255936", "0.4720486", "0.47193813", "0.47141215", "0.46994177" ]
0.6342501
1
Pulls all contact fields
def pull_fields(self, org): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(self):\n args = GET_PARSER.parse_args()\n print(f'args={args}')\n\n return Contacts().get_all(\n args[\"phonetypeOne\"],\n args[\"phonetypeTwo\"],\n args[\"phonetypeThree\"],\n args[\"firstName\"],\n args[\"lastName\"],)", "def get_fields(self):\n\t\tlogging.debug(\"Beginning\")\n\t\toptions=dict(api_key = self.apiKey, results = 0)\n\t\turl = '{ts}channels/{id}/feeds.json'.format(\n\t\t\tts=self.tsRUL,\n\t\t\tid=self.channel\n\t\t)\n\t\ttry:\n\t\t\tresults = requests.get(url, params=options)\n\t\t\tif results.ok != True:\n\t\t\t\tlogging.error(\"The URL didn't return a 200\")\n\t\t\t\treturn\n\t\texcept:\n\t\t\tlogging.error(\"Error calling the thingspeak URL\")\n\t\t\treturn\n\t\tresultsJson = results.json()\n\t\tchannelsJson = resultsJson['channel']\n\t\tfields = dict()\n\t\tfor i in range(1,8):\n\t\t\tif 'field'+str(i) in channelsJson:\n\t\t\t\tfields['field'+str(i)] = channelsJson['field'+str(i)]\n\t\treturn fields", "def get_all(self):\n total_contacts = []\n get_count = {\n 'query': {\n 'object': 'CONTACT',\n 'select': {\n 'field': 'RECORDNO'\n },\n 'pagesize': '1'\n }\n }\n\n response = self.format_and_send_request(get_count)\n count = int(response['data']['@totalcount'])\n pagesize = 2000\n offset = 0\n for i in range(0, count, pagesize):\n data = {\n 'query': {\n 'object': 'CONTACT',\n 'select': {\n 'field': [\n 'RECORDNO',\n 'CONTACTNAME',\n 'COMPANYNAME',\n 'FIRSTNAME',\n 'LASTNAME',\n 'INITIAL',\n 'PRINTAS',\n 'TAXABLE',\n 'MAILADDRESS.ADDRESS1'\n ]\n },\n 'pagesize': pagesize,\n 'offset': offset\n }\n }\n contacts = self.format_and_send_request(data)['data']['CONTACT']\n total_contacts = total_contacts + contacts\n offset = offset + pagesize\n return total_contacts", "def ListAllContacts(self):\n feed = self.gd_client.GetContacts()\n self.contacts = self.CleanPhoneNumbers(self.GetContactsInfo(feed))\n return self.contacts", "def all_fields(item):\n return scom.all_fields(item)", "def getcontacts():\n contacts = {}\n\n try:\n #get list of contact ids\n contactids = r.smembers(\"contacts\")\n\n #for each contact id get data\n for contactid in contactids:\n contacts.update(_getcontact(str(contactid)))\n return contacts\n except:\n print \"Unexpected error:\", sys.exc_info()[0]\n raise", "def get_all_contacts(self):\n self.init_db(self._testing)\n\n query = \"SELECT {} FROM {} ORDER BY id;\".format(\", \".join(Contact.columns_with_uid), Contact.table_name)\n\n data = self.db.conn.execute(query)\n\n return [Contact(*item) for item in data]", "def get_fields_for_cr(cr_id):\n # Construct request\n url = \"{}/reports/{}/patient_fields\"\n url = url.format(FABRIC_API_URL, cr_id)\n\n sys.stdout.flush()\n result = requests.get(url, auth=auth)\n return result.json()", "def _get_fields(self):\n return self._fields", "def get_contact_info(self, html_content: str) -> object:\n if not html_content:\n raise Exception(\"HTML content not found\")\n\n soup = BeautifulSoup(html_content, 'html.parser')\n\n self.contact = {}\n cards = soup.select(self.tags.get(\"contact.panels\"))\n\n # read cards panels for cotnact info\n for card in cards:\n form = card.parent.select_one(\"form\")\n\n # if is form of user information\n if form:\n rows = form.select(self.tags.get(\"contact.form.row\"))\n for row in rows:\n label = row.select_one(self.tags.get(\"contact.form.row.label\")).get_text(strip=True)\n value = row.select_one(self.tags.get(\"contact.form.row.value\")).get_text(strip=True)\n\n if label == \"User ID\":\n self.contact[\"account\"] = value\n\n elif label == \"Name\":\n 
self.contact[\"full_name\"] = value\n\n elif label == \"Email\":\n self.contact[\"email\"] = value\n\n else:\n lis = card.parent.select(\"li\")\n for li in lis:\n label = li.select_one(\"label\").get_text(strip=True)\n if label == \"Address\":\n street1 = get_value(li.select_one(self.tags.get(\"contact.address.street1\"))).strip()\n street2 = get_value(li.select_one(self.tags.get(\"contact.address.street2\"))).strip()\n state = get_value(li.select_one(self.tags.get(\"contact.address.state\"))).strip()\n postalcode = get_value(li.select_one(self.tags.get(\"contact.address.zip\"))).strip()\n\n self.contact[\"address_line1\"] = street1\n self.contact[\"address_line2\"] = street2\n self.contact[\"address_state\"] = letters_only(state.strip())\n self.contact[\"address_postal_code\"] = postalcode\n\n elif label in [\"Phone\", \"Time Zone\"]:\n\n key = \"phone_number\" if label == \"Phone\" else \"timezone\"\n self.contact[key] = li.select_one(self.tags.get(\"contact.phone\")).get_text(strip=True).strip()\n\n return self.contact", "def contact_info(self, sensitive=True):\n account_id = self.account_id()\n retry_count = 5\n\n req_url = self.get(\"/accounts/{}/contacts\".format(account_id))['ResultUrl']\n resp = self.get(req_url)\n tries = 0\n while 'Contacts' not in resp and tries < retry_count:\n resp = self.get(req_url)\n tries += 1\n time.sleep(1)\n contacts = resp['Contacts']\n\n contact_data = list()\n for contact in contacts:\n row_data = {\n 'ContactId': contact['Id'],\n 'Email': \"*****@****.***\" if sensitive else contact['Email'],\n 'FirstName': \"*****\" if sensitive else contact['FirstName'],\n 'LastName': \"*****\" if sensitive else contact['LastName'],\n 'Status': contact.get('Status'),\n 'MembeshipEnabled': contact.get('MembershipEnabled'),\n 'TermsOfUseAccepted': contact['TermsOfUseAccepted'],\n }\n\n if 'MembershipLevel' in contact:\n row_data['MembershipLevel'] = contact['MembershipLevel']['Name']\n\n # Map all field values into a dict for convenience\n field_values = {val['FieldName']: val['Value']\n for val in contact['FieldValues']}\n\n # Get list of authorizations\n if 'Managed Authorizations' in field_values:\n authorizations = [i['Label']\n for i in field_values['Managed Authorizations']]\n row_data['Authorizations'] = authorizations\n\n contact_data.append(row_data)\n self.__contact_df = pd.DataFrame(contact_data).set_index('ContactId')\n return self.__contact_df", "def get_contacts(self):\n\n\t\treturn self.__contacts", "def _parse_contact_information(self):\n left_column = self.content.find(\"div\", class_=\"linkeSpalte40\")\n graubox = left_column.find(\n lambda tag: tag.name == \"div\" and tag[\"class\"] == [\"grauBox\"]\n )\n\n emails_raw = graubox.find_all(\"a\", class_=\"mail\")\n websites_raw = graubox.find_all(\"a\", class_=\"noDecoration\")\n telephone_raw = graubox.find_all(\"span\", class_=\"telefonnummer\")\n address_raw = [\n e.nextSibling for e in graubox.find_all(\"em\") if e.text == \"Anschrift:\"\n ]\n\n address = address_raw[0].li.get_text(\"\\n\") if address_raw else None\n emails = [re.sub(r\"^mailto:\", \"\", e.attrs[\"href\"]) for e in emails_raw]\n phone_numbers = [t.text for t in telephone_raw]\n websites = [w.attrs[\"href\"] for w in websites_raw]\n\n return {\n \"address\": address,\n \"emails\": emails,\n \"phone_numbers\": phone_numbers,\n \"websites\": websites,\n }", "def list_contacts(self):\n return self.contacts", "def get_fields(self):\r\n return self.fields", "def contact_info(self):\n return [\n {\n 'contact_info': 
c.get('contactInfo'),\n 'type': c.get('type'),\n 'primary': c.get('primary'),\n 'verified': c.get('verified'),\n }\n for c in self.entity_payload.get('contactInfo')]", "def get_fields():\n if not request.is_xhr:\n abort(403)\n fields = Field.query.all()\n result = {field.id:field.name for field in fields}\n return jsonify(result)", "def search_contact_list(self):\n\n search_db = Database()\n result = search_db.contact_search(self.name)\n if not result:\n print Fore.YELLOW + ' No such contact'\n return None\n if result > 1:\n print ' Which contact ??'\n for items in result:\n if items[2] > 1:\n print Fore.BLUE + ' %s %s %s' % ([items[0]], items[1], items[2])\n else:\n print str(items[1]), items[2]\n\n return result", "def get_all_fields(self):\n fields = []\n for f in self._meta.fields:\n\n fname = f.name \n # resolve picklists/choices, with get_xyz_display() function\n get_choice = 'get_'+fname+'_display'\n if hasattr( self, get_choice):\n value = getattr( self, get_choice)()\n else:\n try :\n value = getattr(self, fname)\n except User.DoesNotExist:\n value = None\n\n # only display fields with values and skip some fields entirely\n if f.editable and value and f.name not in ('id', 'status', 'workshop', 'user', 'complete') :\n\n fields.append(\n {\n 'label':f.verbose_name, \n 'name':f.name, \n 'value':value,\n }\n )\n return fields", "def get_contacts():\n return jsonify(g.driver.get_contacts())", "def listFields(self):\n return self.get_json('/field')", "def get_contacts(self):\n contacts = Membership.objects.filter(entity = self, key_contact = True).order_by('importance_to_entity')\n return contacts", "def _get_all_fields():\n participant_form = custom_form_factory(ParticipantEditForm)\n return list(participant_form().exclude([\n CustomField.CHECKBOX,\n CustomField.IMAGE,\n CustomField.EVENT\n ]))", "def contacts(self):\n return ContactCollection(self.request)", "def fetch_contacts(owner_account_id):\n resp = oauth.tapkey.get(f\"Owners/{owner_account_id}/Contacts?$select=id,identifier\")\n contacts = resp.json()\n return contacts", "def contact_details(self) -> 'outputs.ContactDetailsResponse':\n return pulumi.get(self, \"contact_details\")", "def contact_details(self) -> 'outputs.ContactDetailsResponse':\n return pulumi.get(self, \"contact_details\")", "def contact_details(self) -> 'outputs.ContactDetailsResponse':\n return pulumi.get(self, \"contact_details\")", "def get_fields(self):\n return self.fields", "def get_fields(self):\n return self.fields" ]
[ "0.6611348", "0.6416279", "0.63586295", "0.62937874", "0.6180672", "0.61634904", "0.61201644", "0.61004156", "0.60794693", "0.60475147", "0.6044251", "0.60372907", "0.6014031", "0.5993656", "0.59730715", "0.5948702", "0.59382004", "0.59350777", "0.5933707", "0.593277", "0.5917711", "0.59148526", "0.5914682", "0.5891893", "0.5852614", "0.5848786", "0.5848786", "0.5848786", "0.5839117", "0.5839117" ]
0.6581018
1
Pulls messages modified in the given time window
def pull_messages(self, org, modified_after, modified_before, as_handled=False, progress_callback=None): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_messages(self, since_timestamp=0):\n return filter(lambda x: x.timestamp > since_timestamp,\n self.chat_messages)", "def _get_messages(self):\n try:\n messages = self.channel.get_messages(int(self.get_argument('since_timestamp', 0)))\n\n except ValueError as e:\n messages = self.channel.get_messages()\n\n return messages", "async def recentchanges(self, ctx, limit=50):\r\n if await bMsg(ctx,ctx.message.author.name,client):\r\n return\r\n logger.info('Wiki.recentchanges: ' + str(limit), extra={'invoker': ctx.message.author.name})\r\n twenties, limit = divmod(limit, 20)\r\n async with ctx.channel.typing():\r\n result = ['']\r\n changes = []\r\n start = 'now'\r\n for i in [20 for j in range(twenties)] + [limit]:\r\n resp = await self.req({\r\n 'action': 'query',\r\n 'list': 'recentchanges',\r\n 'rcprop': 'user|timestamp|comment|title|sizes|flags',\r\n 'rctype': 'edit|new',\r\n 'rclimit': i,\r\n 'rcstart': start\r\n })\r\n changes.extend(resp['query']['recentchanges'])\r\n start = resp['query']['recentchanges'][-1]['timestamp']\r\n i = 0\r\n for ch in changes:\r\n change = '\\n'\r\n change += ch['timestamp']\r\n change += ': '\r\n change += ch['title']\r\n change += '; '\r\n sizechange = ch['newlen'] - ch['oldlen']\r\n if sizechange <= -500 or sizechange >= 500:\r\n change += '**'\r\n change += '('\r\n if sizechange <= 0:\r\n change += str(sizechange)\r\n if sizechange > 0:\r\n change += '+' + str(sizechange)\r\n change += ')'\r\n if sizechange <= -500 or sizechange >= 500:\r\n change += '**'\r\n change += ' . . '\r\n change += ch['user']\r\n change += ' _('\r\n change += ch['comment'].replace('*', '\\\\*').replace('_', '\\\\_').replace('`', '\\\\`')\r\n change += ')_'\r\n result[i] += change\r\n if len(result[i]) > 2000:\r\n result.append('')\r\n result[i], result[i+1] = result[i].rsplit('\\n', 1)\r\n i += 1\r\n for r in result:\r\n await ctx.send(r)", "def parse_feed(feed, last_update, entry, get_updated = lambda e: e.updated_parsed[:6]):\n\n entries = []\n for e in feed.entries:\n if datetime(*get_updated(e)) > last_update:\n new = entry(e)\n if new != None:\n entries.append(new)\n return entries", "def get_messages(character):\n mail = character.db.mail\n try:\n messages = [item for item in mail if item[TIMESTAMP] <= item[MESSAGE].date_sent]\n # Let's clean up mail storage for this user while we're at it.\n character.db.mail = messages\n except TypeError:\n messages = []\n return messages", "async def get_project_recent_messages(self, workspace):\n await self.client.login(os.environ['DISCORD_BOT_TOKEN'], bot=self.is_bot)\n messages = []\n try:\n channel = await self.get_channel(workspace.project_channel_id)\n async for message in channel.history(limit=5):\n if message.author.bot:\n continue\n messages.append(message.content)\n except HTTPException as error:\n self.logger.critical(\n f\"discord {self.get_project_recent_messages.__name__} request failed for workspace {workspace.id} and raised error: {error.text} (code {error.code})\")\n\n await self.client.logout()\n return messages", "def get_list(update, context):\n chat = update.message.chat\n user_id = update.message.from_user.id\n if chat.id != user_id:\n msg = 'Управление Вашими напоминаниями доступно в личном диалоге'\n update.message.reply_text(msg)\n return\n\n try:\n handler = db_connector.DataBaseConnector()\n rems = handler.get_user_reminders(user_id)\n rems.sort(key=lambda x: x['datetime'])\n except (ValueError, ConnectionError, KeyError):\n update.message.reply_text(_ERR_MSG)\n _LOGGER.exception('Unable to fetch 
reminders')\n return\n\n if not rems:\n reps_text = 'У вас отсутствуют предстоящие напоминания!'\n update.message.bot.send_message(chat_id=chat.id, text=reps_text)\n return\n\n for rem in rems:\n try:\n resp_text, markup = _compile_rem(rem, show_dt=True)\n context.message = context.bot.send_message(\n chat_id=rem['user_id'], text=resp_text,\n reply_markup=markup, parse_mode=ParseMode.HTML)\n except (ValueError, ConnectionError, KeyError) as err:\n _LOGGER.exception('Unable to process reminder')", "def get_recent_feed_elements(self, sq):\n return sq.sort('-created').query(query.MatchAll())", "def get_recent_chat_messages(limit, round_id=None):\n query = _query_messages(round_id)\n messages = query[:limit]\n return [_make_message(msg, user_id, user_name)\n for msg, user_id, user_name in messages]", "def maintainWindow(self,data,ts):\n\tfor (tstamp,text) in data:\n\t\tif self.compareTimestamp(tstamp,ts) > 60:\n\t\t\tdelTs, delTweet = data.pop(0)\n\t\t\tfor txt in data:\n\t\t\t\tif all(x in txt[1] for x in delTweet):\n\t\t\t\t\t return\n\t\t\tself.updateDict(text)\n\t\telse:\n\t\t\tbreak", "def get_msgs_by_time_range(self, start: Optional[datetime], end: Optional[datetime]):\n data = self.database.search_by_range(self.tname, MsgWithTag.get_time_key(),\n start, end)\n return [(item[0], self.data_to_msg(item)) for item in data]", "def get_new_messages(self):\n inbox = list(self.reddit.inbox.unread(limit=10))\n inbox.reverse()\n return inbox", "def _find_update_docs_since(since: str):\n delta = since_to_delta(since)\n earliest_dt = datetime.now(timezone.utc) - delta\n query = get_db().collection_group(\"updates\").where(\"date\", \">\", earliest_dt)\n return (doc.to_dict() for doc in query.stream())", "def __time_update(user):\n\n feeds = Feed.objects.filter(user=user)\n\n for feed in feeds:\n # Last time updated more than 5 minutes ago\n if (datetime.now() - feed.time) > timedelta(0, 300, 0):\n __update_feed(feed)", "def fetch_group_messages(self):\n last_time = self.last_group_message\n for group_message in GroupMessage.query(GroupMessage.to_group.IN(self.get_all_groups()),\n GroupMessage.sent_time > self.last_group_message):\n Message.send(from_user=group_message.from_user,\n to_user=self.user,\n to_group=group_message.to_group,\n sent_time=group_message.sent_time,\n subject=group_message.subject,\n content=group_message.content)\n if group_message.sent_time > last_time:\n last_time = group_message.sent_time\n self.last_group_message = last_time\n self.put()", "def get_muti_weixin(pubtime, interval):\n return Weixin.select().where(\n (Weixin.pubtime < pubtime) & \n (Weixin.pubtime > pubtime - timedelta(days=7)))", "def get_messages(self, t0, t1, nmax=100, srcid=None):\n if not (nmax < 10000): nmax = 10000\n if srcid is None:\n self.read_curs.execute(\"SELECT time,readout_id,msg FROM textlog WHERE time >= ? AND time <= ? ORDER BY time DESC LIMIT ?\", (t0, t1, nmax))\n else:\n self.read_curs.execute(\"SELECT time,readout_id,msg FROM textlog WHERE time >= ? AND time <= ? AND readout_id=? 
ORDER BY time DESC LIMIT ?\", (t0, t1, srcid, nmax))\n return self.read_curs.fetchall()", "def request_messages(auth_token, room_id):\n\n params = {'auth_token': auth_token, 'max-results': 1000, 'start-index': 0, 'date': \"recent\",\n 'timezone': \"America/New_York\", 'reverse': True}\n\n results = []\n\n room_url = \"https://api.hipchat.com/v2/room/{}/history\".format(room_id)\n\n for i in range(0, 5):\n resp = requests.get(room_url, params=params)\n\n resp_json = resp.json()\n\n assert resp.status_code == 200, \"API request returned a non 200 status code\"\n\n for item in resp_json['items']:\n results.append(item)\n\n current_date = resp_json['items'][0]['date']\n params['date'] = current_date\n\n return results", "def test_forums_filter_updated(self):\n post_updated_ds = datetime(2010, 5, 3, 12, 00)\n\n thread1 = ThreadFactory(title=u't1 audio')\n PostFactory(thread=thread1, created=post_updated_ds)\n\n thread2 = ThreadFactory(title=u't2 audio')\n PostFactory(thread=thread2, created=(post_updated_ds + timedelta(days=2)))\n\n self.refresh()\n\n qs = {'a': 1, 'w': 4, 'format': 'json',\n 'sortby': 1, 'updated_date': '05/04/2010'}\n\n qs['updated'] = constants.INTERVAL_BEFORE\n response = self.client.get(reverse('search.advanced'), qs)\n results = json.loads(response.content)['results']\n eq_([thread1.get_absolute_url()], [r['url'] for r in results])\n\n qs['updated'] = constants.INTERVAL_AFTER\n response = self.client.get(reverse('search.advanced'), qs)\n results = json.loads(response.content)['results']\n eq_([thread2.get_absolute_url()], [r['url'] for r in results])", "def getElemAfterTime(self, stamp):\n newer = [msg for (msg, time) in zip(self.cache_msgs, self.cache_times)\n if time >= stamp]\n if not newer:\n return None\n return newer[0]", "def get_updates():\n url = TELEGRAM_URL + TELEGRAM_TOKEN + '/getUpdates'\n response = requests.get(url).json()\n last_object = response['result'][-1] # -1 = last update\n\n chat_id = last_object['message']['chat']['id']\n message_text = last_object['message']['text']\n message = {\n 'chat_id': chat_id,\n 'text': message_text\n }\n return message", "def longpoll(self, last_offset=0):\n\n params = {'timeout': 25}\n\n if last_offset != 0:\n params['offset'] = last_offset + 1\n\n return self.api_request('getUpdates', params=params, timeout=50)", "def since(self, ts):\n while True:\n items = super(TailingOplog, self).since(ts)\n for doc in items:\n yield doc\n ts = doc['ts']", "def get_tweets(which, hours):\n objects = tweepy.Cursor(\n twitter.list_timeline,list_id=which,\n include_rts=False,count=100\n ).items()\n time_objects = []\n cutoff = (\n datetime.utcnow() - timedelta(hours=hours)\n ).strftime('%b %d %H:%M:%S')\n for tweet in objects:\n data = tweet._json # isolate metadata\n raw_time = datetime.strptime(\n data['created_at'],\n '%a %b %d %H:%M:%S +0000 %Y'\n )\n time = raw_time.strftime('%b %d %H:%M:%S') # reformat to match cutoff for boolean\n if time > cutoff:\n time_objects.append(tweet)\n return time_objects", "def get_sum_of_removed_intervals(messages, time):\n removed_intervals = []\n remote_frame_and_response_indices = __get_remote_frame_and_response_indices(messages)\n\n # Getting the timestamp that specifies the cutoff.\n timestamp = messages[0].timestamp + time\n\n # Going through each remote frame and response with a timestamp below the specified limit.\n for index in remote_frame_and_response_indices:\n if messages[index].timestamp > timestamp:\n break\n else:\n removed_intervals.append(messages[index].timestamp - messages[index - 
1].timestamp)\n\n # Return the sum\n return sum(removed_intervals)", "def get_muti_weibo(pubtime, interval):\n return Weibo.select().where(\n (Weibo.pubtime < pubtime) & \n (Weibo.pubtime > pubtime - timedelta(days=7)))", "def get_any_issues_need_reminder(self, search_timedelta: timedelta, records: List[EventRecord]) -> List[str]:\n fingerprints = [record.fingerprint for record in records]\n with self.session.begin() as session:\n fingerprints_to_remind = (\n session.query(\n sqlalchemy.sql.expression.func.max(EventRecord.sent_at).label(\"sent_at\"), EventRecord.fingerprint\n )\n .filter(EventRecord.fingerprint.in_(fingerprints) & EventRecord.sent_at.isnot(None))\n .group_by(EventRecord.fingerprint)\n .all()\n )\n result = []\n deltat = datetime.utcnow() - search_timedelta\n for f in fingerprints_to_remind:\n if f.sent_at <= deltat:\n result.append(f.fingerprint)\n\n return result", "def list_messages(self):", "def get_messages(self, limit=10):\n logging.info(f\"Retrieving Slack messages from {self.channel_name}...\")\n messages = self.get_messages_json(limit)\n return [msg[\"text\"] for msg in messages]", "def lastMessageReceived():" ]
[ "0.5940991", "0.5776993", "0.57537824", "0.57378644", "0.5713814", "0.56254584", "0.549043", "0.5456706", "0.5432054", "0.5430087", "0.5368939", "0.5348661", "0.533533", "0.53292567", "0.5229311", "0.52072114", "0.5195331", "0.5186423", "0.5154368", "0.5151444", "0.51347214", "0.51029325", "0.5093443", "0.5066005", "0.5057993", "0.502909", "0.5017869", "0.50141466", "0.50031435", "0.4996764" ]
0.6059442
0
Adds the given contact to a group
def add_to_group(self, org, contact, group): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def add_contact_to_contact_group(dbcon: DBConnection, contact_group_id: int, contact_id: int) -> None:\n if not await contact_group_exists(dbcon, contact_group_id):\n raise errors.InvalidArguments('contact group does not exist')\n if not await contact_exists(dbcon, contact_id):\n raise errors.InvalidArguments('contact does not exist')\n q = \"\"\"replace into contact_group_contacts (contact_group_id, contact_id) values (%s, %s)\"\"\"\n q_args = (contact_group_id, contact_id)\n await dbcon.operation(q, q_args)", "def AddContact(self, contact):\n\t\tcontact.group_membership_info = [gdata.contacts.data.GroupMembershipInfo(href=self.GetFirstGroupId())]\n\t\ttry:\n\t\t\tself.client.CreateContact(contact)\n\t\texcept gdata.client.RequestError:\n\t\t\tpass", "def inviteIntoGroup(self, group, contacts=[]):\n contact_ids = [contact.id for contact in contacts]\n self._inviteIntoGroup(group.id, contact_ids)", "def add_contacts(self, contacts, group=None, group_uuid=None):\n payload = self._build_params(contacts=contacts, action='add', group=group, group_uuid=group_uuid)\n self._post('contact_actions', None, payload)", "def add_contact(self, request, **kwargs):\n if request.data is None:\n return Response({'message': 'Invalid contact details'}, status=status.HTTP_400_BAD_REQUEST)\n if request.data.get('first_name') is None:\n return Response({'message': 'First name not provided'}, status=status.HTTP_400_BAD_REQUEST)\n\n contact_data = request.data.get('contact')\n for data in contact_data:\n print(data.get('phone'))\n try:\n parse_number = phonenumbers.parse(data.get('phone'), None)\n except Exception:\n return Response({'details': 'Invalid Phonenumber'}, status=status.HTTP_405_METHOD_NOT_ALLOWED)\n if not phonenumbers.is_valid_number(parse_number):\n return Response({'details': 'Invalid Phonenumber entered'}, status=status.HTTP_405_METHOD_NOT_ALLOWED)\n\n new_contact_data = ContactCreationAndUpdationMixin().create(request.data)\n group = self.get_object()\n group.contacts.add(new_contact_data)\n serializer_data = ContactSerializer(new_contact_data) \n return Response(serializer_data.data)", "def add_contact(self, contact):\n\t\tclient_log.debug(f'Создание контакта {contact}')\n\t\treq = {\n\t\t\tACTION: ADD_CONTACT,\n\t\t\tTIME: time.time(),\n\t\t\tUSER: self.username,\n\t\t\tACCOUNT_NAME: contact\n\t\t}\n\t\twith socket_lock:\n\t\t\tsend_message(self.transport, req)\n\t\t\tself.process_server_ans(get_message(self.transport))", "def add_contact(self, contact):\n self.db.insert_contact(contact)\n return self.update_contacts()", "async def add_contact_group_to_active_monitor(dbcon: DBConnection, contact_group_id: int, monitor_id: int) -> None:\n if not await active_monitor_exists(dbcon, monitor_id):\n raise errors.InvalidArguments('monitor does not exist')\n if not await contact_group_exists(dbcon, contact_group_id):\n raise errors.InvalidArguments('contact does not exist')\n q = \"\"\"replace into active_monitor_contact_groups (active_monitor_id, contact_group_id) values (%s, %s)\"\"\"\n q_args = (monitor_id, contact_group_id)\n await dbcon.operation(q, q_args)", "def update_contact_groups(sender, instance, created, **kwargs):\n if not hasattr(instance, Contact.SAVE_GROUPS_ATTR):\n return\n\n org = instance.org\n\n new_groups_by_uuid = {g[0]: g[1] for g in getattr(instance, Contact.SAVE_GROUPS_ATTR)}\n\n cur_groups_by_uuid = {} if created else {g.uuid: g for g in instance.groups.all()}\n\n # remove this contact from any groups not in the new set\n remove_from = [g for g in 
cur_groups_by_uuid.values() if g.uuid not in new_groups_by_uuid.keys()]\n if remove_from:\n instance.groups.remove(*remove_from)\n\n # add this contact to any groups not in the current set\n add_to_by_uuid = {uuid: name for uuid, name in new_groups_by_uuid.items() if uuid not in cur_groups_by_uuid.keys()}\n\n if add_to_by_uuid:\n org_groups = {g.uuid: g for g in org.groups.all()}\n\n # create any groups that don't exist\n add_to_groups = []\n for uuid, name in add_to_by_uuid.items():\n existing = org_groups.get(uuid)\n if not existing:\n # create stub\n existing = org.groups.create(uuid=uuid, name=name, is_active=False)\n\n add_to_groups.append(existing)\n\n instance.groups.add(*add_to_groups)\n\n delattr(instance, Contact.SAVE_GROUPS_ATTR)", "def createGroupWithContacts(self, name, contacts=[]):\n try:\n contact_ids = []\n for contact in contacts:\n contact_ids.append(contact.id)\n\n group = LineGroup(self, self._createGroup(name, contact_ids))\n self.groups.append(group)\n\n return group\n except Exception as e:\n self.raise_error(e)\n\n return None", "def add_contact(self, contact):\n assert self.contact_in_range(contact), 'Wrong KBucket.'\n try:\n self._contacts.remove(contact)\n except ValueError:\n pass\n\n if len(self._contacts) < constants.K:\n self._contacts.append(contact)\n else:\n raise FullBucketError('No space in bucket to insert contact')", "def add_contact(self, name, number, email, zipcode):\n \n new_contact = f\"{name}, {number}, {email}, {zipcode}\"\n contact_list = [name,number,email,zipcode]\n self.contacts.append(contact_list)\n self.save()\n print(f\"Thank you {new_contact} has been added to your contact book.\")", "def add_contact(self):\n contact_list = {}\n contact_list[self.my_number] = self.name\n connect_db = Database()\n connect_db.add_contact(self.name, self.my_number)", "def add_contact(self):\n contact_mob_num = self._input_mob_num(\"-=\" * 30 + \"\\n\" + \"Please enter contact's mobile number to be added: \")\n if contact_mob_num == self._user.mob_num:\n print(\"You can't add yourself, IDIOT!!\")\n return self.homepage()\n \n found_contact = self.auth.get_users_by_MobNum(contact_mob_num)\n if found_contact != None:\n print('A user with Mobile number: \"{0}\", and User name: \"{1}\" is found'.format(found_contact.mob_num, found_contact.username))\n user_choice = self._int_input_in_range(\" (1) Add the found user. 
\\n (0) Back to Home page \\n Your choice: \" \n ,range_ = (0, 1))\n if user_choice:\n add_flag = self._user.add_contact(found_contact)\n if not add_flag:\n print('This user is already one of your contacts')\n return self.homepage()\n print(\"Contact added successfully\")\n else:\n self.homepage()\n else:\n print('This user mobile number has no matches')\n return self.homepage()", "def add_contact_to_google_account(self, i):\n\n self.add_contact_to_phone(i)", "def add_group():\n name = request.form['name']\n data, code, message = FIELD_SERVICE.add_group(name)\n return __result(data, code, message)", "def _add_group(self, group):\n\n if group.name not in self.groups:\n # it's brand new, add him!\n self.groups[group.name] = group\n if self.groups[group.name] != group:\n # different object, merge\n self._merge_groups(self.groups[group.name], group)", "def AddMemberToGroup(group_id,user_id):\r\n Group.AddMemberToGroup(group_id,user_id)", "def add_contact_to_db_by_one(name, email, module_db_id, contact_id):\n success = False\n if name is not None:\n try:\n done_email = email.lower().strip()\n validate_email(done_email)\n\n if contact_id:\n try:\n contact = Contact.objects.get(id=contact_id, list_owner_id=module_db_id)\n contact.name_and_last_name = name\n contact.email = email\n contact.status = 1\n contact.save()\n success = True\n except Contact.DoesNotExist:\n pass\n else:\n contact, created = Contact.objects.get_or_create(list_owner_id=module_db_id, email=email)\n if created and contact:\n contact.name_and_last_name = name\n contact.status = 1\n contact.save()\n success = True\n except Exception as e:\n print(e.args)\n\n return success, name, email", "def add_contact(contact):\n db = get_db()\n \n if contact.get_hash_name() not in db:\n db[contact.get_hash_name()] = json.loads(contact.json())\n write_db(db)\n else:\n sys.exit(logger.fail('fatal: contact already exists'))", "def addcontact(name, address=None, phone=None, email=None):\n try:\n newid = str(r.incr(\"global:nextUserId\"))\n _setcontact(newid, name, address, phone, email)\n r.sadd(\"contacts\", newid)\n\n return _getcontact(newid)\n except:\n print \"Unexpected error:\", sys.exc_info()[0]\n raise", "def add_contact():\n return 'add contact'", "def test_add_contact(session): # pylint:disable=unused-argument\n org = factory_org_service()\n org.add_contact(TestContactInfo.contact1)\n dictionary = org.as_dict()\n assert dictionary['contacts']\n assert len(dictionary['contacts']) == 1\n assert dictionary['contacts'][0]['email'] == TestContactInfo.contact1['email']", "def group_add_name(org_id, data):\n if data.has_key('groupname'):\n groupname = data['groupname']\n add_group(org_id, groupname, False)", "def add_to_group(self, group):\n\n if not self.in_group(group):\n self.secondary_groups.append(group)\n return self", "async def groupadd(bot: fido, channel: str, sender: str, args: List[str]):\n\n if len(args) == 0:\n return \"Usage: \" + IRC.commandPrefix + \"groupadd <groupname> <nickname> <phonenumber>\"\n\n lines = []\n print(f\"Args: {args}\")\n number = ''\n nickname = ''\n group = ''\n for arg in args:\n if arg == \"\":\n continue # Ignore blank args.\n print(f\"Arg: [{arg.strip()}]\")\n if arg.startswith('+'):\n number = arg\n elif arg in bot.users:\n nickname = arg\n else:\n group = arg\n if not group or not nickname or not number:\n await bot.message(channel, \"Incorrect command usage. 
Ensure user is in channel, and that number has +<country code>.\")\n return\n add_group(mygroup=group, nickname=nickname, number=number)\n await bot.message(channel, f\"Added {nickname} to SMS group {group} with number {number}\")", "def test_add_member_by_id_to_group(self):\n pass", "async def set_contact_group_contacts(dbcon: DBConnection,\n contact_group_id: int, contact_ids: Iterable[int]) -> None:\n\n async def _run(cur: Cursor) -> None:\n q = \"\"\"delete from contact_group_contacts where contact_group_id=%s\"\"\"\n await cur.execute(q, (contact_group_id,))\n for contact_id in contact_ids:\n q = \"\"\"insert into contact_group_contacts (contact_group_id, contact_id) values (%s, %s)\"\"\"\n q_args = (contact_group_id, contact_id)\n await cur.execute(q, q_args)\n\n if not await contact_group_exists(dbcon, contact_group_id):\n raise errors.InvalidArguments('contact group does not exist')\n await dbcon.transact(_run)", "def remove_from_group(self, org, contact, group):\n pass", "def set_group(self, address, group):\n self.groups[address] = group" ]
[ "0.7749197", "0.77469045", "0.715298", "0.71111506", "0.69037414", "0.6896029", "0.68665344", "0.6827527", "0.6824475", "0.67674", "0.67276967", "0.64900553", "0.63950515", "0.63912946", "0.63846886", "0.6381403", "0.63315827", "0.62689406", "0.622115", "0.6212235", "0.61742145", "0.6171326", "0.61192375", "0.6098023", "0.60867846", "0.6064894", "0.6047536", "0.60392797", "0.6031538", "0.6031177" ]
0.8768964
0
Removes the given contact from a group
def remove_from_group(self, org, contact, group): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def delete_contact_from_contact_group(dbcon: DBConnection, contact_group_id: int, contact_id: int) -> None:\n q = \"\"\"delete from contact_group_contacts where contact_group_id=%s and contact_id=%s\"\"\"\n q_args = (contact_group_id, contact_id)\n await dbcon.operation(q, q_args)", "def RemoveContact(self, contact):\n\t\tself.client.Delete(contact)", "def remove_contact(self, contact):\n\t\tclient_log.debug(f'Удаление контакта {contact}')\n\t\treq = {\n\t\t\tACTION: REMOVE_CONTACT,\n\t\t\tTIME: time.time(),\n\t\t\tUSER: self.username,\n\t\t\tACCOUNT_NAME: contact\n\t\t}\n\t\twith socket_lock:\n\t\t\tsend_message(self.transport, req)\n\t\t\tself.process_server_ans(get_message(self.transport))", "def remove_contact(self, contact):\n try:\n self._contacts.remove(contact)\n except ValueError:\n pass", "def remove_contacts(self, contacts, group=None, group_uuid=None):\n payload = self._build_params(contacts=contacts, action='remove', group=group, group_uuid=group_uuid)\n self._post('contact_actions', None, payload)", "def del_contact(contact):\n db = get_db()\n \n if contact.get_hash_name() in db:\n db.pop(contact.get_hash_name())\n write_db(db)\n sys.exit(logger.ok('success: contact ' + '\"%s\"' % contact.get_name() + ' deleted'))\n else:\n sys.exit(logger.fail('fatal: contact does not exist'))", "async def delete_contact_group(dbcon: DBConnection, contact_group_id: int) -> None:\n if not await contact_group_exists(dbcon, contact_group_id):\n raise errors.InvalidArguments('contact group does not exist')\n q = \"\"\"delete from contact_groups where id=%s\"\"\"\n await dbcon.operation(q, (contact_group_id,))", "def delete_contact(self, contact):\n self._delete('contacts', self._build_params(uuid=contact))", "def remove_contact(self):\n contact_mob_num = input(\"-=\" * 30 + \"\\n\" + \"Please enter contact's mobile number to be removed: \")\n contact = self.auth.get_users_by_MobNum(contact_mob_num)\n if (not contact) or contact not in self._user.contacts:\n print('This user not in your contact list')\n return self.homepage()\n \n self._user.remove_contact(contact)\n print('Contact removed successfully')\n return self.homepage()", "async def delete_contact_group_from_active_monitor(dbcon: DBConnection, contact_group_id: int, monitor_id: int) -> None:\n q = \"\"\"delete from active_monitor_contact_groups where active_monitor_id=%s and contact_group_id=%s\"\"\"\n q_args = (monitor_id, contact_group_id)\n await dbcon.operation(q, q_args)", "def do_del_group(dbsync, group):\n pass", "def remove_group():\n _id = request.form['_id']\n data, code, message = FIELD_SERVICE.remove_group(_id)\n return __result(data, code, message)", "def delcontact(id):\n delid = str(id)\n\n try:\n r.srem(\"contacts\", delid, 1)\n\n r.delete(\"uid:\" + delid + \":name\")\n r.delete(\"uid:\" + delid + \":address\")\n r.delete(\"uid:\" + delid + \":phone\")\n r.delete(\"uid:\" + delid + \":email\")\n\n return {}\n except:\n print \"Unexpected error:\", sys.exc_info()[0]\n raise", "def delete_contact(self):\n delete_first_name = input(\"Enter first name that you want to delete\\n\")\n for contact in self.contact_list:\n if contact.first_name == delete_first_name:\n #print(str(contact))\n self.contact_list.remove(contact)\n else:\n print(f\"No contact is present with first name {delete_first_name} \")", "def delete_group(self, group):\n raise NotImplementedError('delete_group')", "def remove_mailing_list_group(sender, instance, **kwargs):\n\tname = instance.name\n\treturn 
requests.delete(\"https://api.mailgun.net/v3/lists/{}@arenbergorkest.be\".format(name),auth=('api', settings.MAILGUN_API_KEY))", "def remove_from_group(self, group):\n\n if self.in_group(group):\n self.secondary_groups.remove(group)\n return self", "def remove_from_contact_list(self, contacts_to_remove_list):\n if self.contact_list is None:\n return\n for id in contacts_to_remove_list:\n if id in range(0, len(self.contact_list) + 1):\n self.contact_list[id - 1] = None\n self.contact_list = [contact for contact in self.contact_list if contact is not None]", "def mailman_remove(contact, listname=None, userack=None, admin_notify=None):\n\n\n mm, listname = _get_maillist(listname)\n print('mailman removing %s from %s' % (contact.email, listname), file=sys.stderr)\n if mm.isMember(contact.email):\n try:\n mm.Lock()\n mm.ApprovedDeleteMember(contact.email, 'satchmo_ext.newsletter', admin_notify, userack)\n mm.Save()\n finally:\n mm.Unlock()", "def deleteGroup(groupName):\r\n Group.deleteGroup(groupName)", "def remove_group(self, index):\n group = self.get(index).group\n for stone_index in group.members:\n self.board[stone_index] = None", "def remove_contact(self, contact):\n super(CachingKBucket, self).remove_contact(contact)\n self.fill_from_cache()", "def group_remove(group, board):\n for xy in group:\n board[xy[0]][xy[1]] = None\n return deepcopy(board)", "def do_delContact(self, line):\n\t\tif not(self.db is None):\n\t\t\ttry:\n\t\t\t\tself.db.contact.delete_one({'_id': ObjectId(line)})\n\t\t\texcept Exception:\n\t\t\t\tprint(\"This id doesn't exist!\")\n\t\telse:\n\t\t\tprint(\"You must open the existing database or create new one.\")", "def remove_group(self, resolvable):\n group = self._resolve_group(resolvable)\n\n for membership in self.group_memberships:\n if membership.group.href == group.href:\n membership.delete()\n return\n\n raise StormpathError({\n 'developerMessage': 'This user is not part of Group %s.' 
% group.name,\n })", "def removeMember(self, *args):\n return _libsbml.Group_removeMember(self, *args)", "def remove_contact(request, ck, contact_name):\n\n refresh_template = request.session[constants.ACTUAL_TEMPLATE]\n\n contacts = request.session[constants.ADD_CONTACTS]\n contact = next(el for el in contacts if el.contact == contact_name)\n\n if ck != \"0\":\n coding = get_object_or_404(CodingProject, id=ck)\n\n # TODO: Review this \n us = get_user(request)\n user = us\n\n # Project must have been created by the current user and\n # User must have permission to add new CodeRequest\n if coding.coder != user.id:\n raise Http404\n\n if coding.contacts.filter(contact=contact_name):\n cache_list = request.session[constants.REM_CONTACTS]\n cache_list.append(contact)\n\n contacts.remove(contact)\n request.session[constants.ADD_CONTACTS] = contacts\n\n # TODO: Centralize this?\n return HttpResponseRedirect(refresh_template)", "def test_remove_member_from_group(client):\n group = client.remove_members_from_group(TEAM_ID, GROUP_ID, 35555)\n assert group.team_id == TEAM_ID\n assert group.group_id == GROUP_ID\n assert 35555 not in group.members", "def rm_contact_from_addressbook(database, name, surname, database_counter,\n database_ids):\n\n from addressbook.verify_contact import check_if_contact_exists\n\n if check_if_contact_exists(database, name, surname, database_counter,\n database_ids)[0] == 'Yes':\n print('The following contact will be removed:')\n id = check_if_contact_exists(database, name, surname, database_counter,\n database_ids)[1]\n print(str(id), '|', database[f'{id}']['first name'], '|',\n database[f'{id}']['last name'],\n '|', database[f'{id}']['address'], '|',\n database[f'{id}']['mobile phone'])\n del database[f'{id}']\n print('\\n')\n return id\n else:\n print('There is no such contact for deletion!')\n print('\\n')\n return 0", "def delete(self):\n self.skype.conn(\"DELETE\", \"{0}/users/{1}/contacts/8:{2}\"\n .format(SkypeConnection.API_CONTACTS, self.skype.userId, self.id),\n auth=SkypeConnection.Auth.SkypeToken)\n self.skype.conn(\"DELETE\", \"{0}/users/ME/contacts/8:{1}\".format(self.skype.conn.msgsHost, self.id),\n auth=SkypeConnection.Auth.RegToken)" ]
[ "0.7738712", "0.75844175", "0.7367516", "0.73304904", "0.7268021", "0.7024257", "0.69580394", "0.69009084", "0.6803163", "0.67921567", "0.66546595", "0.66142553", "0.65734607", "0.6561294", "0.6542721", "0.6524597", "0.6427319", "0.6416925", "0.6344568", "0.6335024", "0.63288933", "0.62941855", "0.6276243", "0.6253841", "0.62476003", "0.6226366", "0.6222055", "0.61835843", "0.61765844", "0.61517936" ]
0.8879491
0
Stops any ongoing flow runs for the given contact
def stop_runs(self, org, contact): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def stop() -> None:", "def stop(self) -> None:\n ...", "def stop(self):\n self._run = False", "def stop (self):\n pass", "def stop (self):\n pass", "def stop(self) -> None:", "def stop(self) -> None:", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self):\n pass", "def stop():", "def stop():", "def stop():", "def stop():", "def stop(self):\n if self._running:\n self._running = False\n self._call.stop()", "def stop(self):\n self.running = False", "def stop(self):\n self.running = False", "def stop(self):\n self.running = False", "def stop(self):\n self.running = False", "def stop(self):\n self.running = False", "def stop(self):\n self._current_mc_goal = None\n self._mc_goals = []\n self._position_control_client.cancel_all_goals()\n action = Stop()\n self._velocity_control_client(pickle.dumps(action))", "def stop(self):\r\n self.running = False" ]
[ "0.5933513", "0.58750427", "0.5862508", "0.58500177", "0.58500177", "0.58467513", "0.58467513", "0.58322924", "0.58322924", "0.58322924", "0.58322924", "0.58322924", "0.58322924", "0.58322924", "0.58322924", "0.58322924", "0.58322924", "0.58322924", "0.5822106", "0.5822106", "0.5822106", "0.5822106", "0.58179677", "0.5817521", "0.5817521", "0.5817521", "0.5817521", "0.5817521", "0.5809383", "0.5797682" ]
0.8120205
0
Adds a label to the given messages
def label_messages(self, org, messages, label): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def addLabel(*args):", "def label_messages(self, messages, label=None, label_uuid=None):\n payload = self._build_params(messages=messages, action='label', label=label, label_uuid=label_uuid)\n self._post('message_actions', None, payload)", "def addmessageslabels(self, uidlist, labels):\n\n labels = labels - self.ignorelabels\n result = self._messagelabels_aux('+X-GM-LABELS', uidlist, labels)\n if result:\n for uid in uidlist:\n self.messagelist[uid]['labels'] = self.messagelist[uid]['labels'] | labels", "def add_labels(self, labels: dict):\n self.status = \"Creating labels\"\n for lname, value in labels.items():\n self.labels.add_label(lname, value)", "def AddLabels(self, labels):\n for label in labels:\n if not label or not self._VALID_LABEL_REGEX.match(label):\n raise ValueError((\n 'Unsupported label: \"{0!s}\". A label must only consist of '\n 'alphanumeric characters or underscores.').format(label))\n\n for label in labels:\n if label not in self.labels:\n self.labels.append(label)", "def AddLabel(self, label):\n if not isinstance(label, str):\n raise TypeError('label is not a string type. Is {0!s}'.format(\n type(label)))\n\n if not self._VALID_LABEL_REGEX.match(label):\n raise ValueError((\n 'Unsupported label: \"{0:s}\". A label must only consist of '\n 'alphanumeric characters or underscores.').format(label))\n\n if label not in self.labels:\n self.labels.append(label)", "def AddLabel(self, label):\n if self.labels is None:\n self.labels = set()\n self.labels.add(label)", "def _add_label(self):\n\n label = self._label_edit.text()\n labelNames = [i[0] for i in self.labels]\n if not label in list(labelNames):\n self.labels.append((label,0))\n self._option_selector.addItem(label)\n with open(\"{}/labels.txt\".format(self.output_directory), 'a') as file:\n file.write(\"{}\\n\".format(label))\n self._label_edit.setText('')", "def add(self, key, label):\n self.labels[key] = label", "def add_label(self, label):\n if not self.has_label(label):\n self.add_category(scheme=LABELS_SCHEME,\n term='%s#%s' % (LABELS_NS, label),\n label=label)", "def add_label(self, label_update):\n if not label_update in self.__labels:\n self.__labels.append(label_update)", "def add_label(self, label):\n status = self.ocp.add_label(resource_name=self.name, label=label)\n self.reload()\n return status", "def _add_label(self):\n add_dialog = AddLabelDialog(self._label_config)\n add_dialog.exec_()\n self._update_combobox()", "def add_labels(self, phrase_labels: List[Union[Phrase, Dict[str, Union[str, List[str]]]]]):\n for phrase in phrase_labels:\n phrase = as_phrase_object(phrase, ngram_size=self.ngram_size, skip_size=self.skip_size)\n if phrase.label is None:\n continue\n if phrase.phrase_string not in self.phrase_index:\n print(f'skipping label for unknown phrase {phrase}')\n for label in phrase.label_set:\n self.has_labels[phrase.phrase_string].add(label)\n self.is_label_of[label].add(phrase.phrase_string)", "def setLabel(*args):", "def setLabel(*args):", "def setLabel(*args):", "def setLabel(*args):", "def setLabel(*args):", "def add_label(self, address: int) -> None:\n\n self.labels.append(address)", "def __add_new_label(self, name, value):\n self.__labels_dict[name] = value", "def set_label(self, message, color):\n self.label.prev_str = self.label_var.get()\n self.label.prev_color = self.label.configure()[\"background\"][4]\n self.label_var.set(message)\n self.label.configure(bg=color)\n self.update()", "def add_message(username, message):\n now = datetime.now().strftime(\"%H:%M:%S\")\n 
messages.append(\"({}) {}: {}\".format(now, username, message))", "def write_label(self, label):\n self._write_line('label ' + label) # TODO generate unique labels?", "def ex_label(self,label,argl):\n if len(label) > 0 and label[0] != '_':\n return label\n comment = ''\n for i in argl:\n phrase = ''\n if i == 'l':\n phrase = label\n elif i in self._labels.keys():\n phrase = self._labels[i]\n comment += phrase\n return comment", "def labels(self, labels):\n self._instructions_setter('LABEL', labels)", "def add_message(self, msg):\n self.messages.append(msg)", "def add_labels(number, labels):\n\n cmds = [github_cli, 'pr', 'edit', str(number)]\n for lab in labels:\n cmds += ['--add-label', lab]\n\n with subprocess.Popen(cmds) as p:\n _, err = p.communicate()\n print(err)", "def test_issue_add_label(self):\n pass", "def add_label(self, new_name, status):\n api_uri = self._uri_dict.get('addLabel')\n data = {\n 'newName': new_name,\n 'status': status\n }\n r_data = self._post(api_uri, data)\n return r_data" ]
[ "0.75261736", "0.74693096", "0.71880233", "0.6999815", "0.69119644", "0.68023497", "0.6617087", "0.65335524", "0.6471467", "0.64527583", "0.6380113", "0.6318893", "0.6234198", "0.61687577", "0.6146832", "0.6146832", "0.6146832", "0.6146832", "0.6146832", "0.61147046", "0.6102525", "0.6034449", "0.6023934", "0.60081124", "0.59949005", "0.5987666", "0.5980123", "0.5973668", "0.5970328", "0.5961093" ]
0.81872386
0
Removes a label from the given messages
def unlabel_messages(self, org, messages, label): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unlabel_messages(self, messages, label=None, label_uuid=None):\n payload = self._build_params(messages=messages, action='unlabel', label=label, label_uuid=label_uuid)\n self._post('message_actions', None, payload)", "def deletemessageslabels(self, uidlist, labels):\n\n labels = labels - self.ignorelabels\n result = self._messagelabels_aux('-X-GM-LABELS', uidlist, labels)\n if result:\n for uid in uidlist:\n self.messagelist[uid]['labels'] = self.messagelist[uid]['labels'] - labels", "def remove(self, label):\n\n\t\t\tself[label].remove()", "def RemoveLabel(self, label):\n if self.labels is None:\n self.labels = set()\n else:\n try:\n self.labels.remove(label)\n except KeyError:\n pass", "def remove_label(self, label):\n for category in self.get_categories(LABELS_SCHEME):\n if category.label == label:\n self.category.remove(category)", "def remove(self: TokenMatcher, label: str) -> None:\n try:\n del self._patterns[label]\n del self._callbacks[label]\n except KeyError:\n raise ValueError(\n f\"The label: {label} does not exist within the matcher rules.\"\n )", "async def removed_label(event, gh, *args, **kwargs):\n if event.data[\"label\"][\"name\"] == TRIVIAL_LABEL:\n await set_status(event, gh)", "def _del_label(self):\n label = self.combobox.currentText()\n if label:\n button = QMessageBox.warning(self, \"Delete label\", \n \"Are you sure that you want to delete label %s ?\" % label,\n QMessageBox.Yes,\n QMessageBox.No)\n if button == QMessageBox.Yes:\n self._label_config.remove_label(str(label))\n self._update_combobox()", "def unflag_messages(self, org, messages):\n pass", "def remove_label(self, key: str):\n del self.labels[key]", "def remove_labels(number, labels):\n\n cmds = [github_cli, 'pr', 'edit', str(number)]\n for lab in labels:\n cmds += ['--remove-label', lab]\n\n with subprocess.Popen(cmds) as p:\n _, err = p.communicate()\n print(err)", "def label_messages(self, org, messages, label):\n pass", "def remove_label(self, ):\n if self.AttributeNames.LABEL in self.attrs:\n del self.attrs[self.AttributeNames.LABEL]\n return self", "def test_issue_remove_label(self):\n pass", "def removeLabelFromPage(self, label, page):\n return self.pm_getSpaceManager().removeLabelFromPage(self._unbox(label), self._unbox(page))", "def remove_labels(self, test):\n ii = 0\n while ii < len(self.labels):\n if test(self.labels[ii]):\n self.labels.pop(ii)\n else:\n ii += 1\n return self", "def remove_labels(self, test):\n ii = 0\n while ii < len(self.labels):\n if test(self.labels[ii]):\n self.labels.pop(ii)\n else:\n ii += 1\n return self", "def remove_recog_label(self, event):\n\t\tc=self.seqframe\n\t\tc.delete('recogseqlabel')\n\t\treturn", "def label_messages(self, messages, label=None, label_uuid=None):\n payload = self._build_params(messages=messages, action='label', label=label, label_uuid=label_uuid)\n self._post('message_actions', None, payload)", "def remove_labels(self, phrases: Union[List[Phrase], List[str]]) -> None:\n for phrase in phrases:\n phrase_string = phrase if isinstance(phrase, str) else phrase.phrase_string\n if phrase_string not in self.phrase_index:\n raise TypeError(f'unknown phrase {phrase_string}')\n else:\n for label in self.has_labels[phrase_string]:\n self.is_label_of[label].remove(phrase_string)\n if len(self.is_label_of[label]) == 0:\n del self.is_label_of[label]\n del self.has_labels[phrase_string]", "def drop_empty_messages(messages, labels):\n non_zero_idx = [ii for ii, message in enumerate(messages) if len(message) != 0]\n messages_non_zero = 
np.array([messages[ii] for ii in non_zero_idx])\n labels_non_zero = np.array([labels[ii] for ii in non_zero_idx])\n return messages_non_zero, labels_non_zero", "def unset_label(self):\n self.set_label(self.label.prev_str, self.label.prev_color)", "def delete_label(self, repository, name, **kwargs):\n response = self.session.delete(\n '{}/repos/{}/labels/{}'.format(\n self.GH_API_ENDPOINT, repository, name\n )\n )\n if response.status_code != 204:\n raise GitHubError(response)", "def user_labels_erase(*args):\n return _ida_hexrays.user_labels_erase(*args)", "def _remove_receipt(self, msg):\n token = msg.get_token()\n if token in self._receipts:\n receipt = self._receipts[token]\n del self._receipts[token]\n label = receipt.get_label()\n if label and label in self._receipt_labels:\n del self._receipt_labels[label]", "def delete_metering_label(self, label):\r\n return self.delete(self.metering_label_path % (label))", "def removeLabelFromSpace(self, label, space):\n return self.pm_getSpaceManager().removeLabelFromSpace(self._unbox(label), self._unbox(space))", "def delete_label(self, label_id: str):\n return delete_label(self.api_key, label_id)", "def delete_label(id):\n dao.delete_label(id)\n return jsonify(dao.get_label(id))", "def remove_device_label(self, device_id: str, label_id: str):\n return remove_device_label(self.api_key, device_id, label_id)" ]
[ "0.80253", "0.7788127", "0.73874354", "0.71699005", "0.70764935", "0.7037179", "0.6937076", "0.6839757", "0.6730904", "0.66412556", "0.65370417", "0.65187776", "0.6510565", "0.6479989", "0.64494795", "0.64099365", "0.64099365", "0.63469744", "0.62426597", "0.6208268", "0.6190496", "0.6187082", "0.6086205", "0.6049698", "0.60231334", "0.6012901", "0.5975494", "0.5913453", "0.5870834", "0.5855289" ]
0.8519321
0
Archives the given messages
def archive_messages(self, org, messages): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def archive_messages(self, messages):\n self._post('message_actions', None, self._build_params(messages=messages, action='archive'))", "def unarchive_messages(self, messages):\n self._post('message_actions', None, self._build_params(messages=messages, action='unarchive'))", "def _archive_logs(self, logdir, files):\n cwd = os.getcwd()\n archive_wd = os.path.dirname(logdir)\n archive_file = os.path.basename(logdir) + \".tgz\"\n\n # move files into logdir for archive\n for f in files:\n self.logger.info(\"moving '%s' to archive folder\" % f)\n shutil.move(f, logdir)\n\n # move to logdir parent folder\n self.logger.info(\"archiving profile logs into '%s'\" % archive_file)\n os.chdir(archive_wd)\n archive = tarfile.open(archive_file, \"w:gz\")\n archive.add(os.path.basename(logdir))\n archive.close()\n\n # go back to current working dir and remove logdir\n os.chdir(cwd)\n shutil.rmtree(logdir)", "def archive_contact_messages(self, org, contact):\n pass", "def archive_logs():\n logging.info('Archive start...')\n\n for log_dir in filter(dir_filter, os.listdir('logs')):\n path = 'logs/{}'.format(log_dir)\n archive_files = filter(lambda x: '.log.' in x, os.listdir(path))\n zip_file_name = '{}/{}.zip'.format(\n path,\n str(datetime.now())\n .replace(' ', '_').replace('.', '_').replace(':', '_'))\n zip_file = zipfile.ZipFile(\n zip_file_name, mode='w', compression=zipfile.ZIP_DEFLATED)\n for f in archive_files:\n log_file = '{}/{}'.format(path, f)\n zip_file.write(log_file)\n os.remove(log_file)\n\n logging.info('Archive end.')", "def zip_images(self, messages_to_save):\n\n s = StringIO.StringIO()\n\n try:\n with zipfile.ZipFile(s, mode='w') as zf:\n for message, some_images in messages_to_save.iteritems():\n for an_image in some_images:\n zf.writestr(an_image.name(), an_image.body())\n\n return True, zf\n\n except:\n zf.close()\n\n return False", "def archive_bag_sns_messages(bags, bucket):\n for bag in bags:\n request_id = str(uuid.uuid4())\n yield {\n 'archiveRequestId': request_id,\n 'zippedBagLocation': {\n 'namespace': bucket,\n 'key': bag\n }\n }", "def export_messages_from_file(\n src_file: Path, msg_ids: Iterable[int], dest_folder: Path = None\n) -> None:\n\n dest_folder = (dest_folder or Path.cwd()) / src_file.stem\n dest_folder.mkdir(parents=True, exist_ok=True)\n\n with open_mail_archive(src_file) as archive:\n for msg_id in msg_ids:\n try:\n # Get message from archive\n msg = archive.get_message_by_id(int(msg_id))\n\n # Process PST or MBOX message and attachments\n if isinstance(archive, MboxArchive):\n # Extract attachments\n attachments = [\n AttachmentMetadata(\n name=part.get_filename(),\n content=part.get_payload(decode=True),\n )\n for part in msg.walk()\n if (\n content_disposition := part.get_content_disposition() or \"\"\n ).startswith(\"attachment\")\n or content_disposition.startswith(\"inline\")\n ]\n\n if attachments:\n # Make directory for this message's attachments\n attachments_folder = dest_folder / f\"{msg_id}_attachments\"\n attachments_folder.mkdir(parents=True, exist_ok=True)\n\n # Write files\n for attachment in attachments:\n (attachments_folder / attachment.name).write_bytes(\n attachment.content\n )\n\n else: # PST archive\n if msg.number_of_attachments > 0:\n # Make directory for this message's attachments\n attachments_folder = dest_folder / f\"{msg_id}_attachments\"\n attachments_folder.mkdir(parents=True, exist_ok=True)\n\n # Extract attachments and write files\n for attachment in msg.attachments:\n buffer = attachment.read_buffer(attachment.size)\n 
(attachments_folder / attachment.name).write_bytes(buffer)\n\n # Convert message to Python Message type\n msg = Parser(policy=policy.default).parsestr(pff_msg_to_string(msg))\n\n # Write message as eml file\n with (dest_folder / f\"{msg_id}.eml\").open(\n mode=\"w\", encoding=\"utf-8\", errors=\"replace\"\n ) as eml_file:\n Generator(eml_file).flatten(msg)\n\n except Exception as exc:\n logger.warning(\n f\"Skipping message {msg_id} from {src_file}, reason: {exc}\",\n exc_info=True,\n )", "def archive(self, files, name):\n self.log.debug(\"Putting files into archive: %s\" % \"\\n\".join(files))\n tar_name = \"%s%s\" % (name, self.extension)\n if os.path.exists(tar_name):\n raise RuntimeError (\"Tried to create an archive that already exists: %s\" % tar_name) \n else:\n self.log.info(\"Creating a new archive %s\" % tar_name)\n tar = tarfile.open(tar_name, 'w:gz');\n for name in files:\n tar.add(name)\n print '%s'% (name)\n tar.close()\n return tar_name", "def archive_bag_api_messages(bags, bucket):\n for bag in bags:\n yield {\n 'type': 'Ingest',\n 'ingestType': {\n 'id': 'create',\n 'type': 'IngestType'\n },\n 'uploadUrl': f's3://{bucket}/{bag}'\n }", "def publish_list(self, messages: list) -> None:", "def publish_list(self, messages: list) -> None:", "def archive(self, item_id, **params):\n\n self.queue('archive', item_id=item_id, **params)", "def after_archive_tarball(msg, config, checklist):\n return []", "def archive_log(self, f_in, filename):\n if not os.path.isdir('archived'):\n os.makedirs('archived')\n f_out = gzip.open('archived/'+filename+'.gz', 'wb')\n f_out.writelines(f_in)\n f_out.close()\n f_in.close()", "def purge(self, message_list, action, userId='me'):\n\n count = 0\n for item in message_list:\n if action.lower() == 'archive':\n resource = getattr(self.connection.users().messages(), 'modify')\n dynamic_request = resource(userId=userId, id=message_list[item], body=\n {\n \"removeLabelIds\": [ \"INBOX\" ]\n })\n else:\n resource = getattr(self.connection.users().messages(), action)\n dynamic_request = resource(userId=userId, id=message_list[item])\n\n try:\n response = dynamic_request.execute()\n count += 1\n print(f'[√] Action: {action} - {count} of {len(message_list)} - Message ID: {message_list[item]}')\n except googleapiclient.errors.HttpError as error:\n if error.resp.status == 404:\n print(f'[X] Error: ID {message_list[item]} Not Found')\n else:\n print(f'[X] Error: ID {mesage_list[item]} {error}')\n count -= 1\n print(f'[√] Processed: {count} of {len(message_list)} Messages')\n return True", "def archive_logs(self):\n source = GAConfig[\"log_file_location\"]\n destination = source + \"Archive/\"\n\n if not os.path.exists(source):\n os.makedirs(source)\n if not os.path.exists(destination):\n os.makedirs(destination)\n\n if len(os.listdir(source)) > 1:\n specific_folder = destination + str(\n len(os.listdir(destination))) + '/'\n os.makedirs(specific_folder)\n for f in os.listdir(source):\n if((\".log\" in f) or (\".zip\" in f)):\n shutil.move(source + f, specific_folder)", "async def deliver(self, messages: EmailMessage | Iterable[EmailMessage]) -> None:", "def _messages_post(self, queue, messages, min_msg_count, max_msg_count):\n with atomic.ActionTimer(self, \"zaqar.post_between_%s_and_%s_messages\" %\n (min_msg_count, max_msg_count)):\n queue.post(messages)", "def consolidate_messages(self, msg):", "def cleanup(self, archive, files):\n mtime = self.test(archive, files)\n backup_home = os.path.join(self.download_dir, '-')\n if not os.path.exists(backup_home):\n 
os.makedirs(backup_home)\n backup_dir = tempfile.mkdtemp('', datetime.utcnow().strftime(\"%Y-%m-%d_\"), backup_home)\n for file in files:\n os.makedirs(os.path.join(backup_dir, file))\n if os.path.getmtime(file) != mtime[file]:\n raise RuntimeError(\"Failed to cleanup archived data: %s has been modified.\" % file)\n os.rename(file, os.path.join(backup_dir, file))\n self.log.debug(\"Moved %s to %s\" % (file, os.path.join(backup_dir, file)))\n return", "def archive(self, item):\n self._createAction(item, \"archive\")", "def archive_contacts(self, contacts):\n self._post('contact_actions', None, self._build_params(contacts=contacts, action='archive'))", "def tests_unarchives(self):\n group = self.create_group()\n user1 = self.create_user()\n user2 = self.create_user()\n\n user1.add_to_group(group.pk)\n user2.add_to_group(group.pk)\n\n thread = self.create_thread(\n group=group, sender=user1, create_recipient=False)\n\n # Since we mock-out send_message in `Message.save()` we have to call\n # `send_message` directly to generate userthreads\n send_message(thread.first_message.pk)\n\n thread.userthread_set.update(status='archived')\n self.assertEqual(\n thread.userthread_set.filter(\n status='archived').count(),\n 2\n )\n\n newmessage = mommy.make(Message, thread=thread, sender=user1)\n send_message(newmessage.pk)\n\n # Author of new message should still have thread archived\n self.assertTrue(\n thread.userthread_set.filter(\n status='archived', user=user1).exists()\n )\n # User 2 should have the thread unarchived\n self.assertTrue(\n thread.userthread_set.filter(\n status='active', user=user2).exists()\n )", "def process(mlist, msg, msgdata):\n # Digests and Mailman-craft messages should not get additional headers.\n if msgdata.get('isdigest') or msgdata.get('nodecorate'):\n return\n d = {}\n member = msgdata.get('member')\n if member is not None:\n # Calculate the extra personalization dictionary.\n recipient = msgdata.get('recipient', member.address.original_email)\n d['member'] = formataddr(\n (member.subscriber.display_name, member.subscriber.email))\n d['user_email'] = recipient\n d['user_delivered_to'] = member.address.original_email\n d['user_language'] = member.preferred_language.description\n d['user_name'] = member.display_name\n # For backward compatibility.\n d['user_address'] = recipient\n # Calculate the archiver permalink substitution variables. This provides\n # the $<archive-name>_url placeholder for every enabled archiver.\n for archiver in IListArchiverSet(mlist).archivers:\n if archiver.is_enabled:\n # Get the permalink of the message from the archiver. Watch out\n # for exceptions in the archiver plugin.\n try:\n archive_url = archiver.system_archiver.permalink(mlist, msg)\n except Exception:\n alog.exception('Exception in \"{}\" archiver'.format(\n archiver.system_archiver.name))\n archive_url = None\n if archive_url is not None:\n placeholder = '{}_url'.format(archiver.system_archiver.name)\n d[placeholder] = archive_url\n # These strings are descriptive for the log file and shouldn't be i18n'd\n d.update(msgdata.get('decoration-data', {}))\n header = decorate('list:member:regular:header', mlist, d)\n footer = decorate('list:member:regular:footer', mlist, d)\n # Escape hatch if both the footer and header are empty or None.\n if len(header) == 0 and len(footer) == 0:\n return\n # Be MIME smart here. 
We only attach the header and footer by\n # concatenation when the message is a non-multipart of type text/plain.\n # Otherwise, if it is not a multipart, we make it a multipart, and then we\n # add the header and footer as text/plain parts.\n #\n # BJG: In addition, only add the footer if the message's character set\n # matches the charset of the list's preferred language. This is a\n # suboptimal solution, and should be solved by allowing a list to have\n # multiple headers/footers, for each language the list supports.\n #\n # Also, if the list's preferred charset is us-ascii, we can always\n # safely add the header/footer to a plain text message since all\n # charsets Mailman supports are strict supersets of us-ascii --\n # no, UTF-16 emails are not supported yet.\n #\n # TK: Message with 'charset=' cause trouble. So, instead of\n # mgs.get_content_charset('us-ascii') ...\n mcset = msg.get_content_charset() or 'us-ascii'\n lcset = mlist.preferred_language.charset\n msgtype = msg.get_content_type()\n # BAW: If the charsets don't match, should we add the header and footer by\n # MIME multipart chroming the message?\n wrap = True\n if not msg.is_multipart() and msgtype == 'text/plain':\n # Save the RFC-3676 format parameters.\n format_param = msg.get_param('format')\n delsp = msg.get_param('delsp')\n # Save 'Content-Transfer-Encoding' header in case decoration fails.\n cte = msg.get('content-transfer-encoding')\n # header/footer is now in unicode.\n try:\n oldpayload = msg.get_payload(decode=True).decode(mcset)\n del msg['content-transfer-encoding']\n frontsep = endsep = ''\n if len(header) > 0 and not header.endswith('\\n'):\n frontsep = '\\n'\n if len(footer) > 0 and not oldpayload.endswith('\\n'):\n endsep = '\\n'\n payload = header + frontsep + oldpayload + endsep + footer\n # When setting the payload for the message, try various charset\n # encodings until one does not produce a UnicodeError. We'll try\n # charsets in this order: the list's charset, the message's\n # charset, then utf-8. It's okay if some of these are duplicates.\n for cset in (lcset, mcset, 'utf-8'):\n try:\n msg.set_payload(payload.encode(cset), cset)\n except UnicodeError:\n pass\n else:\n if format_param:\n msg.set_param('format', format_param)\n if delsp:\n msg.set_param('delsp', delsp)\n wrap = False\n break\n except (LookupError, UnicodeError):\n if cte:\n # Restore the original c-t-e.\n del msg['content-transfer-encoding']\n msg['Content-Transfer-Encoding'] = cte\n elif msg.get_content_type() == 'multipart/mixed':\n # The next easiest thing to do is just prepend the header and append\n # the footer as additional subparts\n payload = msg.get_payload()\n if not isinstance(payload, list):\n payload = [payload]\n if len(footer) > 0:\n mimeftr = MIMEText(footer.encode(lcset), 'plain', lcset)\n mimeftr['Content-Disposition'] = 'inline'\n payload.append(mimeftr)\n if len(header) > 0:\n mimehdr = MIMEText(header.encode(lcset), 'plain', lcset)\n mimehdr['Content-Disposition'] = 'inline'\n payload.insert(0, mimehdr)\n msg.set_payload(payload)\n wrap = False\n # If we couldn't add the header or footer in a less intrusive way, we can\n # at least do it by MIME encapsulation. We want to keep as much of the\n # outer chrome as possible.\n if not wrap:\n return\n # Because of the way Message objects are passed around to process(), we\n # need to play tricks with the outer message -- i.e. the outer one must\n # remain the same instance. 
So we're going to create a clone of the outer\n # message, with all the header chrome intact, then copy the payload to it.\n # This will give us a clone of the original message, and it will form the\n # basis of the interior, wrapped Message.\n inner = Message()\n # Which headers to copy? Let's just do the Content-* headers\n for h, v in msg.items():\n if h.lower().startswith('content-'):\n inner[h] = v\n inner.set_payload(msg.get_payload())\n # For completeness\n inner.set_unixfrom(msg.get_unixfrom())\n inner.preamble = msg.preamble\n inner.epilogue = msg.epilogue\n # Don't copy get_charset, as this might be None, even if\n # get_content_charset isn't. However, do make sure there is a default\n # content-type, even if the original message was not MIME.\n inner.set_default_type(msg.get_default_type())\n # BAW: HACK ALERT.\n if hasattr(msg, '__version__'):\n inner.__version__ = msg.__version__\n # Now, play games with the outer message to make it contain three\n # subparts: the header (if any), the wrapped message, and the footer (if\n # any).\n payload = [inner]\n if len(header) > 0:\n mimehdr = MIMEText(header.encode(lcset), 'plain', lcset)\n mimehdr['Content-Disposition'] = 'inline'\n payload.insert(0, mimehdr)\n if len(footer) > 0:\n mimeftr = MIMEText(footer.encode(lcset), 'plain', lcset)\n mimeftr['Content-Disposition'] = 'inline'\n payload.append(mimeftr)\n msg.set_payload(payload)\n del msg['content-type']\n del msg['content-transfer-encoding']\n del msg['content-disposition']\n msg['Content-Type'] = 'multipart/mixed'", "def archive(self):\n\n archive_date = self.time_file.file_date.strftime('%Y-%m-%d')\n self.record('ARCHIVE %s %s' % (archive_date,\n self.time_file.short_info()))\n\n self.keep_only_archive()", "def _archive(self,pools=['backup','archive'],verbose=True,dry_run=False):\n\t\tif type(pools) is not list:\n\t\t\tpools = [pools]\n\n\t\t_start = datetime.today()\n\t\tself.archive_bytes = 0\n\t\tfor pool in pools:\n\t\t\tqueue = self.generate_queue(pool)\n\t\t\tlog.info('%s: %s' % (pool.upper(),queue))\n\t\t\tif len(queue) == 0:\n\t\t\t\tmessage = \"%s Warning: '%s' pool: Nothing to %s.\" % (pool.title(),pool,pool)\n\t\t\t\tlog.info(message)\n\t\t\t\tif verbose:\n\t\t\t\t\tprint \" %s\" % message\n\t\t\t\tcontinue\n\n\t\t\tif verbose:\n\t\t\t\tprint \"\\n ++ %s POOL ++\" % (pool.upper())\n\t\t\t\tprint \" Creating %s of the following files:\" % (pool)\n\n\t\t\t# create a filelist and calculate the size\n\t\t\tfilelist = []\n\t\t\tfor ele in queue:\n\t\t\t\tfilelist.append(ele.abs_path)\n\t\t\t\tself.archive_bytes+=ele.st_size\n\t\t\t\tif verbose:\n\t\t\t\t\tprint \" %s\" % ele.abs_path\n\t\t\n\t\t\t# determine which strategy \n\t\t\t# we're using\n\t\t\tif pool == 'archive':\n\t\t\t\tstrat = 'A'\n\t\t\telif pool == 'backup':\n\t\t\t\tstrat = 'B'\n\t\t\tpath = ' '.join(filelist)\n\n####################### TESTING ###########################3\n#\t\t\tTina.backup(path=path,application='fake_application',strat=strat,dry_run=dry_run)\n####################### TESTING ###########################3\n\t\t\tTina.backup(path=path,application='flame_archive',strat=strat,dry_run=dry_run)\n\t\t_stop = datetime.today()\n\t\tself.archive_delta = (_stop-_start)\n\t\tself.archive_seconds = (_stop-_start).seconds\n\t\tself.archive_size = numberutil.humanize(self.archive_bytes,scale='bytes')\n\t\ttry:\n\t\t\trph = (self.archive_bytes/self.archive_seconds)*3600\n\t\texcept:\n\t\t\trph = 0\n\t\tself.archive_rate = numberutil.humanize(rph,scale='bytes')", "def publish_messages(topic_arn, 
messages):\n sns_client = boto3.client('sns')\n for m in messages:\n message_as_json = json.dumps(m)\n response = sns_client.publish(\n TopicArn=topic_arn,\n MessageStructure='json',\n Message=json.dumps({\n 'default': message_as_json\n }),\n Subject=f'Source: {__file__}'\n )\n response_status = response['ResponseMetadata']['HTTPStatusCode']\n print(f'{message_as_json} -> {topic_arn} [{response_status}]')\n assert response_status == 200, response", "def archive_articles(ids=None, skip_filter=False):\n from crawler.core.models import Article\n articles = Article.objects.ids(ids)\n if not skip_filter:\n articles = articles.should_be_archived()\n\n articles.set_archiving()\n for article in articles:\n try:\n archive_article(article)\n except Exception as e:\n logger.error('An error occured while archiving article', e)\n\n return True", "def archive(po_filename, bl_filename):\n\n # Store archive in same dir as this script\n root = os.path.abspath(os.path.dirname(sys.argv[0]))\n\n po_archive = root + '/po.csv.%s' % datetime.date.today()\n bl_archive = root + '/bl.csv.%s' % datetime.date.today()\n\n shutil.move(po_filename, po_archive)\n shutil.move(bl_filename, bl_archive)\n\n perms = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH\n os.chmod(po_archive, perms)\n os.chmod(bl_archive, perms)" ]
[ "0.8038418", "0.65381896", "0.6135404", "0.6130519", "0.58608985", "0.5820875", "0.5799502", "0.56735307", "0.56417036", "0.56386155", "0.5579611", "0.5579611", "0.55789346", "0.5469102", "0.5454764", "0.5426546", "0.5292785", "0.5283879", "0.5280061", "0.52529144", "0.52458584", "0.520705", "0.52044356", "0.51790106", "0.5147816", "0.5139361", "0.5134146", "0.512804", "0.51187885", "0.51108307" ]
0.79814565
1
Archives all messages for the given contact
def archive_contact_messages(self, org, contact): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def archive_contacts(self, contacts):\n self._post('contact_actions', None, self._build_params(contacts=contacts, action='archive'))", "def archive_messages(self, org, messages):\n pass", "def archive_messages(self, messages):\n self._post('message_actions', None, self._build_params(messages=messages, action='archive'))", "def push_all(self, contacts):\n for ell in contacts:\n self.push(ell)", "def BatchEnqueue(self, action, contact):\n\t\t\n\t\tif action == 'retrieve':\n\t\t\tself.batch_queue.AddQuery(entry=contact, batch_id_string='retrieve')\n\t\telif action == 'create':\n\t\t\tcontact.group_membership_info = [gdata.contacts.data.GroupMembershipInfo(href=self.GetFirstGroupId())]\n\t\t\tself.batch_queue.AddInsert(entry=contact, batch_id_string='create')\n\t\telif action == 'update':\n\t\t\tself.batch_queue.AddUpdate(entry=contact, batch_id_string='update')\n\t\telif action == 'delete':\n\t\t\tself.batch_queue.AddDelete(entry=contact, batch_id_string='delete')", "def writecontactstocsv(self , contact_entries):\n rx = re.compile('\\W+')\n allcontacts = []\n for entry in contact_entries:\n if entry.name is not None and len(entry.phone_number) > 0 and len(entry.group_membership_info) > 0:\n\n # Clean up characters in contact name; replace all non-alphanumerics with spaces\n fullname = entry.name.full_name.text\n fullname = rx.sub(' ', fullname).strip()\n for rawPhoneNumber in entry.phone_number:\n # Remove non-numeric characters from the phone number\n phone_number = re.sub(\"[^0-9]\", \"\", rawPhoneNumber.text)\n # Save contact for later insert\n allcontacts.append((fullname, phone_number))\n\n allcontacts = tuple(set(allcontacts))\n\n csvfilename = \"Downloads/ContactExport\"+time.strftime(\"%Y%m%d-%H%M%S\")+\".csv\"\n csvfile = open(csvfilename, \"w\")\n for csvFullName, csvPhoneNumber in allcontacts:\n line = \"\\\"%s\\\",%s\\n\" % (csvFullName, csvPhoneNumber)\n csvfile.write(line)\n\n csvfile.close()", "def fetch_contact_messages(self, org, contact, created_after, created_before):\n pass", "def RemoveAll(self):\n\t\tcontacts = self.GetContactList()\n\t\t\n\t\tfor contact in contacts:\n\t\t\tself.BatchEnqueue('delete', contact)\n\t\tself.ExecuteBatchQueue()", "def archive_all_cards(self):\r\n url = '{0}/archiveAllCards'.format(self.get_url())\r\n request = http.Request('POST', url)\r\n return request, parsers.parse_json", "def archive_all_cards(self, trello):\n for i in reversed(range(len(self.cards))):\n # Loop through the collection in reverse to avoid indexing\n # errors. 
Don't iterate because archive() will modify the\n # contents of self.cards\n card = self.cards[i]\n card.archive(trello)", "def unarchive_messages(self, messages):\n self._post('message_actions', None, self._build_params(messages=messages, action='unarchive'))", "def fetch_messages_from_imap(host, port, username, password):\n\n with imaplib.IMAP4(host, port=port) as client:\n client.starttls()\n client.login(username, password)\n client.select(\"INBOX\", readonly=False)\n\n client.create(\"Archives\")\n client.create(\"Archives/Crashreport\")\n\n sorted_reply = client.uid(\"SORT\", \"(DATE)\", \"UTF7\", \"ALL\")\n\n if not sorted_reply[0] == \"OK\":\n raise IMAPClientError()\n\n sorted_messages = sorted_reply[1][0].split()\n\n for msg_uid in sorted_messages:\n reply = client.uid(\"FETCH\", msg_uid, \"(RFC822)\")\n\n if reply[0] != \"OK\":\n raise IMAPClientError()\n\n message = email.message_from_bytes(reply[1][0][1])\n\n yield message\n\n # mark message as read and move to archives\n mark_read_reply = client.uid(\"STORE\", msg_uid, \"+FLAGS\", \"(\\\\Seen)\")\n if mark_read_reply[0] != \"OK\":\n raise IMAPClientError()\n\n # moving messages in IMAP unfortunately means copy and delete\n copy_reply = client.uid(\"COPY\", msg_uid, \"Archives/Crashreport\")\n if copy_reply[0] != \"OK\":\n raise IMAPClientError()\n\n delete_reply = client.uid(\"STORE\", msg_uid, \"+FLAGS\", \"(\\\\Deleted)\")\n if delete_reply[0] != \"OK\":\n raise IMAPClientError()\n\n # delete the message immediately\n client.expunge()", "def archive_log(self, f_in, filename):\n if not os.path.isdir('archived'):\n os.makedirs('archived')\n f_out = gzip.open('archived/'+filename+'.gz', 'wb')\n f_out.writelines(f_in)\n f_out.close()\n f_in.close()", "def all_in_contact(cls, contact_id: int):\n for contact_tag in cls.get_all_in(\"contacts\", contact_id):\n yield contact_tag", "def cleanup(self, archive, files):\n mtime = self.test(archive, files)\n backup_home = os.path.join(self.download_dir, '-')\n if not os.path.exists(backup_home):\n os.makedirs(backup_home)\n backup_dir = tempfile.mkdtemp('', datetime.utcnow().strftime(\"%Y-%m-%d_\"), backup_home)\n for file in files:\n os.makedirs(os.path.join(backup_dir, file))\n if os.path.getmtime(file) != mtime[file]:\n raise RuntimeError(\"Failed to cleanup archived data: %s has been modified.\" % file)\n os.rename(file, os.path.join(backup_dir, file))\n self.log.debug(\"Moved %s to %s\" % (file, os.path.join(backup_dir, file)))\n return", "def archive_logs():\n logging.info('Archive start...')\n\n for log_dir in filter(dir_filter, os.listdir('logs')):\n path = 'logs/{}'.format(log_dir)\n archive_files = filter(lambda x: '.log.' 
in x, os.listdir(path))\n zip_file_name = '{}/{}.zip'.format(\n path,\n str(datetime.now())\n .replace(' ', '_').replace('.', '_').replace(':', '_'))\n zip_file = zipfile.ZipFile(\n zip_file_name, mode='w', compression=zipfile.ZIP_DEFLATED)\n for f in archive_files:\n log_file = '{}/{}'.format(path, f)\n zip_file.write(log_file)\n os.remove(log_file)\n\n logging.info('Archive end.')", "def remove_all(self):\n if self._processed:\n res, messages = self._mailconn.search(None, 'ALL')\n if res == 'OK':\n for msg in messages[0].split():\n res, data = self._mailconn.store(msg.decode('utf-8'), '+FLAGS', '\\\\Deleted')\n print(res)", "def tests_unarchives(self):\n group = self.create_group()\n user1 = self.create_user()\n user2 = self.create_user()\n\n user1.add_to_group(group.pk)\n user2.add_to_group(group.pk)\n\n thread = self.create_thread(\n group=group, sender=user1, create_recipient=False)\n\n # Since we mock-out send_message in `Message.save()` we have to call\n # `send_message` directly to generate userthreads\n send_message(thread.first_message.pk)\n\n thread.userthread_set.update(status='archived')\n self.assertEqual(\n thread.userthread_set.filter(\n status='archived').count(),\n 2\n )\n\n newmessage = mommy.make(Message, thread=thread, sender=user1)\n send_message(newmessage.pk)\n\n # Author of new message should still have thread archived\n self.assertTrue(\n thread.userthread_set.filter(\n status='archived', user=user1).exists()\n )\n # User 2 should have the thread unarchived\n self.assertTrue(\n thread.userthread_set.filter(\n status='active', user=user2).exists()\n )", "def _write_contact(self, size, card_writer):\n msg = []\n if (self.bcrparas or self.bctadds or self.bctparas or self.bctsets\n or self.bsurf or self.bsurfs):\n msg.append('$CONTACT\\n')\n for (unused_id, bcrpara) in sorted(self.bcrparas.iteritems()):\n msg.append(bcrpara.write_bdf(size, card_writer))\n for (unused_id, bctadds) in sorted(self.bctadds.iteritems()):\n msg.append(bctadds.write_bdf(size, card_writer))\n for (unused_id, bctpara) in sorted(self.bctparas.iteritems()):\n msg.append(bctpara.write_bdf(size, card_writer))\n\n for (unused_id, bctset) in sorted(self.bctsets.iteritems()):\n msg.append(bctset.write_bdf(size, card_writer))\n for (unused_id, bsurfi) in sorted(self.bsurf.iteritems()):\n msg.append(bsurfi.write_bdf(size, card_writer))\n for (unused_id, bsurfsi) in sorted(self.bsurfs.iteritems()):\n msg.append(bsurfsi.write_bdf(size, card_writer))\n return ''.join(msg)", "def purge(self, message_list, action, userId='me'):\n\n count = 0\n for item in message_list:\n if action.lower() == 'archive':\n resource = getattr(self.connection.users().messages(), 'modify')\n dynamic_request = resource(userId=userId, id=message_list[item], body=\n {\n \"removeLabelIds\": [ \"INBOX\" ]\n })\n else:\n resource = getattr(self.connection.users().messages(), action)\n dynamic_request = resource(userId=userId, id=message_list[item])\n\n try:\n response = dynamic_request.execute()\n count += 1\n print(f'[√] Action: {action} - {count} of {len(message_list)} - Message ID: {message_list[item]}')\n except googleapiclient.errors.HttpError as error:\n if error.resp.status == 404:\n print(f'[X] Error: ID {message_list[item]} Not Found')\n else:\n print(f'[X] Error: ID {mesage_list[item]} {error}')\n count -= 1\n print(f'[√] Processed: {count} of {len(message_list)} Messages')\n return True", "def consolidate_messages(self, msg):", "def _archive_logs(self, logdir, files):\n cwd = os.getcwd()\n archive_wd = 
os.path.dirname(logdir)\n archive_file = os.path.basename(logdir) + \".tgz\"\n\n # move files into logdir for archive\n for f in files:\n self.logger.info(\"moving '%s' to archive folder\" % f)\n shutil.move(f, logdir)\n\n # move to logdir parent folder\n self.logger.info(\"archiving profile logs into '%s'\" % archive_file)\n os.chdir(archive_wd)\n archive = tarfile.open(archive_file, \"w:gz\")\n archive.add(os.path.basename(logdir))\n archive.close()\n\n # go back to current working dir and remove logdir\n os.chdir(cwd)\n shutil.rmtree(logdir)", "def expire_contacts(self, contacts):\n self._post('contact_actions', None, self._build_params(contacts=contacts, action='expire'))", "def flush(self):\n if not self._writer:\n raise RuntimeError('Archive not opened')\n\n # Reinit file\n self._reset_archive_file()\n self._writer.writeheader()\n\n # Write data timestamp by timestamp\n for _, entry in self._entries.items():\n self._writer.writerow(entry)", "async def archive(self):\n request = self._state.archive_team_thread(self.team_id, self.group_id, self.id)\n await request", "def delete_contacts(self, contacts):\n self._post('contact_actions', None, self._build_params(contacts=contacts, action='delete'))", "def archive_bag_sns_messages(bags, bucket):\n for bag in bags:\n request_id = str(uuid.uuid4())\n yield {\n 'archiveRequestId': request_id,\n 'zippedBagLocation': {\n 'namespace': bucket,\n 'key': bag\n }\n }", "def archive(ctx, config):\n log.info('Creating archive directory...')\n archive_dir = misc.get_archive_dir(ctx)\n run.wait(\n ctx.cluster.run(\n args=[\n 'install', '-d', '-m0755', '--', archive_dir,\n ],\n wait=False,\n )\n )\n\n try:\n yield\n except Exception:\n # we need to know this below\n set_status(ctx.summary, 'fail')\n raise\n finally:\n passed = get_status(ctx.summary) == 'pass'\n if ctx.archive is not None and \\\n not (ctx.config.get('archive-on-error') and passed):\n log.info('Transferring archived files...')\n logdir = os.path.join(ctx.archive, 'remote')\n if (not os.path.exists(logdir)):\n os.mkdir(logdir)\n for rem in ctx.cluster.remotes.iterkeys():\n path = os.path.join(logdir, rem.shortname)\n misc.pull_directory(rem, archive_dir, path)\n # Check for coredumps and pull binaries\n fetch_binaries_for_coredumps(path, rem)\n\n log.info('Removing archive directory...')\n run.wait(\n ctx.cluster.run(\n args=[\n 'rm',\n '-rf',\n '--',\n archive_dir,\n ],\n wait=False,\n ),\n )", "def archive(self):\n\n archive_date = self.time_file.file_date.strftime('%Y-%m-%d')\n self.record('ARCHIVE %s %s' % (archive_date,\n self.time_file.short_info()))\n\n self.keep_only_archive()", "def test_archive(self):\n thread = self.create_thread()\n ut = UserThread.objects.get(\n user=thread.recipients.first(), thread=thread)\n ut_id = ut.pk\n ut.archive()\n ut = UserThread.objects.get(pk=ut_id)\n self.assertEqual(ut.status, 'archived')" ]
[ "0.70698756", "0.65666515", "0.6257201", "0.55854166", "0.51264703", "0.5064041", "0.4993844", "0.49865454", "0.49637476", "0.4916462", "0.49034166", "0.48514304", "0.48303494", "0.47683123", "0.4719243", "0.47067264", "0.47029313", "0.46990845", "0.46645817", "0.46523926", "0.46175334", "0.46106166", "0.46092942", "0.45632294", "0.45452183", "0.45120892", "0.45090142", "0.4482449", "0.44778126", "0.44337553" ]
0.80612326
0
Restores (unarchives) the given messages
def restore_messages(self, org, messages): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unarchive_messages(self, messages):\n self._post('message_actions', None, self._build_params(messages=messages, action='unarchive'))", "def archive_messages(self, messages):\n self._post('message_actions', None, self._build_params(messages=messages, action='archive'))", "def archive_messages(self, org, messages):\n pass", "def unflag_messages(self, org, messages):\n pass", "def restore(self, reader):\n while True:\n msg = reader.read()\n if msg is None:\n break\n self.publish(msg)", "def _deconstruct_messages(snuba_messages):\n return [\n (json.loads(msg.payload.value.decode(\"utf-8\")), msg.payload.headers)\n for msg in snuba_messages\n ]", "def clear_messages(self):\n with self.message_lock:\n self.messages = self.messages[self._processed_messages:]\n self._processed_messages = 0", "def trash(self, messages):\n messages.update(deleted=True, deleted_at=datetime.datetime.now())", "def send_messages(messages):\n while messages:\n msg = messages.pop()\n sent_messages.append(msg)", "def remove_all(self):\n if self._processed:\n res, messages = self._mailconn.search(None, 'ALL')\n if res == 'OK':\n for msg in messages[0].split():\n res, data = self._mailconn.store(msg.decode('utf-8'), '+FLAGS', '\\\\Deleted')\n print(res)", "def tests_unarchives(self):\n group = self.create_group()\n user1 = self.create_user()\n user2 = self.create_user()\n\n user1.add_to_group(group.pk)\n user2.add_to_group(group.pk)\n\n thread = self.create_thread(\n group=group, sender=user1, create_recipient=False)\n\n # Since we mock-out send_message in `Message.save()` we have to call\n # `send_message` directly to generate userthreads\n send_message(thread.first_message.pk)\n\n thread.userthread_set.update(status='archived')\n self.assertEqual(\n thread.userthread_set.filter(\n status='archived').count(),\n 2\n )\n\n newmessage = mommy.make(Message, thread=thread, sender=user1)\n send_message(newmessage.pk)\n\n # Author of new message should still have thread archived\n self.assertTrue(\n thread.userthread_set.filter(\n status='archived', user=user1).exists()\n )\n # User 2 should have the thread unarchived\n self.assertTrue(\n thread.userthread_set.filter(\n status='active', user=user2).exists()\n )", "def _deconstruct_routing_messages(snuba_messages):\n all_messages = []\n for msg in snuba_messages:\n headers: MutableMapping[str, str] = {}\n for key, value in msg.payload.routing_header.items():\n headers.update({key: value})\n\n payload = json.loads(msg.payload.routing_message.value.decode(\"utf-8\"))\n\n all_messages.append((headers, payload, msg.payload.routing_message.headers))\n\n return all_messages", "def save_messages(messages):\n with open('messages.pkl', 'wb') as save_file:\n pickle.dump(messages, save_file)", "def on_undo(self, messages):\n assert all(message.name in (u\"dispersy-undo-own\", u\"dispersy-undo-other\") for message in messages)\n if __debug__:\n for message in messages:\n dprint(message.candidate, \" \", message.authentication.member.mid.encode(\"HEX\"), \" #\", message.distribution.sequence_number, \" @\", message.distribution.global_time)\n\n self._database.executemany(u\"UPDATE sync SET undone = ? WHERE community = ? AND member = ? 
AND global_time = ?\",\n ((message.packet_id, message.community.database_id, message.payload.member.database_id, message.payload.global_time) for message in messages))\n for meta, iterator in groupby(messages, key=lambda x: x.payload.packet.meta):\n sub_messages = list(iterator)\n meta.undo_callback([(message.payload.member, message.payload.global_time, message.payload.packet) for message in sub_messages])\n\n # notify that global times have changed\n # meta.community.update_sync_range(meta, [message.payload.global_time for message in sub_messages])\n\n # this might be a response to a dispersy-missing-sequence\n self.handle_missing_messages(messages, MissingSequenceCache)", "def process_messages(self, messages):\n\n return messages", "def _unremediate_email_o365_EWS(emails):\n assert emails\n assert all([len(e) == 2 for e in emails])\n\n result = [] # tuple(message_id, recipient, result_code, result_text)\n \n # get the hostname and port for our EWS proxy system\n # this system receives requests for remediation and restorations and submits them to EWS on our behalf\n ews_host = saq.CONFIG['remediation']['ews_host']\n ews_port = saq.CONFIG['remediation'].getint('ews_port')\n\n # the format of each request is a POST to\n # https://host:port/delete\n # with JSON as the POST data content\n \n # note that we make a separate request for each one\n url = 'https://{}:{}/restore'.format(saq.CONFIG['remediation']['ews_host'], saq.CONFIG['remediation']['ews_port'])\n session = requests.Session()\n data = { 'recipient': None, 'message_id': None }\n headers = { 'Content-Type': 'application/json' }\n \n for message_id, recipient in emails:\n\n try:\n if recipient.startswith('<'):\n recipient = recipient[1:]\n if recipient.endswith('>'):\n recipient = recipient[:-1]\n\n data['recipient'] = recipient\n data['message_id'] = message_id\n json_data = json.dumps(data)\n\n logging.info(\"restoring message_id {} to {}\".format(message_id, recipient))\n r = session.post(url, headers=headers, data=json_data, verify=False)\n logging.info(\"got result {} text {} for message_id {} to {}\".format(r.status_code, r.text, message_id, recipient))\n result.append((message_id, recipient, r.status_code, r.text))\n except Exception as e:\n error_message = 'unable to restore message_id {} to {}: {}'.format(message_id, recipient, str(e))\n logging.error(error_message)\n report_exception()\n result.append((message_id, recipient, 'N/A', str(e)))\n\n return result", "def restore(self):\n self._result.unparse_seq = self._unparse_seq", "def recover(self):\n if self._message_storage:\n for neighbor in self.neighbors:\n self.channel.queue_declare(queue=str(self.id) + str(neighbor))\n for message in self._message_storage:\n self.channel.basic_publish(\n exchange=\"\",\n routing_key=str(self.id) + str(neighbor),\n body=message,\n )\n\n for neighbor in self.neighbors:\n for _, _, body in self.channel.consume(\n queue=str(neighbor) + str(self.id), auto_ack=True, inactivity_timeout=5\n ):\n if body is not None:\n message = body.decode(\"utf-8\")\n if message != \"marker\":\n self.states.append(message)\n else:\n self.channel.cancel()", "def message_remove(request, undo=False):\n message_pks = request.POST.getlist('message_pks')\n redirect_to = request.REQUEST.get('next', False)\n\n if message_pks:\n # Check that all values are integers.\n valid_message_pk_list = set()\n for pk in message_pks:\n try: valid_pk = int(pk)\n except (TypeError, ValueError): pass\n else:\n valid_message_pk_list.add(valid_pk)\n\n # Delete all the messages, if 
they belong to the user.\n now = datetime.datetime.now()\n changed_message_list = set()\n for pk in valid_message_pk_list:\n message = get_object_or_404(Message, pk=pk)\n\n # Check if the user is the owner\n if message.sender == request.user:\n if undo:\n message.sender_deleted_at = None\n else:\n message.sender_deleted_at = now\n message.save()\n changed_message_list.add(message.pk)\n\n # Check if the user is a recipient of the message\n if request.user in message.recipients.all():\n mr = message.messagerecipient_set.get(user=request.user,\n message=message)\n if undo:\n mr.deleted_at = None\n else:\n mr.deleted_at = now\n mr.save()\n changed_message_list.add(message.pk)\n\n # Send messages\n if (len(changed_message_list) > 0):\n if undo:\n message = ungettext('Message is succesfully restored.',\n 'Messages are succesfully restored.',\n len(changed_message_list))\n else:\n message = ungettext('Message is successfully removed.',\n 'Messages are successfully removed.',\n len(changed_message_list))\n\n messages.success(request, message, fail_silently=True)\n\n if redirect_to: return redirect(redirect_to)\n else: return redirect(reverse('socialapps_messages_list'))", "def flushMsgs(self):\n\n self.queue = self.pre_queue[:]\n self.pre_queue = []", "def pop_messages(self):\n msge = self.received_messages\n self.received_messages = []\n return msge", "def clear_messages(self):\n self.redis_client.delete(self.message_list)", "def consolidate_messages(self, msg):", "def fetch_messages_from_imap(host, port, username, password):\n\n with imaplib.IMAP4(host, port=port) as client:\n client.starttls()\n client.login(username, password)\n client.select(\"INBOX\", readonly=False)\n\n client.create(\"Archives\")\n client.create(\"Archives/Crashreport\")\n\n sorted_reply = client.uid(\"SORT\", \"(DATE)\", \"UTF7\", \"ALL\")\n\n if not sorted_reply[0] == \"OK\":\n raise IMAPClientError()\n\n sorted_messages = sorted_reply[1][0].split()\n\n for msg_uid in sorted_messages:\n reply = client.uid(\"FETCH\", msg_uid, \"(RFC822)\")\n\n if reply[0] != \"OK\":\n raise IMAPClientError()\n\n message = email.message_from_bytes(reply[1][0][1])\n\n yield message\n\n # mark message as read and move to archives\n mark_read_reply = client.uid(\"STORE\", msg_uid, \"+FLAGS\", \"(\\\\Seen)\")\n if mark_read_reply[0] != \"OK\":\n raise IMAPClientError()\n\n # moving messages in IMAP unfortunately means copy and delete\n copy_reply = client.uid(\"COPY\", msg_uid, \"Archives/Crashreport\")\n if copy_reply[0] != \"OK\":\n raise IMAPClientError()\n\n delete_reply = client.uid(\"STORE\", msg_uid, \"+FLAGS\", \"(\\\\Deleted)\")\n if delete_reply[0] != \"OK\":\n raise IMAPClientError()\n\n # delete the message immediately\n client.expunge()", "def unlabel_messages(self, org, messages, label):\n pass", "def post(self):\n args = request.args or request.json\n if not args:\n args = {}\n services.file.restore_files(**args)\n return {\n \"status\": True\n }", "def _store(self, messages, response, remove_oldest=True, *args, **kwargs):\n unstored_messages = []\n serialized_messages = MessagePartSerializer().dumps(messages)\n encoded_data = self._encode_parts(serialized_messages)\n if self.max_cookie_size:\n # data is going to be stored eventually by SimpleCookie, which\n # adds its own overhead, which we must account for.\n cookie = SimpleCookie() # create outside the loop\n\n def is_too_large_for_cookie(data):\n return data and len(cookie.value_encode(data)[1]) > self.max_cookie_size\n\n def compute_msg(some_serialized_msg):\n return 
self._encode_parts(\n some_serialized_msg + [self.not_finished_json],\n encode_empty=True,\n )\n\n if is_too_large_for_cookie(encoded_data):\n if remove_oldest:\n idx = bisect_keep_right(\n serialized_messages,\n fn=lambda m: is_too_large_for_cookie(compute_msg(m)),\n )\n unstored_messages = messages[:idx]\n encoded_data = compute_msg(serialized_messages[idx:])\n else:\n idx = bisect_keep_left(\n serialized_messages,\n fn=lambda m: is_too_large_for_cookie(compute_msg(m)),\n )\n unstored_messages = messages[idx:]\n encoded_data = compute_msg(serialized_messages[:idx])\n\n self._update_cookie(encoded_data, response)\n return unstored_messages", "def purge(self, message_list, action, userId='me'):\n\n count = 0\n for item in message_list:\n if action.lower() == 'archive':\n resource = getattr(self.connection.users().messages(), 'modify')\n dynamic_request = resource(userId=userId, id=message_list[item], body=\n {\n \"removeLabelIds\": [ \"INBOX\" ]\n })\n else:\n resource = getattr(self.connection.users().messages(), action)\n dynamic_request = resource(userId=userId, id=message_list[item])\n\n try:\n response = dynamic_request.execute()\n count += 1\n print(f'[√] Action: {action} - {count} of {len(message_list)} - Message ID: {message_list[item]}')\n except googleapiclient.errors.HttpError as error:\n if error.resp.status == 404:\n print(f'[X] Error: ID {message_list[item]} Not Found')\n else:\n print(f'[X] Error: ID {mesage_list[item]} {error}')\n count -= 1\n print(f'[√] Processed: {count} of {len(message_list)} Messages')\n return True", "async def deliver(self, messages: EmailMessage | Iterable[EmailMessage]) -> None:", "def expunge(self):\n delete = []\n for i in self.messages:\n if '\\\\Deleted' in i[1]:\n delete.append(i)\n for i in delete:\n self.messages.remove(i)\n return [i[3] for i in delete]" ]
[ "0.77775854", "0.65339184", "0.6420946", "0.62216944", "0.5984249", "0.5519885", "0.54908043", "0.54738826", "0.54212093", "0.54081935", "0.53895754", "0.53652024", "0.533392", "0.53127706", "0.5307849", "0.5288503", "0.5266448", "0.5256292", "0.5251975", "0.5245401", "0.5212375", "0.5173845", "0.5144381", "0.5114913", "0.510804", "0.51033354", "0.50713295", "0.50605255", "0.5035411", "0.50350046" ]
0.7322216
1
Flags the given messages
def flag_messages(self, org, messages): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unflag_messages(self, org, messages):\n pass", "def mark_messages_flagged(self, mbox, msgset):\n self._add_flag(mbox, msgset, r'(\\Flagged)')", "def test_filter_messages_non_message(self):\n pass", "def test_flag(self):\n recipient = self.create_user()\n thread = self.create_thread(recipient=recipient)\n message = thread.first_message\n self.assertEqual(message.status, 'approved')\n message.flag(recipient)\n self.assertEqual(message.flags.count(), 1)\n self.assertEqual(message.status, 'flagged')", "def consolidate_messages(self, msg):", "def set_append_messages(self, flag):\n\t\tself._appendMessages = bool(flag)", "def test_filter_messages(self):\n pass", "def comsume_msg(self, msg_type):", "def manage_messages(_) -> int:\n return 1 << 13", "def manage_messages(_) -> int:\n return 1 << 13", "def on_process_message(self, msg):\n # 过滤标点符号\n msg.text = re.sub(self.RE_PUNCTUATION, '', msg.text)\n\n # deny\n deny = self.RE_DENEY.findall(msg.text)\n if len(deny) != 0:\n msg.intent = 'deny'\n \n # confirm\n confirm = self.RE_CONFIRM.findall(msg.text)\n if len(confirm) != 0:\n msg.intent = 'confirm'", "def set_append_messages(self, flag):\n\t\tself.checkAppendMessages.set_active(flag)", "def __call__(self, *args, **kwargs):\n return self._enabled, self.message", "def list_messages(self):", "def collect_allowed(message):\n return True", "def mark_messages_unflagged(self, mbox, msgset):\n self._remove_flag(mbox, msgset, r'(\\Flagged)')", "def patchMessages():\n import OnlineEnv as Online\n app=Gaudi.ApplicationMgr()\n Configs.AuditorSvc().Auditors = []\n app.MessageSvcType = 'LHCb::FmcMessageSvc'\n if Gaudi.allConfigurables.has_key('MessageSvc'):\n del Gaudi.allConfigurables['MessageSvc']\n msg = Configs.LHCb__FmcMessageSvc('MessageSvc')\n msg.fifoPath = os.environ['LOGFIFO']\n msg.LoggerOnly = True\n msg.doPrintAlways = False\n# msg.OutputLevel = MSG_WARNING\n# msg.OutputLevel = Online.OutputLevel\n msg.OutputLevel = MSG_INFO", "def process_messages(self):\n for each_message in self.unprocessed_messages:\n if not ( 'message_type' in each_message):\n logging.error(\"(%s:%d) invalid message found...ignoring the message\",\\\n self.ip, self.port)\n else:\n if ( each_message['message_type'] is 'unchoke'):\n self.is_choking = 0\n elif ( each_message['message_type'] is 'choke'):\n self.is_choking = 1\n elif ( each_message['message_type'] is 'interested'):\n self.is_interested = 1\n elif ( each_message['message_type'] is 'not interested'):\n self.is_interested = 0\n elif ( each_message['message_type'] is 'have'):\n self.pieces.append(each_message['piece_index'])\n elif ( each_message['message_type'] is 'bitfield'):\n bitfield = each_message['bitfield']\n for index, each_bit in enumerate(bitfield):\n if ( each_bit is '1'):\n self.pieces.append(index)", "def flags(self) -> UserFlag:", "def process_messages(self):\n pass", "def take_action_on_flags(self, *args, **kwargs):\r\n pass", "def _apply_msg_filter(self,message):\n \n for h in self._message_handlers:\n if h.filter(message):\n h.handler(message)\n break", "def _add_flag(self, mbox, msgset, flag):\n self.select_mailbox(mbox, False)\n self._cmd(\"STORE\", msgset, \"+FLAGS\", flag)", "def check_message(self, msg):\n pass", "def send_messages(_) -> int:\n return 1 << 11", "def send_messages(_) -> int:\n return 1 << 11", "def astral(msg):\r\n return any(ord(c) > 0xFFFF for c in msg)", "def _listen_(self, msg):\n for word in msg.msg.split(' '):\n if word[:-2].lower() == msg.user.name.lower():\n return msg.prefix + \\\n _('Don\\'t even think of 
modifying your own karma.')\n else:\n if word[-2:] == '++':\n self._xcrease(True, word[:-2].lower())\n elif word[-2:] == '--':\n self._xcrease(False, word[:-2].lower())", "def label_messages(self, org, messages, label):\n pass", "def handleMessage(msg):" ]
[ "0.62548643", "0.60290325", "0.59306383", "0.59045357", "0.5755508", "0.57112575", "0.5688638", "0.5659094", "0.5653459", "0.5653459", "0.56464344", "0.5549997", "0.5515646", "0.55129987", "0.5499277", "0.5494998", "0.54843855", "0.548197", "0.5480137", "0.5478573", "0.5472908", "0.5447711", "0.5431738", "0.54144037", "0.54074115", "0.54074115", "0.5378595", "0.5372335", "0.53626335", "0.53501296" ]
0.79243565
0
Unflags the given messages
def unflag_messages(self, org, messages): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mark_messages_unflagged(self, mbox, msgset):\n self._remove_flag(mbox, msgset, r'(\\Flagged)')", "def unlabel_messages(self, org, messages, label):\n pass", "def unarchive_messages(self, messages):\n self._post('message_actions', None, self._build_params(messages=messages, action='unarchive'))", "def _remove_flag(self, mbox, msgset, flag):\n self.select_mailbox(mbox, False)\n self._cmd(\"STORE\", msgset, \"-FLAGS\", flag)", "def unlabel_messages(self, messages, label=None, label_uuid=None):\n payload = self._build_params(messages=messages, action='unlabel', label=label, label_uuid=label_uuid)\n self._post('message_actions', None, payload)", "def resetFlags():\r\n for flag in flags:\r\n flags[flag] = False", "def flag_messages(self, org, messages):\n pass", "def restore_messages(self, org, messages):\n pass", "def unmask_all(self):\n self.Active.mask = False\n self.Confirmed.mask = False\n self.Deaths.mask = False\n self.NewDeaths.mask = False\n self.NewCases.mask = False", "def test_removeFlags(self):\n self._flagsTest('removeFlags', b'-FLAGS')", "def mark_messages_unread(self, mbox, msgset):\n self._remove_flag(mbox, msgset, r'(\\Seen)')", "def clear_flags(self):\n self.flags.clear()", "def unset(self, *options: str) -> int:\n self.flags &= ~(self.mask(*options))\n return self.flags", "def flags_decomposer(flags):\n l = []\n if flags & 2 ** 0:\n l.append(\"superscript\")\n if flags & 2 ** 1:\n l.append(\"italic\")\n if flags & 2 ** 2:\n l.append(\"serifed\")\n else:\n l.append(\"sans\")\n if flags & 2 ** 3:\n l.append(\"monospaced\")\n else:\n l.append(\"proportional\")\n if flags & 2 ** 4:\n l.append(\"bold\")\n return \", \".join(l)", "def deletemessageslabels(self, uidlist, labels):\n\n labels = labels - self.ignorelabels\n result = self._messagelabels_aux('-X-GM-LABELS', uidlist, labels)\n if result:\n for uid in uidlist:\n self.messagelist[uid]['labels'] = self.messagelist[uid]['labels'] - labels", "def suppressMessages():\n dislin.unit(0)", "def decode_flags(flags):\n if isinstance(flags, six.string_types):\n flags = int(flags.lstrip('@'), 16)\n return [name for i, name in enumerate(flag_names) if (1 << i) & flags]", "async def clrreact(ctx, msg: discord.Message, *args: discord.Member):\n users = args\n if (not users):\n await msg.clear_reactions()\n await ctx.send(\"Cleared all reactions on message.\")\n else:\n for u in users:\n for r in msg.reactions:\n await r.remove(u)\n await ctx.send(f\"Cleared reactions on message from {len(users)} user(s).\")", "def decompose(msg):\n rtn = {}\n\n if Dock.is_dock_msg(msg):\n rtn = {\n Dock.RED_BUOY: bool(msg & Dock.RED_BUOY),\n Dock.GREEN_BUOY: bool(msg & Dock.GREEN_BUOY),\n Dock.FORCE_FIELD: bool(msg & Dock.FORCE_FIELD)\n }\n\n return rtn", "def test_removeFlagsSilentlyWithUnsolicitedData(self):\n self._flagsSilentlyWithUnsolicitedDataTest('removeFlags', b'-FLAGS.SILENT')", "def setDiscardFlags(self, flags):\r\n self.__data.discardFlags = flags", "def clean(self, uid, states=None):\n\n # doesn't change status", "def unapproved(message):\n hf.query_users(message, hf.get_users(), \"unapproved\")", "def user_iflags_erase(*args):\n return _ida_hexrays.user_iflags_erase(*args)", "def _deconstruct_messages(snuba_messages):\n return [\n (json.loads(msg.payload.value.decode(\"utf-8\")), msg.payload.headers)\n for msg in snuba_messages\n ]", "def check_clear_flags(self):\n self._command(self.commands[\"CLEAR_ERROR_FLAGS\"])\n self._command(self.commands[\"CLEAR_REBOOTED_FLAG\"])", "def unsetMessage(self):\n return 
_libsbml.Constraint_unsetMessage(self)", "def clear_messages(self):\n with self.message_lock:\n self.messages = self.messages[self._processed_messages:]\n self._processed_messages = 0", "def unlex(tokens):", "def expunge(self):\n delete = []\n for i in self.messages:\n if '\\\\Deleted' in i[1]:\n delete.append(i)\n for i in delete:\n self.messages.remove(i)\n return [i[3] for i in delete]" ]
[ "0.68880147", "0.67857444", "0.6343915", "0.6263609", "0.6074593", "0.59990966", "0.58899224", "0.5773783", "0.5693331", "0.56825656", "0.56801707", "0.5496917", "0.5395119", "0.532583", "0.5256164", "0.5232614", "0.5206068", "0.5148679", "0.51203686", "0.5083705", "0.5058738", "0.50416607", "0.5037007", "0.50269645", "0.50175226", "0.5017091", "0.4997816", "0.49847946", "0.49645272", "0.4949473" ]
0.8338598
0
Fetches a contact's incoming and outgoing messages to display on a case timeline
def fetch_contact_messages(self, org, contact, created_after, created_before): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def receiveContactList(self, contactList):", "def contacts(request):\n User = get_user_model()\n ids = set(request.user.chatmessage_set.all().values_list(\"recipients\", flat=True))\n context = {\n 'contacts': User.objects.filter(pk__in=ids)\n }\n return render(request, \"chat/contacts.html\", context)", "def test_get_contact_msgs(self):\n\n contact_msgs = sentiment_analysis.get_contacts_msgs(7)\n \n self.assertEqual(contact_msgs[14][0].message_text, u'This is an english test message')", "def load_messages():\n # rows = db(db.bds.author == auth.user_id).select()\n rows = db(db.contacts).select()\n d = {r.contact_id: {'name': r.name}\n for r in rows}\n return response.json(dict(board_dict=d))", "def get_contacts_list(self):\n contacts = self.driver.find_elements_by_class_name(\"_1wjpf\")\n s= [contact.text for contact in contacts] #extracts chats and last messsages\n print (\"get contacts: \"+str(s)) #print only chat names\n return s[::2] #returns only chat names", "def get_contact_interactions(request, pk):\n try:\n contact = Contact.objects.get(pk=pk)\n except Contact.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n ans = []\n interactions = contact.interaction_set.all()\n for interaction in interactions:\n ans.append(InteractionSerializer(interaction).data)\n return Response(ans)", "def getcontacts():\n contacts = {}\n\n try:\n #get list of contact ids\n contactids = r.smembers(\"contacts\")\n\n #for each contact id get data\n for contactid in contactids:\n contacts.update(_getcontact(str(contactid)))\n return contacts\n except:\n print \"Unexpected error:\", sys.exc_info()[0]\n raise", "def contact(self, request, **kwargs):\n group_obj = self.get_object()\n contact_data = group_obj.contacts.all()\n if contact_data is not None:\n serializer_data = ContactSerializer(contact_data, many=True)\n return Response(serializer_data.data)\n else:\n return Response({'message': 'No details found for contact of this group'}, status=status.HTTP_404_NOT_FOUND)", "def contact_profile(request, key):\n\n ret = {}\n\n ret['number'] = key.split('@')[0][2:]\n ret['jid'] = key\n ret['activity'] = get_activity_data(key)\n ret['whatsapp'] = 0\n\n ret['messages'] = Messages.objects.using('msgstore').filter(key_remote_jid=key).count()\n tstamp = Messages.objects.using('msgstore').filter(key_remote_jid=key).values('timestamp').order_by('timestamp')[0:1][0]['timestamp']\n ret['first_seen'] = timestamp2utc(float(tstamp) / 1000)\n tstamp = Messages.objects.using('msgstore').filter(key_remote_jid=key).values('timestamp').order_by('-timestamp')[0:1][0]['timestamp']\n ret['last_seen'] = timestamp2utc(float(tstamp) / 1000)\n ret['media_messages'] = Messages.objects.using('msgstore').filter(key_remote_jid=key).exclude(media_url__isnull=True).count()\n ret['gps_messages'] = Messages.objects.using('msgstore').filter(key_remote_jid=key).exclude((Q(longitude='0.0') | Q(latitude='0.0'))).count()\n\n # no wa_contacts table available\n if not 'wa_contacts' in connection.introspection.table_names():\n ret['name'] = 'Not in contacts'\n ret['status'] = 'N/A'\n if ret['messages'] > 0:\n ret['whatsapp'] = 1\n else:\n ret['name'] = WaContacts.objects.filter(jid=key).values('display_name')[0]['display_name']\n ret['whatsapp'] = WaContacts.objects.filter(jid=key).values('is_whatsapp_user')[0]['is_whatsapp_user']\n ret['status'] = WaContacts.objects.filter(jid=key).values('status')[0]['status']\n\n # if it's a group, get participants information\n peers = None\n if '-' in key:\n peers = []\n aux = 
Messages.objects.using('msgstore').filter(key_remote_jid=key).exclude(Q(remote_resource = '')).values('remote_resource').distinct()\n for peer in aux:\n peer = peer['remote_resource']\n if not 'wa_contacts' in connection.introspection.table_names():\n name = peer\n else:\n name = WaContacts.objects.filter(jid=peer).values('display_name')[0]\n \n count = Messages.objects.using('msgstore').filter(Q(key_remote_jid=key) & Q(remote_resource = peer)).count()\n \n peers.append({'id': peer , 'name': name , 'count': count})\n\n return render_to_response('whatsapp/profile.html', {'contact': ret, 'activity': ret['activity'] , 'peers': peers }, context_instance=RequestContext(request))", "def get_conversation(request):\n collected_values = {}\n\n # Only allow GET requests for this endpoint\n if request.method != 'GET':\n collected_values[\"success\"] = False\n collected_values[\"errmsg\"] = \"Wrong HTTP verb\"\n return JsonResponse(collected_values, status=400)\n\n # Extract and form params\n uid = request.GET['uid']\n oid = request.GET['oid']\n token = request.GET['token']\n ts_query = request.GET['ts']\n time_user_seen = request.GET.get('tus')\n limit = int(request.GET['limit'])\n\n if ts_query == \"\":\n ts_query = timezone.now()\n\n change_user_seen = False\n if time_user_seen == \"true\":\n change_user_seen = True\n\n # Check if token is valid\n is_valid, collected_values[\"token\"] = check_auth(uid, token, timezone.now())\n if not is_valid:\n collected_values[\"success\"] = False\n collected_values[\"errmsg\"] = \"Invalid Token\"\n return JsonResponse(collected_values, status=400)\n\n # Collect all messages sent by two users in question listed by created at time\n message_query_set = Messages.objects.filter(\n Q(user_id=uid, other_id=oid) |\n Q(other_id=uid, user_id=oid)).order_by('-created_at')[:limit]\n\n # Collect all messages from query\n test_list = []\n for message in message_query_set:\n if change_user_seen:\n message.time_user_seen = timezone.now()\n message.save()\n test_list.append(message.get_map())\n\n # Collect return values\n collected_values[\"messages\"] = test_list\n collected_values[\"success\"] = True\n\n LOGGER.info(\"Get Conversation Result: %s\", collected_values)\n return JsonResponse(collected_values, status=200)", "def test_get_contact_objects(self):\n\n contacts = MessageController.get_contact_objects(['2'])\n self.assertEqual(contacts[0].contact_first_name, 'Contact2')\n self.assertEqual(contacts[0].contact_phone, '4153417706')\n self.assertEqual(contacts[0].user_id, 1)\n self.assertEqual(contacts[0].lang_id, 1)", "def list_messages(self):", "def fetch(self, start, stop=None, mbox=None):\n self.select_mailbox(mbox, False)\n if start and stop:\n submessages = self.messages[start - 1:stop]\n mrange = \",\".join(submessages)\n else:\n submessages = [start]\n mrange = start\n headers = \"DATE FROM TO CC SUBJECT\"\n query = (\n \"(FLAGS BODYSTRUCTURE RFC822.SIZE BODY.PEEK[HEADER.FIELDS ({})])\"\n .format(headers)\n )\n data = self._cmd(\"FETCH\", mrange, query)\n result = []\n for uid in submessages:\n msg_data = data[int(uid)]\n msg = email.message_from_string(\n msg_data[\"BODY[HEADER.FIELDS ({})]\".format(headers)]\n )\n msg[\"imapid\"] = uid\n msg[\"size\"] = msg_data[\"RFC822.SIZE\"]\n if r\"\\Seen\" not in msg_data[\"FLAGS\"]:\n msg[\"style\"] = \"unseen\"\n if r\"\\Answered\" in msg_data[\"FLAGS\"]:\n msg[\"answered\"] = True\n if r\"$Forwarded\" in msg_data[\"FLAGS\"]:\n msg[\"forwarded\"] = True\n if r\"\\Flagged\" in msg_data[\"FLAGS\"]:\n msg[\"flagged\"] = 
True\n bstruct = BodyStructure(msg_data[\"BODYSTRUCTURE\"])\n if bstruct.has_attachments():\n msg[\"attachments\"] = True\n result += [msg]\n return result", "async def messages(self, ctx):\n\n\t\tawait self.message_leaderboard(ctx, \"messages\")", "def handle_messages(self):\n\n #Get the time at which the code started running\n current_time = datetime.datetime.now()\n\n #get all messages between now and the time where a message was last received\n messages = self.client.messages.list(\n date_sent_before = datetime.datetime.now()+ datetime.timedelta(hours = TIMEDIFFERENCE),\n date_sent_after = self.last_message_timing + datetime.timedelta(hours = TIMEDIFFERENCE)\n )\n\n #Iterate through all the new messages\n for record in messages:\n #If it is not from the Twilio Client\n if record.from_ != 'whatsapp:+14155238886':\n #Then update the timing of the last message to the current time\n self.last_message_timing = current_time\n #If the message sent is the '?' that seeks to get the number\n #of people in the queue\n if record.body == '?':\n #Get the data about people from firebase\n people_data = self.firebase.get_data('people_count')\n #Get the number of people queueing\n no_of_people = people_data['people_count']\n #Create a message from the API to tell the person\n #asking the number of people in the queue\n message = self.client.messages.create(\n body='The number of the people in the queue is {}'.format(no_of_people),\n from_='whatsapp:{sender_number}'.format(**self.config),\n to=record.from_\n )", "def contact_info(self, sensitive=True):\n account_id = self.account_id()\n retry_count = 5\n\n req_url = self.get(\"/accounts/{}/contacts\".format(account_id))['ResultUrl']\n resp = self.get(req_url)\n tries = 0\n while 'Contacts' not in resp and tries < retry_count:\n resp = self.get(req_url)\n tries += 1\n time.sleep(1)\n contacts = resp['Contacts']\n\n contact_data = list()\n for contact in contacts:\n row_data = {\n 'ContactId': contact['Id'],\n 'Email': \"*****@****.***\" if sensitive else contact['Email'],\n 'FirstName': \"*****\" if sensitive else contact['FirstName'],\n 'LastName': \"*****\" if sensitive else contact['LastName'],\n 'Status': contact.get('Status'),\n 'MembeshipEnabled': contact.get('MembershipEnabled'),\n 'TermsOfUseAccepted': contact['TermsOfUseAccepted'],\n }\n\n if 'MembershipLevel' in contact:\n row_data['MembershipLevel'] = contact['MembershipLevel']['Name']\n\n # Map all field values into a dict for convenience\n field_values = {val['FieldName']: val['Value']\n for val in contact['FieldValues']}\n\n # Get list of authorizations\n if 'Managed Authorizations' in field_values:\n authorizations = [i['Label']\n for i in field_values['Managed Authorizations']]\n row_data['Authorizations'] = authorizations\n\n contact_data.append(row_data)\n self.__contact_df = pd.DataFrame(contact_data).set_index('ContactId')\n return self.__contact_df", "def view_contact_chat(self):\n if self._user.chats == {}:\n print(\"No chats to be viewed yet\")\n self.homepage()\n \n print('-=' * 30)\n chats = self._user.list_chats()\n user_choice = self._int_input_in_range(\"Pick whose contact chat to be viewed: \"\n ,range_ = (1, len(chats)))\n if not user_choice:\n return self.homepage()\n \n chat, contact = chats[user_choice - 1]\n chat_content = chat.get_content(self._user)\n print('-=' * 12 + \" Chat Window \" + '-=' * 12)\n if chat_content != []:\n for line in chat_content:\n print(line.rstrip()) \n else:\n print('This chat is empty, send your first msg now')\n \n user_choice = 
self._int_input_in_range(' (1) Send new msg \\n (2) Back to homepage \\n Your choice: '\n , range_ = (1,2))\n if user_choice == 1:\n print('HINT: send (0) to exist the chat window')\n return self._send_msg(contact)\n else:\n return self.homepage()", "def view_contacts(self):\n with open(self.filename, \"r\") as contactsFile:\n contacts = self.display_contact(contactsFile.readlines())\n\n if not contacts:\n return self.msgbox(\"No contacts found.\")\n\n self.msgbox(msg=\"\\n\".join(contacts), title=\"Showing All Contacts\")", "def archive_contact_messages(self, org, contact):\n pass", "def search_contact_list(self):\n\n search_db = Database()\n result = search_db.contact_search(self.name)\n if not result:\n print Fore.YELLOW + ' No such contact'\n return None\n if result > 1:\n print ' Which contact ??'\n for items in result:\n if items[2] > 1:\n print Fore.BLUE + ' %s %s %s' % ([items[0]], items[1], items[2])\n else:\n print str(items[1]), items[2]\n\n return result", "def get_messages_from_cursor(self):\n\n def get_msg(r):\n msg = dict(\n id=r[0],\n datetime=r[1],\n text=r[2],\n sender=r[3],\n media=r[4],\n **json.loads(r[5]),\n )\n if len(r) > 6:\n msg['dialog'] = r[6]\n for field in DATETIME_FIELDS:\n if field not in msg:\n continue\n tz_field = msg[field]\n if isinstance(tz_field, str):\n msg[field] = parse_time(tz_field)\n return {k: v for k, v in msg.items() if v} # get rid of Falsey\n\n return {\n r[0]: get_msg(r)\n for r in self.cur.fetchall()\n }", "def get_messages(self):\n other_user_email = request.args.get('other_user_email')\n page = request.args.get('page')\n per_page = request.args.get('per_page')\n if not other_user_email or not page or not per_page:\n self.logger.debug(messages.MISSING_FIELDS_ERROR % \"query params\")\n return messages.ERROR_JSON % messages.MISSING_FIELDS_ERROR % \"query params\", 400\n email_token = auth.current_user()[0]\n page = int(page)\n per_page = int(per_page)\n # App sends starting with 1 but we start at 0\n page -= 1\n try:\n message_list, pages = self.friend_database.get_conversation(email_token, other_user_email, per_page, page)\n except NoMoreMessagesError:\n self.logger.debug(messages.NO_MORE_PAGES_ERROR)\n return messages.NO_MORE_PAGES_ERROR, 404\n message_list = [{k:v for k,v in m._asdict().items() if k != \"hidden_to\"} for m in message_list]\n for i in range(len(message_list)):\n message_list[i][\"timestamp\"] = message_list[i][\"timestamp\"].isoformat()\n return json.dumps({\"messages\": message_list, \"pages\": pages}), 200", "def import_(cls, contact):\n assert not isinstance(message, PhoneMessage)\n ret = PhoneMessage(peer=message.peer,text=message.text,timestamp=message.timestamp,direction=message.direction,status=message.status)\n yield ret", "def import_(cls, contact):\n assert not isinstance(message, PhoneMessage)\n ret = PhoneMessage(peer=message.peer,text=message.text,timestamp=message.timestamp,direction=message.direction,status=message.status)\n yield ret", "def contact_details(self) -> 'outputs.ContactDetailsResponse':\n return pulumi.get(self, \"contact_details\")", "def contact_details(self) -> 'outputs.ContactDetailsResponse':\n return pulumi.get(self, \"contact_details\")", "def contact_details(self) -> 'outputs.ContactDetailsResponse':\n return pulumi.get(self, \"contact_details\")", "def GetMessages(self, limit = -1, since = -1, offset = -1):\n\n if (limit < 1):\n limit = self.limit\n\n url = self.__BuildGetUrl(\"directed_messages\", self.userName,\n limit, since, offset)\n return self.__GetJson(url, True)", "def 
getsms(self):\n\n # We could call voice.sms() directly, but I found this does a rather\n # inefficient parse of things which pegs a CPU core and takes ~50 CPU\n # seconds, while this takes no time at all.\n data = self.gv.sms.datafunc()\n data = re.search(r'<html><\\!\\[CDATA\\[([^\\]]*)', data, re.DOTALL).groups()[0]\n\n divs = SoupStrainer('div')\n tree = BeautifulSoup(data, parseOnlyThese=divs)\n\n # We need to know who to send texts to, as that information is\n # not included with each message.\n msgtype = str(tree.find(\"span\", attrs={\"class\": \"gc-message-type\"}))\n m = re.search('\\((\\d{3})\\) (\\d{3})-(\\d{4})', msgtype)\n self.to_phone = ''.join(m.groups())\n\n smses = [] \n # we only want the first conversation\n conversation = tree.find(\"div\", attrs={\"id\" : True},recursive=False)\n msgs = conversation.findAll(attrs={\"class\" : \"gc-message-sms-row\"})\n for row in msgs:\n msgitem = {\"id\" : conversation[\"id\"]} \n spans = row.findAll(\"span\", attrs={\"class\" : True}, recursive=False)\n for span in spans :\n cl = span[\"class\"].replace('gc-message-sms-', '')\n msgitem[cl] = (\" \".join(span.findAll(text=True))).strip()\n if msgitem[\"text\"]:\n msgitem[\"text\"] = BeautifulStoneSoup(msgitem[\"text\"],\n convertEntities=BeautifulStoneSoup.HTML_ENTITIES\n ).contents[0]\n smses.append(msgitem)\n \n # Now that we have the SMSes, we can add their text and render them.\n self.curses_lock.acquire()\n\n # If smses is shorter than history, we started a new thread, so clear the\n # history.\n if len(smses) < len(self.history):\n self.history = []\n self.chatscreen.clear()\n\n def sublist_index(haystack, needle):\n \"\"\" Find the starting index of a sublist in a list. Premature\n optimization is the root of all evil. The empty list is a sublist of\n every point in a list. \"\"\"\n try:\n for i in xrange(len(haystack)):\n if haystack[i:i+len(needle)] == needle:\n return i\n except IndexError:\n pass\n raise ValueError\n\n # only print new messages\n try:\n msgs = map(lambda m: m['from']+' '+m['text'], smses)\n idx = sublist_index(msgs, self.history)\n smses = smses[idx + len(self.history):]\n except ValueError:\n # if we didn't find anything, then print everything\n pass\n\n for sms in smses:\n name = sms[\"from\"][:-1]\n if name != 'Me':\n self.to_name = name\n # if we're adding a message that's not from me, beep\n curses.beep()\n self.message(name, sms[\"text\"])\n\n self.curses_lock.release()", "def get_history_contact(contact):\n BODY = {\n \"query\": {\n \"match\": {\n \"CONTACT\": contact\n }\n }\n }\n total_result = {}\n all_indexes = es.indices.get_alias(\"*\")\n for index in all_indexes.keys():\n result = es.search(index = index, body = BODY)\n if result[\"hits\"][\"hits\"]:\n total_result[index] = [item[\"_source\"] for item in result[\"hits\"][\"hits\"]]\n return jsonify(total_result)" ]
[ "0.6432034", "0.62822866", "0.5982838", "0.59174776", "0.58405066", "0.5831497", "0.5693382", "0.5687555", "0.5653335", "0.5652404", "0.5585191", "0.55604875", "0.55467224", "0.55259675", "0.5523025", "0.55170584", "0.5491223", "0.54852605", "0.5484155", "0.5483405", "0.5471252", "0.5460576", "0.54537314", "0.54537314", "0.5439263", "0.5439263", "0.5439263", "0.54129493", "0.5395846", "0.5379344" ]
0.77379125
0
Set items. The items do not have to be strings. If items is a dictionary, the strings of the values are shown to the user and the key is returned from GetAny. If it is just an iterable, the strings of the values are shown and the raw equivalent input is returned.
def SetItems(self, items: Union[Iterable, dict]):
    if not items:
        return
    if isinstance(items, dict):
        items = [[key, str(value)] for key, value in items.items()]
        if self._sorted:
            items = sorted(items, key=lambda x: x[1])
        self._items = [key for key, _ in items]
        super().SetItems([value for _, value in items])
    else:
        if self._sorted:
            self._items = tuple(sorted(items))
        else:
            self._items = tuple(items)
        super().SetItems([str(v) for v in self._items])
    self.SetSelection(0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def put_in(self, items):\n try:\n if items[0] not in self.items:\n print(\"you don't have a \" + str(items[0]))\n return self\n if items[2] not in self.items:\n print(\"you don't have a \" + str(items[1]))\n return self\n except IndexError:\n print('put ' + str(items[0]) + ' where')\n except TypeError:\n print('you don\\'t have anything')\n return self\n # implement", "def inputAny(name,value,itemtype,**options):\n #print name,value,itemtype,options\n\n try:\n f = InputItems[itemtype]\n except:\n f = InputString # default convert to string\n return f(name,value,**options)", "def simpleInputItem(name,value=None,itemtype=None,**kargs):\n kargs['name'] = name\n if value is not None:\n kargs['value'] = value\n if itemtype is not None:\n kargs['itemtype'] = itemtype\n return kargs", "def setitems(self, items):\n self.clear()\n # FIXME: this allows you to pass in an OrderedDict as well :-)\n self.update(items)", "def get_item_input(self, varname, collection):\n item = self.get_ascii_input(varname)\n if item not in collection:\n raise MKUserError(varname, _(\"The requested item %s does not exist\") % item)\n return collection[item], item", "def __setitem__(self, keys, item):\n keys = self._yield_keys(keys)\n try:\n validated_value = self._validate_values(item)\n except TypeConversionError as err:\n raise TypeConversionError(\"For types {}, error {}.\".format(\n list(keys), str(err)))\n for key in keys:\n self._single_setitem(key, validated_value)", "def __getitem__(self, item):\n result = self._get_raw_input()[item]\n return result[0] if isinstance(result, list) else result", "def test_items(self):\n obs = self.tester.items()\n self.assertTrue(isinstance(obs, Iterable))\n exp = {('physical_location', 'ANL'), ('has_physical_specimen', True),\n ('has_extracted_data', True), ('sample_type', 'ENVO:soil'),\n ('required_sample_info_status', 'completed'),\n ('collection_timestamp', datetime(2011, 11, 11, 13, 00, 00)),\n ('host_subject_id', '1001:M7'),\n ('description', 'Cannabis Soil Microbiome'),\n ('season_environment', 'winter'), ('assigned_from_geo', 'n'),\n ('texture', '64.6 sand, 17.6 silt, 17.8 clay'),\n ('taxon_id', '1118232'), ('depth', 0.15),\n ('host_taxid', '3483'), ('common_name', 'root metagenome'),\n ('water_content_soil', 0.164), ('elevation', 114), ('temp', 15),\n ('tot_nitro', 1.41), ('samp_salinity', 7.15), ('altitude', 0),\n ('env_biome',\n 'ENVO:Temperate grasslands, savannas, and shrubland biome'),\n ('country', 'GAZ:United States of America'), ('ph', 6.94),\n ('anonymized_name', 'SKB8'), ('tot_org_carb', 5),\n ('description_duplicate', 'Burmese root'),\n ('env_feature', 'ENVO:plant-associated habitat'),\n ('latitude', 74.0894932572),\n ('longitude', 65.3283470202)}\n self.assertEqual(set(obs), exp)", "def print_items(items): \n print(items)", "def update(self, items: Mapping[Any, Any]) -> None:\n self.extend(list(items.values()))\n return", "def mask(self, item_or_items: Union[str, list]) -> None:\n if isinstance(item_or_items, str):\n self._masked_items.add(item_or_items)\n elif isinstance(item_or_items, list):\n for item in item_or_items:\n assert isinstance(item, str)\n self._masked_items.add(item)", "def multi_set(self, items, no_update_log=False):\n opts = (no_update_log and TyrantProtocol.RDBMONOULOG or 0)\n lst = []\n for k, v in items.iteritems():\n if isinstance(v, (dict)):\n new_v = []\n for kk, vv in v.items():\n new_v.append(kk)\n new_v.append(vv)\n v = new_v\n if isinstance(v, (list, tuple)):\n assert self.separator, \"Separator is not set\"\n\n v = 
self.separator.join(v)\n lst.extend((k, v))\n\n wait(self.proto.misc(\"putlist\", lst, opts))", "def __getitem__(self, items):\n return list(self._words())[items]", "def inputAnyOld(item,parent=None):\n name,value = item[:2]\n \n if type(item[-1]) == dict:\n # we have options\n options = item[-1]\n item = item[:-1]\n else:\n options = {}\n\n if len(item) > 2 and type(item[2]) == str:\n itemtype = item[2]\n else:\n # No item specified: guess from value or from available options\n if 'choices' in options:\n itemtype = 'select'\n else:\n itemtype = type(value)\n\n if itemtype == int:\n if len(item) > 3 and type(item[3] != dict):\n options['min'] = int(item[3])\n if len(item) > 4:\n options['max'] = int(item[4])\n\n elif itemtype == float:\n if len(item) > 3 and type(item[3] != dict):\n options['min'] = int(item[3])\n if len(item) > 4:\n options['max'] = int(item[4])\n if len(item) > 5:\n options['dec'] = int(item[5])\n\n elif itemtype == 'select' :\n if len(item) > 3:\n options['choices'] = item[3]\n\n elif itemtype in ['radio','hradio','vradio']:\n if len(item) > 3:\n options['choices'] = item[3]\n options['direction'] = itemtype[0]\n\n elif itemtype in ['push','hpush','vpush']:\n if len(item) > 3:\n options['choices'] = item[3]\n options['direction'] = itemtype[0]\n\n if parent is not None:\n options['parent'] = parent\n\n return inputAny(name,value,itemtype,**options)", "def setItems(self, items):\n self._nsObject.removeAllItems()\n for item in items:\n if isinstance(item, NSMenuItem):\n menu = self._nsObject.menu()\n menu.addItem_(item)\n else:\n self._nsObject.addItemWithTitle_(item)", "def compatInputItem(name,value,itemtype=None,kargs={}):\n # Create a new dict item!\n # We cannot change kargs directly like in simpleInputItem,\n # that would permanently change the value of the empty dict!\n item = {}\n if isinstance(itemtype,dict):\n # in case the itemtype was missing\n kargs = itemtype\n itemtype = None\n item.update(kargs)\n item['name'] = name\n item['value'] = value\n item['itemtype'] = itemtype\n return item", "def set(self, item, value):\r\n raise NotImplementedError", "def print_all_items_in_dict(all_items):\n if config.output.csv:\n print_all_items_in_dict_for_csv(all_items)\n else:\n print_all_items_in_dict_for_human(all_items)", "def set_all_item_field_value(**kwargs):\n\n # Filters\n filters = {\n 'disabled': 0,\n 'name': ('like', '%{0}%'.format(kwargs['keyword']))\n }\n\n # Get all Item fields based from the filters\n items = frappe.get_all('Item', filters=filters, fields=['name'])\n\n # Counters\n cur_index = 1\n max_index = len(items)\n\n print \"=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\"\n print \"Setting all Item {0} field to the value {1}.\".format(kwargs['field'], kwargs['value'])\n print \"=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\"\n\n for item in items:\n print \"Processing item {0}/{1}...\".format(cur_index, max_index)\n\n frappe.db.set_value('Item', item.name, kwargs['field'], kwargs['value'])\n\n cur_index = cur_index + 1\n\n print \"=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\"\n print \"Done setting {0} items.\".format(max_index)\n print \"=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\"", "def test_values_single(self):\n input_item = self.item_class(name=\"foo\")\n il = ItemLoader(item=input_item)\n self.assertEqual(il._values.get(\"name\"), [\"foo\"])", "def convertInputItem(data):\n if isinstance(data,dict):\n return data\n elif type(data) in [list,tuple]:\n try:\n return simpleInputItem(*data)\n except:\n try:\n return compatInputItem(*data)\n except:\n pass\n pass\n raise ValueError,\"Invalid 
inputItem data: %s\" % str(data)", "def set_all(self, value):\n self.__items = value", "def output_all_items(items):\n\n for item in items:\n print(item)", "def output_all_items(items):\n\n for item in items:\n print(item)", "def select_item(items, default=None, title='Items', prompt='item'):\n selected_item = None\n print \"\"\"\n*** {} ***\n{}\n\"\"\".format(title, '\\n'.join(number_list(items)))\n if default and default in items:\n default_index = items.index(default)+1\n else:\n default_index = None\n while selected_item == None:\n try:\n selected_index = raw_input('{}. Enter number (1 to {}) {}. 0 for none. :'\n .format(prompt, len(items), '' if default_index == None else '[{}]'.format(default_index)))\n selected_index = selected_index.strip().rstrip('.')\n if default_index != None and selected_index == '':\n selected_item = items[default_index-1]\n elif is_int(selected_index):\n selected_index = int(selected_index)\n if selected_index == 0:\n return None\n elif (selected_index > 0 and selected_index <= len(items)):\n selected_item = items[selected_index-1]\n except:\n pass\n return selected_item", "def replace_values(item, values: Dict[str, Any]):\n if isinstance(item, list):\n return [replace_values(it, values) for it in item]\n elif isinstance(item, tuple):\n return tuple(replace_values(it, values) for it in item)\n elif isinstance(item, dict):\n items = dict()\n for key, value in item.items():\n if key in values:\n if isinstance(value, (list, tuple, dict)):\n raise ValueError(f\"Cannot replace value for '{key}'. Type must be literal but got {type(value)}\")\n LOGGER.info(f\"Replacing value for '{key}': {value} -> {values[key]}\")\n value = values[key]\n else:\n value = replace_values(value, values)\n items[key] = value\n return items\n else:\n return item", "def setitem_key_value(self):\n raise NotImplementedError", "def items(self, value):\n if value is None:\n self._items = None\n self.active = None\n else:\n self._items = value\n self.active = [True] * len(self._items)", "def assert_keys_type_value(self,\n caller,\n extra_error_text,\n *context_items):\n assert context_items, (\"context_items parameter must be specified.\")\n\n for context_item in context_items:\n self.assert_key_type_value(context_item, caller, extra_error_text)", "def identify_items(self, items):\n\n raise NotImplementedError" ]
[ "0.62009406", "0.5768462", "0.56391984", "0.5633274", "0.5621747", "0.55339104", "0.5429804", "0.5320381", "0.53021246", "0.52935237", "0.5275827", "0.52436066", "0.52289635", "0.5216234", "0.51963955", "0.5189505", "0.5189501", "0.51155585", "0.5114263", "0.50838286", "0.5069122", "0.5056276", "0.5053313", "0.5053313", "0.50465816", "0.5016517", "0.50151515", "0.5012752", "0.49945533", "0.49761504" ]
0.7074095
0
Check if two boards are equal
def board_equals(board, newboard): return (newboard == board).all()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __eq__(self, other) -> None:\n\t\tfor k, v in enumerate(self.board):\n\t\t\tif v != other.board[k]:\n\t\t\t\treturn False\n\t\treturn True", "def __eq__(self, other):\n for row in range( self.n ):\n if self.board[row] != other.board[row]:\n return False\n return True", "def __eq__(self, other):\n\n return self.board == other.board", "def __eq__(self, other):\n\n if isinstance(other, Board):\n if len(self.game_pieces) != len(other.game_pieces):\n return False\n for i in range(len(self.game_pieces)):\n if self.game_pieces[i].x != other.game_pieces[i].x or self.game_pieces[i].y != other.game_pieces[i].y:\n return False\n return True\n else:\n if hash(self) == other:\n return True\n return False", "def __eq__(self, other) -> bool:\r\n if isinstance(other, Square):\r\n if (self.board, self.file, self.rank) == (\r\n other.board, other.file, other.rank):\r\n return True\r\n \r\n return False", "def __eq__(self, other):\n h1 = [item for row in self.arr for item in row]\n h2 = [item for row in other.arr for item in row]\n for i in range(self.board_size * self.board_size):\n if h1[i] != h2[i]:\n return False\n return True", "def grid_equal (grid1, grid2):\r\n s=0 \r\n for h in range(4):\r\n for m in range(4):\r\n if grid1[h][m]==grid2[h][m]:\r\n s+=1\r\n else:\r\n ()\r\n if s==16:\r\n return True\r\n else:\r\n return False", "def testWiresAreDifferent(self):\n\n wire_original = self.board.get((0, 0))\n wire_copied = self.board.get((4, 0))\n\n self.assertIsNot(wire_original, wire_copied)", "def testNandsAreDifferent(self):\n\n nand_original = self.board.get((0, 1))\n nand_copied = self.board.get((3, 1))\n\n self.assertIsNot(nand_original, nand_copied)", "def testBoardDuplicate(self):\n self.assertEqual(self.boards.SelectBoards(['sandbox sandbox',\n 'sandbox']),\n {'all': 1, 'sandbox': 1})", "def __eq__(self, obj):\r\n\r\n #Types must be the same\r\n if type(self) != type(obj):\r\n return False\r\n\r\n #The number of players must be the same\r\n if len(self.players) != len(obj.players):\r\n return False\r\n\r\n #Players must be the same\r\n for i in range(len(self.players)):\r\n if self.players[i] != obj.players[i]:\r\n return False\r\n\r\n #The number to win must be the same \r\n if self.num_to_win != obj.num_to_win:\r\n return False\r\n\r\n #The turn number must be the same\r\n if self.turn_number != obj.turn_number:\r\n return False\r\n\r\n #The max turn numbers must be the same\r\n if self.max_turns != obj.max_turns:\r\n return False\r\n\r\n #The winner must be the same\r\n if self.winner != obj.winner:\r\n return False\r\n\r\n #The current board must be the same\r\n if self.board != obj.board:\r\n return False\r\n\r\n #The board histories must be the same length\r\n if len(self.board_history) != len(obj.board_history):\r\n return False\r\n\r\n #The histories must be the same\r\n for i in range(len(self.board_history)):\r\n if self.board_history[i] != obj.board_history[i]:\r\n return False\r\n\r\n #If all these conditions are met then we return true\r\n return True", "def grid_equal (grid1, grid2):\r\n for i in range (4):\r\n for j in range (4):\r\n if grid1[i][j] != grid2[i][j]:\r\n return False\r\n return True", "def __eq__(self, other):\n for i in range(len(self.puzzle)):\n for j in range(len(self.puzzle[0])):\n if(self.puzzle[i][j] != other.puzzle[i][j]):\n return False\n return True", "def __eq__(self, other):\n for ls, lo in zip(self.leaderboard_names, other.leaderboard_names):\n if ls != lo:\n return False\n for ls, lo in zip(self.leaderboard_groups, other.leaderboard_groups):\n if 
ls != lo:\n return False\n if self.top_left != other.top_left:\n return False\n if self.bottom_right != other.bottom_right:\n return False\n return True", "def test_equal(self):\r\n\r\n a_players = [ZeroPlayer(1), ZeroPlayer(2)]\r\n a_x_dist = 3\r\n a_y_dist = 3\r\n a_num_to_win = 1\r\n a_game = Game(a_players, a_x_dist, a_y_dist, a_num_to_win)\r\n\r\n b_players = [ZeroPlayer(1), ZeroPlayer(2)]\r\n b_x_dist = 3\r\n b_y_dist = 3\r\n b_num_to_win = 1\r\n b_game = Game(b_players, b_x_dist, b_y_dist, b_num_to_win)\r\n\r\n c_players = [ZeroPlayer(1), ZeroPlayer(2)]\r\n c_x_dist = 3\r\n c_y_dist = 3\r\n c_num_to_win = 1\r\n c_game = Game(c_players, c_x_dist, c_y_dist, c_num_to_win)\r\n\r\n self.assertTrue(b_game == a_game == c_game)\r\n\r\n a_game.play_game()\r\n b_game.play_game()\r\n\r\n self.assertTrue(a_game == b_game)\r\n self.assertFalse(c_game == a_game)\r\n\r\n c_game.play_game()\r\n\r\n self.assertTrue(b_game == a_game == c_game)", "def grid_equal (grid1, grid2):\r\n if grid1 == grid2:\r\n return True\r\n else:\r\n return False", "def grid_equal (grid1, grid2):\r\n if grid1 == grid2:\r\n return True\r\n return False", "def compare(self, dummy):\r\n equality = []\r\n for i in range(self.height):\r\n if self.board[i] != dummy[i]:\r\n equality.append(False)\r\n else:\r\n equality.append(True)\r\n if False in equality:\r\n self.new_tile()", "def grid_equal(grid1, grid2):\r\n for i in range(len(grid1)):\r\n for j in range(len(grid1[i])):\r\n if grid1[i][j] != grid2[i][j]:\r\n return False\r\n return True", "def __eq__(self, other_sudoku_matrix):\n equals = False\n for row in range(9):\n for col in range(9):\n if int(self.get_cell(row, col).get_cell_value()) == int(\n other_sudoku_matrix.get_cell(row, col).get_cell_value()):\n equals = True\n else:\n return False\n return equals", "def __eq__(self, other):\n return (\n self.bleed == other.bleed and\n self.width == other.width and\n self.height == other.height\n )", "def testWireConnectivity(self):\n\n wire_copied = self.board.get((4, 0))\n wire_adjacent = self.board.get((5, 0))\n\n self.assertIs(wire_adjacent, wire_copied)", "def validBoard():\r\n\r\n\tglobal move1, move2\r\n\r\n\tif move1==move2 or move1-move2==1:\r\n\t\treturn True\r\n\telse:\r\n\t\treturn False", "def __eq__(self, other):\n return np.all(self.grid == other.grid) and np.all(self.pos == other.pos)", "def _check_integrity(self):\n\n count = 0\n for (x, y) in self.__players[ChessGame.BLACK].union(\n self.__players[ChessGame.WHITE]):\n assert (x, y) in self.__board\n count += 1\n\n assert count == len(self.__board)", "def _are_equal(grid: List[List[str]], other: List[List[str]]) -> bool:\n for row in range(len(grid)):\n for col in range(len(grid[row])):\n if grid[row][col] != other[row][col]:\n return False\n return True", "def __eq__(self, other):\n return (type(self) == type(other) and\n self.from_grid == other.from_grid and\n self.to_grid == other.to_grid and\n self.m == other.m and\n self.n == other.n)", "def __eq__(self, other):\n return (type(self) == type(other) and\n self.n == other.n and self.m == other.m and\n self.from_grid == other.from_grid and\n self.to_grid == other.to_grid)", "def __eq__(self, other):\n if self is other:\n return True\n elif type(self) != type(other):\n return False\n else:\n # A node is considered equal if it has the exact same state as\n # another node\n if self.board_state == other.board_state:\n return True\n else:\n return False", "def test_get_board(self):\n copy1 = self.game.get_board()\n self.assertEqual(copy1._board, 
self.game._board)\n\n for row in range(self.game._dim):\n for col in range(self.game._dim):\n self.game._board[row][col] = PLAYERX\n copy2 = self.game.get_board()\n self.assertEqual(copy2._board, self.game._board)\n\n for row in range(self.game._dim):\n for col in range(self.game._dim):\n self.game._board[row][col] = PLAYERO\n copy3 = self.game.get_board()\n self.assertEqual(copy3._board, self.game._board)" ]
[ "0.76765525", "0.7590167", "0.7520879", "0.73166174", "0.7049794", "0.6997381", "0.69779974", "0.696249", "0.69501203", "0.6933911", "0.6929893", "0.68988365", "0.6865647", "0.68118745", "0.6808368", "0.67976785", "0.67858136", "0.6755733", "0.673824", "0.67163175", "0.6605295", "0.65788335", "0.656916", "0.6545813", "0.65295094", "0.65182745", "0.65051144", "0.64806736", "0.6480633", "0.6479943" ]
0.7748529
0
Cog unload handler. This removes any event hooks that were registered.
def cog_unload(self): self.bot.lavalink._event_hooks.clear()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cog_unload(self):\n self.bot.lavalink_event_hooks.clear()", "def on_unload(self):\n pass", "def unloaded():\n pass", "async def cog_unload(self) -> None:\n self.update_guild_boost.stop()", "def cog_unload(self):\n\n self._check_to_send_cookie.cancel()", "def unload(self):\n main.msgQ.removeEvent(Constants.CMSG_CHANGE_AVATAR_TYPE)\n main.msgQ.removeEvent(Constants.CMSG_CHANGE_TEAM_PVP)\n main.msgQ.removeEvent(Constants.CMSG_START_TO_READY_GAME)\n main.msgQ.removeEvent(Constants.CMSG_CANCEL_TO_JOIN_GAME)\n main.msgQ.removeEvent(Constants.CMSG_START_SIXTY_SECONDS_COUNTER)\n self.mainFrame.destroy()", "def cog_unload(self):\n self.resend_post.cancel()", "async def unload_cog(self, ctx, *, cog: str):\n\n try:\n self.bot.unload_extension(cog)\n except Exception as e:\n await ctx.send(f'**`ERROR:`** {type(e).__name__} - {e}')\n else:\n await ctx.send('**`SUCCESS`**')", "async def unload_cog(self, ctx, *, cog: str):\n\n try:\n await self.bot.unload_extension(f'cogs.{cog}')\n except Exception as e:\n await ctx.send(f'**`ERROR:`** {type(e).__name__} - {e}')\n else:\n await ctx.send('**`SUCCESS`**')", "def remove_hook(self):\n for handle in self.handlers:\n handle.remove()", "def remove_hook(self):\n for handle in self.handlers:\n handle.remove()", "async def tool_unload(self, ctx, *, cog: str):\n\n try:\n self.bot.unload_extension(cog)\n except Exception as e:\n await zb.bot_errors(ctx,sp.format(e))\n else:\n await ctx.send('**`SUCCESS`**')", "def unload(self):\r\n self.gox.signal_strategy_unload(self, None)\r\n self.strategy_object_list = []", "def on_unload(self):\n self.entity.on_unload()\n del self.server.entity_list[self.entity.id]", "async def cog_unload(self) -> None:\n self.scheduler.cancel_all()", "def unload_plugin(self):\n pass", "def unload():\r\n database.save() # Save the database\r\n\r\n \"\"\" Remove any popups \"\"\"\r\n deleted = []\r\n for popup in popuplib.gPopups:\r\n if popup.startswith('sourcerpg_'):\r\n deleted.append(popup)\r\n for popup in deleted:\r\n popuplib.delete(popup)\r\n\r\n \"\"\" Unload all skills \"\"\"\r\n for skill in skills:\r\n es.unload(\"sourcerpg/skills/\" + skill.name)\r\n\r\n \"\"\" Unload all addons \"\"\"\r\n for addon in addons:\r\n es.unload(\"sourcerpg/addons/\" + addon.name)\r\n\r\n \"\"\" Unregister the server commands \"\"\"\r\n cmdlib.unregisterServerCommand(\"srpg\")\r\n cmdlib.unregisterSayCommand(\"rpgmenu\")\r\n cmdlib.unregisterSayCommand(\"rpgupgrade\")\r\n cmdlib.unregisterSayCommand(\"rpgsell\")\r\n cmdlib.unregisterSayCommand(\"rpghelp\")\r\n cmdlib.unregisterSayCommand(\"rpgstats\")\r\n cmdlib.unregisterSayCommand(\"rpgrank\")\r\n cmdlib.unregisterSayCommand(\"rpgpopup\")\r\n cmdlib.unregisterSayCommand(\"rpgtop10\")\r\n \r\n gamethread.cancelDelayed('sourcerpg_databasesave')", "async def unload(self) -> None:", "def on_cleanup(self):\n\n pygame.quit()", "def _shutdown(): \n for GD in GlobalDictionary._instances:\n print(\"\\nCleaning up:\", GD.name)\n GD._handler.close()\n del GD\n\n print(\"Shutting down\")\n \n sys.exit(0)", "def slot_before_unload(self, _sender, _data):\r\n pass", "def shutdown():\n\n logger.debug(\"GiantbombHandler shutdown\")", "def script_unload():\n log_threadsafe(obs.LOG_DEBUG, 'Plugin unloaded')\n \n global state\n \n if state != 0:\n with mutex_state_sending:\n state = 0\n pipe_send_state(should_seek_new_client_on_fail=False)\n \n win32file.CloseHandle(pipe)", "def cog_unload(self) -> None:\n log.debug(\"Unloading the cog and canceling the background task.\")\n self.countdown_task.cancel()\n 
self.status_task.cancel()", "def __del__(self):\r\n self.debug(\"%s unloaded\" % self.name)", "def unload(self):\n for action in self.actions:\n self.iface.removePluginMenu(\n self.tr(u'&ObstacleFAADigitialObstacleFileDB'),\n action)\n self.iface.removeToolBarIcon(action)", "def off_hook(self) -> None:", "def teardown(bot):\n log.warning(\"Warn un-mounted\")\n for handler in log.handlers[:]:\n log.removeHandler(handler)", "def clear(self):\n self.__hooks = odict()", "def clear_hooks(self):\n self._conn_hooks = []" ]
[ "0.8657892", "0.72803473", "0.7020988", "0.698424", "0.691849", "0.68781596", "0.6846889", "0.6832737", "0.6830664", "0.6765117", "0.6765117", "0.67053455", "0.6661772", "0.6613284", "0.6599922", "0.658577", "0.6564507", "0.6502889", "0.6488965", "0.64649194", "0.6411848", "0.6403722", "0.63892174", "0.6308406", "0.6280703", "0.6230616", "0.6229407", "0.61816126", "0.6150336", "0.61322516" ]
0.87310874
0
Returns an embed with no title, as a fancier way of replying.
def reply_embed(self, message: str):
    embed = discord.Embed(color=discord.Color.blurple())
    embed.title = ""
    embed.description = message
    return embed
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def embed():", "async def get_hack_embed(self, channel: discord.TextChannel, perpetrator_id: int, target_id: int,) -> discord.Embed:\n\n timestamp = await self.get_timestamp()\n\n hack_embed = discord.Embed(\n title=\"Someone just got Hacked and lost Control of Everything!\",\n timestamp=datetime.utcfromtimestamp(timestamp)\n )\n hack_embed.description = f\"**<@{perpetrator_id}> hacked <@{target_id}>!** <a:hackerman:652303204809179161>\"\n # hack_embed.description=f\"**<@{perpetrator_id}> hacked <@{attacker_id}>!** <a:hackerman:802354539184259082>\"\n hack_embed.color = discord.Color.green()\n\n hack_embed.set_thumbnail(url=\"https://thelanguagesloth.com/media/sloth_classes/Cybersloth.png\")\n hack_embed.set_footer(text=channel.guild, icon_url=channel.guild.icon_url)\n\n return hack_embed", "async def get_hack_embed(self, channel: discord.TextChannel, perpetrator_id: int, target_id: int,) -> discord.Embed:\n\n timestamp = await utils.get_timestamp()\n\n hack_embed = discord.Embed(\n title=\"Someone just got Hacked and lost Control of Everything!\",\n timestamp=datetime.fromtimestamp(timestamp)\n )\n hack_embed.description = f\"**<@{perpetrator_id}> hacked <@{target_id}>!** <a:hackerman:652303204809179161>\"\n # hack_embed.description=f\"**<@{perpetrator_id}> hacked <@{attacker_id}>!** <a:hackerman:802354539184259082>\"\n hack_embed.color = discord.Color.green()\n\n hack_embed.set_thumbnail(url=\"https://thelanguagesloth.com/media/sloth_classes/Cybersloth.png\")\n hack_embed.set_footer(text=channel.guild, icon_url=channel.guild.icon.url)\n\n return hack_embed", "def get_embed_url(self):\n if not self.original_url:\n return ''\n \n if not self.video_id:\n return ''\n \n return 'http://embed.bambuser.com/broadcast/%s?context=b_simple&autoplay=0&chat=0' % (self.video_id)", "def error_embed(self, message: str):\n embed = discord.Embed(color=discord.Color.red())\n embed.title = \"\"\n embed.description = message\n return embed", "def make_twitch_embed(member: discord.Member, response: dict):\n e = discord.Embed(title=\"Playing \" + response[\"stream\"][\"game\"], url=member.game.url,\n description=member.game.name, color=member.color)\n e.set_author(name=member.display_name, url=member.game.url, icon_url=member.avatar_url)\n e.set_thumbnail(url=response[\"stream\"][\"preview\"][\"small\"] + \"?date=\" + datetime.now().ctime().replace(\" \", \"%20\"))\n return e", "def _error_embed_helper(title: str, description: str) -> discord.Embed:\n return discord.Embed(title=title, description=description, colour=discord.Colour.red())", "async def _create_embed(self, event, info):\n\n e = discord.Embed(url=info.get(\"url\"))\n e.title = \"%s %s!\" % (info.get(\"streamer\"), info.get(\"live_status\"))\n e.add_field(name=\"Stream title\", value=info.get(\"title\"), inline=False)\n e.add_field(name=\"Begin:\", value=event.begin.format(\"HH:mm:ss ZZZ\") + \" (\" + event.begin.humanize() + \")\", inline=False)\n e.add_field(name=\"Duration: \", value=str(event.duration), inline=False)\n #e.add_field(name=\"Link\", value=info.get(\"url\"), inline=False)\n e.set_image(url=info.get(\"thumbnail\") or e.Empty)\n return e", "async def get_wire_embed(self, channel: discord.TextChannel, perpetrator_id: int, target_id: int,) -> discord.Embed:\n\n timestamp = await self.get_timestamp()\n\n wire_embed = discord.Embed(\n title=\"Someone has been wired up!\",\n timestamp=datetime.utcfromtimestamp(timestamp)\n )\n wire_embed.description = f\"**<@{perpetrator_id}> wired <@{target_id}>!** 🔌\"\n wire_embed.color = 
discord.Color.green()\n wire_embed.set_image(url='https://i.pinimg.com/originals/8f/e1/d1/8fe1d171c2cfc5b7cc5f6b022d2a51b1.gif')\n wire_embed.set_thumbnail(url=\"https://thelanguagesloth.com/media/sloth_classes/Cybersloth.png\")\n wire_embed.set_footer(text=channel.guild, icon_url=channel.guild.icon_url)\n\n return wire_embed", "async def get_wire_embed(self, channel: discord.TextChannel, perpetrator_id: int, target_id: int,) -> discord.Embed:\n\n timestamp = await utils.get_timestamp()\n\n wire_embed = discord.Embed(\n title=\"Someone has been wired up!\",\n timestamp=datetime.fromtimestamp(timestamp)\n )\n wire_embed.description = f\"**<@{perpetrator_id}> wired <@{target_id}>!** 🔌\"\n wire_embed.color = discord.Color.green()\n wire_embed.set_image(url='https://i.pinimg.com/originals/8f/e1/d1/8fe1d171c2cfc5b7cc5f6b022d2a51b1.gif')\n wire_embed.set_thumbnail(url=\"https://thelanguagesloth.com/media/sloth_classes/Cybersloth.png\")\n wire_embed.set_footer(text=channel.guild, icon_url=channel.guild.icon.url)\n\n return wire_embed", "def embed_url(self) -> str:\n return (Endpoints.GUILD_BASE + \"/embed.png\").format(guild_id=self.id)", "async def prepembed(ctx, channel:discord.TextChannel, *, jsonInput):\n jso = json.loads(jsonInput)\n title = jso['title'] if 'title' in jso else \"\"\n desc = jso['description'] if 'description' in jso else \"\"\n titleUrl = jso['titleUrl'] if 'titleUrl' in jso else \"\"\n hexcolor = jso['hexColor'] if 'hexColor' in jso else \"#2E66B6\"\n webcolor = jso['webColor'] if 'webColor' in jso else \"\"\n thumbnailUrl = jso['thumbnailUrl'] if 'thumbnailUrl' in jso else \"\"\n authorName = jso['authorName'] if 'authorName' in jso else \"\"\n authorUrl = jso['authorUrl'] if 'authorUrl' in jso else \"\"\n authorIcon = jso['authorIcon'] if 'authorIcon' in jso else \"\"\n if 'author' in jso:\n authorName = ctx.message.author.name\n authorIcon = ctx.message.author.avatar_url_as(format=\"jpg\")\n fields = jso['fields'] if 'fields' in jso else \"\"\n footerText = jso['footerText'] if 'footerText' in jso else \"\"\n footerUrl = jso['footerUrl'] if 'footerUrl' in jso else \"\"\n imageUrl = jso['imageUrl'] if 'imageUrl' in jso else \"\"\n embed = assemble_embed(\n title=title,\n desc=desc,\n titleUrl=titleUrl,\n hexcolor=hexcolor,\n webcolor=webcolor,\n thumbnailUrl=thumbnailUrl,\n authorName=authorName,\n authorUrl=authorUrl,\n authorIcon=authorIcon,\n fields=fields,\n footerText=footerText,\n footerUrl=footerUrl,\n imageUrl=imageUrl\n )\n await channel.send(embed=embed)", "def get_embed_url(self):\n if not self._oembed:\n return ''\n \n if not self.original_url:\n return ''\n \n return 'https://w.soundcloud.com/player/?url=%s' % (self.original_url)", "def error_embed(message: str, title: Optional[str] = None) -> Embed:\n title = title or random.choice(ERROR_REPLIES)\n embed = Embed(colour=Colours.soft_red, title=title)\n embed.description = message\n return embed", "def get_embed_url(self):\n if not self.get_video_id():\n return ''\n \n if self.get_video_id() == -1:\n return self.original_url\n \n return 'https://www.slideshare.net/slideshow/embed_code/%s' % self.get_video_id()", "async def test_create_user_embed_uses_string_representation_of_user_in_title_if_nick_is_not_available(self):\n ctx = helpers.MockContext(channel=helpers.MockTextChannel(id=1))\n user = helpers.MockMember()\n user.public_flags = unittest.mock.MagicMock(verified_bot=False)\n user.nick = None\n user.__str__ = unittest.mock.Mock(return_value=\"Mr. 
Hemlock\")\n user.colour = 0\n user.created_at = user.joined_at = datetime.now(UTC)\n\n embed = await self.cog.create_user_embed(ctx, user, False)\n\n self.assertEqual(embed.title, \"Mr. Hemlock\")", "def ExecuteEmbed(self):\r\n \r\n Embed = DiscordEmbed(title=\"Test Title 123\", \r\n description=\"Test Description 321\",\r\n color=\"eb5e34\") \r\n Embed.set_timestamp()\r\n \r\n self.WEBHOOK.add_embed(Embed)\r\n Execute = self.WEBHOOK.execute()", "async def embed_editor(self, guild):\n if self.embed_pooling:\n return\n self.embed_pooling = True\n await asyncio.sleep(3.0)\n current_embed = self.games_info[guild.id][0].embeds[0].to_dict()\n current_embed['fields'][0]['value'] = '\\n'.join(f'{p}' for p in self.games_info[guild.id][2]) or \"None\"\n self.embed_pooling = False\n await self.games_info[guild.id][0].edit(embed=discord.Embed.from_dict(current_embed))", "def embed(self, x):\n if self.embedding is None:\n return x\n else:\n return self.embedding(x)", "def get_embed_url(self):\n if not self.original_url:\n return ''\n \n return 'https://vine.co/v/%s/embed/simple' % (self.get_video_id())", "async def Gnomercy(self, ctx):\n \n data = getattr(special_play, inspect.currentframe().f_code.co_name)()\n await self.send_embed(data, ctx)", "async def test_create_user_embed_uses_nick_in_title_if_available(self):\n ctx = helpers.MockContext(channel=helpers.MockTextChannel(id=1))\n user = helpers.MockMember()\n user.public_flags = unittest.mock.MagicMock(verified_bot=False)\n user.nick = \"Cat lover\"\n user.__str__ = unittest.mock.Mock(return_value=\"Mr. Hemlock\")\n user.colour = 0\n user.created_at = user.joined_at = datetime.now(UTC)\n\n embed = await self.cog.create_user_embed(ctx, user, False)\n\n self.assertEqual(embed.title, \"Cat lover (Mr. Hemlock)\")", "def get_embed_url(self):\n if not self.get_video_id():\n return ''\n \n if not self.embed_url:\n self.embed_url = 'https://www.youtube.com/embed/%s?wmode=transparent' % self.get_video_id()\n \n return self.embed_url", "async def discord(self, ctx):\n embed = discord.Embed(title='Join the discord today!', color=0x5643fd, description=\"This server is where \"\n \"all of \"\n \"NOVA's updates and \"\n \"important \"\n \"announcements will pass \"\n \"through. 
The creator of \"\n \"this \"\n \"bot, YeetVegetabales#5313, \"\n \"will also be there testing \"\n \"and letting the communtiy \"\n \"in \"\n \"on things first hand!\")\n embed.set_thumbnail(url='https://images-ext-2.discordapp.net/external/AQCEqCF4Yl_PWAfuA-GReZoDify6'\n '--y4hXOJVkqaDHo/%3Fsize%3D1024/https/cdn.discordapp.com/avatars/709922850953494598'\n '/f78ed19924e8c95abc30f406d47670d7.png')\n embed.add_field(name='Server Invite', value='<:news:730866149109137520> '\n '[Join here](https://discord.gg/Uqh9NXY)')\n await ctx.send(embed=embed)", "def quote_to_embed(self,result):\n thedate = datetime.date.fromtimestamp(result[3])\n thechannel = self.bot.get_channel(result[2])\n themember = thechannel.server.get_member(result[1])\n theauthor = themember.name\n if hasattr(themember, \"nick\"):\n if themember.nick is not None:\n theauthor = themember.nick\n embed = discord.Embed(title=\"Quote #{}\".format(result[4]), description=result[0])\n embed.set_author(name=theauthor, icon_url=themember.avatar_url)\n embed.set_footer(text=\"Saved on: {}\".format(thedate.strftime(\"%d %B %y\")))\n return embed", "async def sayembed(self, ctx, text_channel: typing.Union[discord.TextChannel, str] = None, *, embed_format=None):\n embed_creator_url = \"https://embedbuilder.nadekobot.me/\"\n if isinstance(text_channel, str):\n if isinstance(embed_format, str):\n embed_format = text_channel + embed_format\n text_channel = ctx.channel\n try:\n if not embed_format or not text_channel:\n return await ctx.send(f\"> **This command follows the format from {embed_creator_url}**\")\n else:\n author_name = None\n author_icon_url = None\n embed_footer_text = None\n embed_footer_url = None\n embed_format = json.loads(embed_format)\n embed_image = embed_format.get('image')\n embed_footer = embed_format.get('footer')\n embed_thumbnail = embed_format.get('thumbnail')\n embed_author = embed_format.get('author')\n if embed_author:\n author_name = embed_author.get(\"name\")\n author_icon_url = embed_author.get(\"icon_url\")\n if embed_footer:\n embed_footer_text = embed_footer.get('text')\n embed_footer_url = embed_footer.get('icon_url')\n author_url = embed_format.get('url')\n\n if author_icon_url or author_url:\n embed_format.pop('author')\n if embed_footer_url:\n embed_format.pop('footer')\n if embed_image:\n embed_format.pop('image')\n if embed_thumbnail:\n embed_format.pop('thumbnail')\n\n embed = discord.Embed.from_dict(embed_format)\n\n if embed_image:\n embed.set_image(url=embed_image)\n if embed_footer_url:\n embed.set_footer(text=embed_footer_text, icon_url=embed_footer_url)\n if embed_thumbnail:\n embed.set_thumbnail(url=embed_thumbnail)\n if author_url and author_icon_url:\n embed.set_author(name=author_name, url=author_url, icon_url=author_icon_url)\n elif not author_icon_url and author_url:\n embed.set_author(name=author_name, url=author_url)\n elif not author_url and author_icon_url:\n embed.set_author(name=author_name, icon_url=author_icon_url)\n\n plain_body = embed_format.get('plainText')\n if plain_body:\n return await text_channel.send(plain_body, embed=embed)\n else:\n return await text_channel.send(embed=embed)\n except Exception as e:\n await ctx.send(f\"ERROR - {e}.\\nFollow the format from {embed_creator_url}\")\n log.console(e)", "def embed_with_retry(embeddings: OpenAIEmbeddings, **kwargs: Any) -> Any:\n retry_decorator = _create_retry_decorator(embeddings)\n\n @retry_decorator\n def _embed_with_retry(**kwargs: Any) -> Any:\n return embeddings.client.create(**kwargs)\n\n return 
_embed_with_retry(**kwargs)", "def _get_error_embed(self, title: str, body: str) -> Embed:\n return Embed(\n title=title,\n colour=Colours.soft_red,\n description=body\n )", "async def get_contagious_hack(self, channel: discord.TextChannel, perpetrator_id: int, lenhacks) -> discord.Embed:\n\n timestamp = await utils.get_timestamp()\n\n contagious_embed = discord.Embed(\n title=\"Viruses are Everywhere!\",\n timestamp=datetime.fromtimestamp(timestamp)\n )\n contagious_embed.description = f\"**<@{perpetrator_id}> just made his `{lenhacks}` active hacks contagious, beware!** ⚜️\"\n contagious_embed.color = discord.Color.green()\n\n\n contagious_embed.set_thumbnail(url=\"https://thelanguagesloth.com/media/sloth_classes/Cybersloth.png\")\n contagious_embed.set_footer(text=channel.guild, icon_url=channel.guild.icon.url)\n\n return contagious_embed", "def message_no_reply(cmd, name, data, version = NATIVE_HEADER_VERSION, order=\"<\"):\n return message(0, cmd, name, data, version = version, order=order)" ]
[ "0.6679662", "0.59994054", "0.5966713", "0.5960028", "0.59088314", "0.58046204", "0.57281506", "0.5677738", "0.566666", "0.5624049", "0.56213224", "0.5620119", "0.55971074", "0.5560919", "0.5556021", "0.55310297", "0.54588825", "0.54498017", "0.5408814", "0.53532535", "0.5351325", "0.5341671", "0.533607", "0.5296733", "0.52947515", "0.5289275", "0.5288456", "0.52709615", "0.52627355", "0.5252168" ]
0.6799117
0
Parses a human-readable duration to ms.
def parse_duration_str(self, duration):
    try:
        dl = duration.split(":")
    except Exception:
        return None
    if len(dl) > 4:
        return None
    while len(dl) < 4:
        dl.insert(0, 0)
    ret = int(dl[0]) * 60 * 60 * 24 + int(dl[1]) * \
        60 * 60 + int(dl[2]) * 60 + int(dl[3])
    return ret * 1000
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_duration_string_ms(duration):\n pattern = r'(?P<value>[0-9]+\\.?[0-9]*?)(?P<units>\\D+)'\n matches = list(re.finditer(pattern, duration))\n assert matches, 'Failed to parse duration string %s' % duration\n\n times = {'h': 0, 'm': 0, 's': 0, 'ms': 0}\n for match in matches:\n parsed = match.groupdict()\n times[parsed['units']] = float(parsed['value'])\n\n return (times['h'] * 60 * 60 + times['m'] * 60 + times['s']) * 1000 + times['ms']", "def parse_duration(duration):\n command_parse = re.compile(r\"(!mute|/mute) ?(\\d+)? ?([\\w+\\D]+)?\")\n parsed = command_parse.match(duration.text)\n time = parsed.group(2)\n reason = parsed.group(3)\n\n if not time:\n time = 5\n time = int(time)\n\n if not reason:\n reason = 'for no reason'\n\n until_date = datetime.now() + timedelta(minutes=time)\n return until_date, reason, time", "def parse_time_ms(time_string):\n try:\n return int(1000 * parse_duration(time_string))\n except:\n logging.exception('Unable to extract seconds from {}'.format(time_string))\n logging.info('Defaulting time to 1 second.')\n return 1000", "def parse_duration(duration):\n duration = str(duration).upper().strip()\n\n elements = ELEMENTS.copy()\n\n for pattern in (SIMPLE_DURATION, COMBINED_DURATION):\n if pattern.match(duration):\n found = pattern.match(duration).groupdict()\n del found['time']\n\n elements.update(dict((k, int(v or 0))\n for k, v\n in found.items()))\n\n return datetime.timedelta(days=(elements['days'] +\n _months_to_days(elements['months']) +\n _years_to_days(elements['years'])),\n hours=elements['hours'],\n minutes=elements['minutes'],\n seconds=elements['seconds']) \n \n return ParseError()", "def parse_duration(duration: Union[str, float], hour_format: bool = True) -> str:\n\n if duration == \"LIVE\":\n return duration\n\n time_format = \"%H:%M:%S\" if hour_format else \"%M:%S\"\n\n return time.strftime(time_format, time.gmtime(round(duration)))", "def _duration_to_secs(duration):\n secs = int(duration[:-1])\n if duration[-1] == 's':\n pass\n elif duration[-1] == 'm':\n secs *= 60\n elif duration[-1] == 'h':\n secs *= 60 * 60\n elif duration[-1] == 'd':\n secs *= 60 * 60 * 24\n else:\n raise ValueError('Invalid duration: %r' % duration)\n\n return secs", "def test_get_human_readable_duration():\n\n human_readable = common.get_human_readable_duration(-1)\n assert human_readable == '0 seconds'\n\n human_readable = common.get_human_readable_duration(10)\n assert human_readable == '10 seconds'\n\n human_readable = common.get_human_readable_duration(1000)\n assert human_readable == '16 minutes, 40 seconds'\n\n human_readable = common.get_human_readable_duration(10000)\n assert human_readable == '2 hours, 46 minutes, 40 seconds'", "def _parse_duration(path):\n tag = \"[FlowShaper] Application complete after \" # xxx ms\n found = None\n with (path / \"stdout.txt\").open(mode=\"r\") as stdout:\n found = [line for line in stdout if line.startswith(tag)][-1]\n assert found, f\"Run never completed! 
{path}\"\n\n # Parse the next word as an integer\n return int(found[len(tag):].split()[0])", "def parse_duration(self, duration: int):\n live_duration = [0, 9223372036854775807]\n if duration > 0 and duration not in live_duration:\n x = duration / 1000\n seconds = int(x % 60)\n x /= 60\n minutes = int(x % 60)\n x /= 60\n hours = int(x % 24)\n x /= 24\n days = int(x)\n\n duration = []\n if days > 0:\n if len(str(days)) == 1:\n days = \"0\" + str(days)\n duration.append('{}'.format(days))\n if hours > 0:\n if len(str(hours)) == 1:\n hours = \"0\" + str(hours)\n duration.append('{}'.format(hours))\n if minutes >= 0:\n if len(str(hours)) <= 1:\n hours = \"0\" + str(hours)\n duration.append('{}'.format(minutes))\n if seconds > 0:\n if len(str(seconds)) == 1:\n seconds = \"0\" + str(seconds)\n elif len(str(seconds)) == 0:\n seconds = \"00\"\n duration.append('{}'.format(seconds))\n\n value = ':'.join(duration)\n\n elif duration in live_duration:\n value = \"LIVE\"\n\n return value", "def parse_duration_string_ns(duration):\n pattern = r'(?P<value>[0-9]+\\.?[0-9]*?)(?P<units>\\D+)'\n matches = list(re.finditer(pattern, duration))\n assert matches, 'Failed to parse duration string %s' % duration\n\n times = {'h': 0, 'm': 0, 's': 0, 'ms': 0, 'us': 0, 'ns': 0}\n for match in matches:\n parsed = match.groupdict()\n times[parsed['units']] = float(parsed['value'])\n\n value_ns = (times['h'] * 60 * 60 + times['m'] * 60 + times['s']) * 1000000000\n value_ns += times['ms'] * 1000000 + times['us'] * 1000 + times['ns']\n\n return value_ns", "def parse_duration(duration: int) -> str:\n minutes, seconds = divmod(duration, 60)\n hours, minutes = divmod(minutes, 60)\n days, hours = divmod(hours, 24)\n\n duration = []\n if days > 0:\n duration.append(f\"{days} days\")\n if hours > 0:\n duration.append(f\"{hours} hours\")\n if minutes > 0:\n duration.append(f\"{minutes} minutes\")\n if seconds > 0:\n duration.append(f\"{seconds} seconds\")\n\n return ', '.join(duration)", "def minutes(duration):\n if not duration:\n return 0\n try:\n h, m, s = duration_parts(duration)\n return m\n except (ValueError, TypeError):\n return 0", "def normalize_time_string(duration: str) -> str:\n no_ws_duration = duration.replace(' ', '')\n duration_split = [el for el in re.split(r'(\\D+)', no_ws_duration) if el]\n\n if len(duration_split) != 2:\n raise ValueError(\n f\"Invalid duration string: '{duration}'. 
Expected one value (as integer in string) and one unit, such as '1 hour'.\"\n )\n\n value = duration_split[0]\n unit = duration_split[1]\n\n first_letter_of_unit = unit[0]\n return value + first_letter_of_unit", "def parse_time(s):\n if s[-1].lower() in secs:\n return int(s[:-1]) * secs[s[-1].lower()]\n else:\n return int(s)", "def ms_from_timedelta(td):\n return (td.seconds * 1000) + (td.microseconds / 1000.0)", "def parse_time(s: str):\n return utils.parsers.parse_eng_unit(s, base_unit='s', default=1e-12)", "def get_track_length(duration):\n try:\n length = time.strptime(duration, '%M:%S')\n except ValueError:\n return None\n return length.tm_min * 60 + length.tm_sec", "def _parse_test_duration(duration_str):\n try:\n if duration_str.endswith(\"s\"):\n duration_str = duration_str[:-1]\n return float(duration_str)\n except:\n return None", "def get_duration_us_from_str(duration_str):\n match_res = re.search(r\"\\((\\d+) us\\)\", duration_str)\n if match_res:\n return int(match_res.group(1))\n raise Exception(\"Illegal duration string: \" + duration_str)", "def parse_duration(duration: str) -> int:\n\n def _get_value(match_obj, group_name):\n val = match_obj.group(group_name)\n return int(val) if val is not None else 0\n\n match = DURATION_REGEX.match(duration)\n err_msg = DURATION_MSG.format(pattern=duration)\n\n if not match:\n raise ValueError(err_msg)\n\n hours = _get_value(match, \"hours\")\n minutes = _get_value(match, \"minutes\")\n seconds = _get_value(match, \"seconds\")\n\n result = (hours * 3600) + (minutes * 60) + seconds\n\n if result <= 0:\n raise ValueError(err_msg)\n\n return (hours * 3600) + (minutes * 60) + seconds", "def __get_duration_from_string(cls, dstr):\n mtch = re.search(r'^(\\d+)$', dstr)\n if mtch is not None:\n return int(mtch.group(1))\n mtch = re.search(r'^(\\d+)s(?:ec(?:s)?)?$', dstr)\n if mtch is not None:\n return int(mtch.group(1))\n mtch = re.search(r'^(\\d+)m(?:in(?:s)?)?$', dstr)\n if mtch is not None:\n return int(mtch.group(1)) * 60\n mtch = re.search(r'^(\\d+)h(?:r(?:s)?)?$', dstr)\n if mtch is not None:\n return int(mtch.group(1)) * 3600\n mtch = re.search(r'^(\\d+)d(?:ay(?:s)?)?$', dstr)\n if mtch is not None:\n return int(mtch.group(1)) * 86400\n raise FlashFileException(('String \"%s\" is not a known duration'\n ' format. 
Try 30sec, 10min, 2days etc.') %\n str(dstr))", "def decode(self, data):\r\n return Duration.from_sec(float(data))", "def ParseDurationValue(self, allowFraction=True):\n value = self.ParseDIGITRepeat()\n if value is None:\n return None, None\n if self.the_char in \".,\":\n if not allowFraction:\n raise DateTimeError(\n \"fractional component in duration must have lowest order\")\n format = \"n\" + self.the_char + \"n\"\n value = value + self.ParseFraction()\n else:\n format = \"n\"\n return value, format", "def formatDuration(asciiDuration):\n duration = re.split(\"[+ /]\", asciiDuration)\n\n if len(duration) == 1:\n duration = float(duration[0])\n elif len(duration) == 2:\n nominator = float(duration[0])\n denominator = float(duration[1])\n try:\n duration = nominator / denominator\n except ZeroDivisionError:\n duration = nominator\n elif len(duration) == 3:\n wholeNumber = float(duration[0])\n nominator = float(duration[1])\n denominator = float(duration[2])\n try:\n duration = wholeNumber + nominator / denominator\n except ZeroDivisionError:\n duration = wholeNumber\n else: # should never get here\n duration = 1\n\n if duration < 0.5:\n duration = 16\n elif duration >= 0.5 and duration < .75:\n duration = -8\n elif duration >= 0.75 and duration < 1:\n duration = 8\n elif duration >= 1 and duration < 1.5:\n duration = 4\n elif duration >= 1.5 and duration < 2:\n duration = -4\n elif duration >= 2 and duration < 3:\n duration = 2\n elif duration >= 3 and duration < 4:\n duration = -2\n else:\n duration = 1\n\n return duration", "def _getDuration(v, line, text):\n if \"/\" in v:\n try:\n return eval(v + \".\")\n except:\n raise ValueError(\"invalid duration value '%s' on line %d: %s\" %\n (v, line, text))\n return float(v)", "def translate_duration_to_minutes(text, context=None):\n\n # define regex formats\n formats = ('^(\\d+)$', # match positive integers\n '^(\\d+)\\.(\\d+)?(h|hr|hrs|hour|hours)?$', # match positive decimal numbers (optional numbers after\n # decimal and optional hours nouns)\n '^((\\d+) *?(d|dy|dys|day|days){1})? *?((\\d+) *?(h|hr|hrs|hour|hours){1})? *?((\\d+) *?'\n '(m|min|mins|minute|minutes){1})?$', # match #d#h#m format, each part is optional\n '^(\\d+)?:?(\\d+):(\\d+)$') # match #:#:# format\n\n # init vars for days, hours, and minutes\n days = 0\n hours = 0\n minutes = 0\n\n # set days, hours, and minutes with supported formats\n import re\n matched = False\n for i, format in enumerate(formats):\n m = re.match(format, text, re.I)\n if m != None:\n groups = m.groups('0')\n if i == 0: # positive integer\n minutes = int(text)\n elif i == 1: # match positive decimal numbers (optional numbers after decimal and option h for hours)\n hours = int(groups[0])\n minutes = int(60 * float('0.' 
+ groups[1]))\n elif i == 2: # match #d#h#m format, each part is optional\n days = int(groups[1])\n hours = int(groups[4])\n minutes = int(groups[7])\n elif i == 3: # match #:#:# format\n days = int(groups[0])\n hours = int(groups[1])\n minutes = int(groups[2])\n matched = True\n break # break after we find a match\n\n if matched == False:\n return False, None\n\n # calculate minutes from days, hours, and minutes\n minutes = minutes + (60 * hours) + (1440 * days)\n\n # return total minutes\n return True, minutes", "def test_parse_duration(\n test_input: int,\n expected: datetime.timedelta,\n):\n assert tvmaze.parsers.parse_duration(test_input) == expected", "def __get_duration_from_line(self, line):\n # TODO: catch exceptions\n duration_str = line.split('=')[1]\n return int(duration_str)", "def parse_durn(durn):\n durn_value = 0\n while durn:\n durn = durn.strip()\n if not durn or not durn[0].isdigit():\n return None\n i = 0\n while (i < len(durn)) and (durn[i].isdigit()):\n i += 1\n if i >= len(durn):\n return None\n chunk = int(durn[:i])\n unit = durn[i]\n durn = durn[i + 1:]\n if unit == 'm':\n unit = 60\n elif unit == 's':\n unit = 1\n else:\n return None\n durn_value += chunk * unit\n return durn_value", "def from_str(duration):\n\n if duration in (\"0\", \"+0\", \"-0\"):\n return datetime.timedelta()\n\n pattern = re.compile('([\\d\\.]+)([a-zµμ]+)')\n total = 0\n sign = -1 if duration[0] == '-' else 1\n matches = pattern.findall(duration)\n\n if not len(matches):\n raise Exception(\"Invalid duration {}\".format(duration))\n\n for (value, unit) in matches:\n if unit not in units:\n raise Exception(\n \"Unknown unit {} in duration {}\".format(unit, duration))\n try:\n total += float(value) * units[unit]\n except:\n raise Exception(\n \"Invalid value {} in duration {}\".format(value, duration))\n\n microseconds = total / _microsecond_size\n return datetime.timedelta(microseconds=sign * microseconds)" ]
[ "0.7170057", "0.64764357", "0.64503825", "0.64143217", "0.641086", "0.63711834", "0.636905", "0.63616914", "0.6334855", "0.62753356", "0.6236887", "0.6220458", "0.6178621", "0.6171785", "0.6161835", "0.6149162", "0.6109983", "0.6077356", "0.6068427", "0.6047436", "0.60183203", "0.59578913", "0.5956552", "0.5888993", "0.5878759", "0.5877831", "0.58719647", "0.58591396", "0.5858439", "0.5846265" ]
0.67639077
1
Get the dates where this series has values.
ts.get_dates() gets all dates where ts has values
ts.get_dates(start=d1,end=d2) get all valid dates, d, where d1<=d<=d2
ts.get_dates(candidates=dates) get all valid dates, d, for d in dates
def get_dates(self, candidates=None, start=None, end=None):
    if candidates is not None:
        return [date for date in candidates if date in self.data]
    if start is None:
        start = self.first_date
    if end is None:
        end = self.last_date
    return [date for date in self.data if start <= date <= end]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dates(self):\n dates = []\n d = self.date_a\n while d < self.date_b:\n dates.append(d)\n d += datetime.timedelta(1)\n\n return dates", "def get_date_values(self, all_dates):\n\n if self.day_value == 'all':\n working_dates = all_dates[:]\n non_working_dates = []\n elif self.day_value == 'weekdays':\n working_dates, non_working_dates = self.working_days(all_dates)\n elif self.day_value == 'custom':\n working_dates, non_working_dates = self.working_days(all_dates,\n blacklisted_dates)\n\n # we always want the day before the milestone starts to be a working day\n # regardless if it is a weekday or weekend\n # if it was a non working day the ideal effort curve would not decrease\n # by the end of the actual start date\n day_before = all_dates[0]\n if day_before not in working_dates:\n non_working_dates.remove(day_before)\n working_dates.insert(0, day_before)\n # else it must be in working dates already\n\n return working_dates, non_working_dates", "def _check_dates(self, cr, uid, ids, context=None):\n for act in self.browse(cr, uid, ids, context):\n date_from = self.get_date(act.date_from)\n date_to = self.get_date(act.date_to)\n previous_ids = self.search(cr, uid, [('id','!=',act.id), ('alternative_setting_id','=',act.alternative_setting_id.id)],context=context)\n dates = self.read(cr, uid, previous_ids, ['date_from','date_to'], context=context)\n\n dates = [{'date_from':self.get_date(x['date_from']),'date_to':self.get_date(x['date_to'])} for x in dates]\n for date in dates:\n case0 = date['date_from'] >= date_from and date['date_to'] <= date_to\n\n case1 = date['date_from'] <= date_from and date['date_to'] >= date_to\n\n case2 = date['date_from'] <= date_from and date_from <= date['date_to'] \n\n case3 = date_from <= date['date_from'] and date['date_from'] <= date_to\n \n if case0 or case1 or case2 or case3:\n raise osv.except_osv(_('Error'), _(\"THIS RANGE OF DATE HAVE BEEN FETCHED BEFORE\"))\n return True", "def _check_dates(self, cr, uid, ids, context=None):\n for act in self.browse(cr, uid, ids, context):\n date_from = self.get_date(act.date_from)\n date_to = self.get_date(act.date_to)\n previous_ids = self.search(cr, uid, [('id','!=',act.id)],context=context)\n dates = self.read(cr, uid, previous_ids, ['date_from','date_to'], context=context)\n\n dates = [{'date_from':self.get_date(x['date_from']),'date_to':self.get_date(x['date_to'])} for x in dates]\n for date in dates:\n case0 = date['date_from'] >= date_from and date['date_to'] <= date_to\n\n case1 = date['date_from'] <= date_from and date['date_to'] >= date_to\n\n case2 = date['date_from'] <= date_from and date_from <= date['date_to'] \n\n case3 = date_from <= date['date_from'] and date['date_from'] <= date_to\n \n if case0 or case1 or case2 or case3:\n raise osv.except_osv(_('Error'), _(\"THIS RANGE OF DATE HAVE BEEN FETCHED BEFORE\"))\n return True", "def date_search(data, start_date, end_date):\n # change dates for date search\n data['timestamp'] = pd.to_datetime(data['timestamp']).dt.date\n d1 = datetime.datetime.strptime(f'{start_date}', '%Y-%m-%d').date()\n d2 = datetime.datetime.strptime(f'{end_date}', '%Y-%m-%d').date()\n\n # constrict data by date search parameters\n less_data = data[(data['timestamp'] >= d1) & (data['timestamp'] <= d2)]\n\n return less_data", "def find_within_dates(self,\r\n datefrom=(1,1,1),\r\n dateto=(3000,12,31),\r\n withinrange=None,\r\n orequal=False,\r\n most_recent=False):\r\n\r\n def convert (date):\r\n\r\n if isinstance(date,str):\r\n #If input is a string convert to a tuple\r\n date += 
'-01-01'\r\n date = datefrom.split(DASH)\r\n year, month, day = date[0].replace(PLUS,DASH), date[1], date[2]\r\n date = int(year), int(month), int(day)\r\n if isinstance(date, (list,tuple)):\r\n #If a tuple, convert to a datetime object\r\n date = datetime.datetime(date[0],date[1],date[2])\r\n return date\r\n\r\n if withinrange is None:\r\n #If not range assigned, default to all indexes\r\n withinrange = self.indexes()\r\n\r\n datefrom = convert(datefrom)\r\n dateto = convert(dateto)\r\n\r\n\r\n if not orequal:\r\n return [a_temp for a_temp in withinrange\r\n if self.get_note(str(Index(a_temp))).date(most_recent=most_recent,\r\n short=True,\r\n convert=True)> datefrom\r\n and self.get_note(str(Index(a_temp))).date(most_recent=most_recent,\r\n short=True,\r\n convert=True) < dateto]\r\n return [a_temp for a_temp in withinrange\r\n if self.get_note(str(Index(a_temp))).date(most_recent=most_recent,\r\n short=True,\r\n convert=True) >= datefrom and\r\n self.get_note(str(Index(a_temp))).date(most_recent=most_recent,\r\n short=True,\r\n convert=True) <= dateto]", "def get_values(self, dates):\n ret = []\n for d in dates:\n ret.append(self.data[d])\n return ret", "def getDates(self, startDate, endDate, endpoint=False):\n\n if self._type == 'M2': # If MERRA2 data type\n return self._merra2Dates(startDate, endDate, endpoint)\n else:\n raise Exception('Data type not supported : {}'.format(self._type))", "def get_values_between_dates(self, date_start=None, date_end=None, dt_max=0.0, start_strict=False, end_strict=True):\n \n if start_strict:\n start_diff_operator = '>'\n else:\n start_diff_operator = '>='\n if end_strict:\n end_diff_operator = '<'\n else:\n end_diff_operator = '<='\n \n if dt_max < 0.:\n raise Exception('dt_max must be > 0')\n \n if (date_start is not None) and (date_end is not None):\n return self.read_as_pandas_dataframe(\"SELECT * FROM FILEINFO WHERE datetime(date_data) %s datetime(?) AND datetime(date_data) %s datetime(?) ORDER BY datetime(date_data)\"%(start_diff_operator, end_diff_operator), \\\n params=[self.date2str(date_start-timedelta(dt_max)), self.date2str(date_end+timedelta(dt_max))])\n elif (date_start is not None):\n return self.read_as_pandas_dataframe(\"SELECT * FROM FILEINFO WHERE datetime(date_data) %s datetime(?) ORDER BY datetime(date_data)\"%start_diff_operator, \\\n params=[self.date2str(date_start-timedelta(dt_max))])\n elif (date_end is not None):\n return self.read_as_pandas_dataframe(\"SELECT * FROM FILEINFO WHERE datetime(date_data) %s datetime(?) 
ORDER BY datetime(date_data)\"%end_diff_operator, \\\n params=[self.date2str(date_end+timedelta(dt_max))])\n else:\n return self.read_as_pandas_dataframe(\"SELECT * FROM FILEINFO ORDER BY datetime(date_data)\")", "def calc_range(self, months_offset: int = None, from_dt: Union[dt.date, None] = None,\n to_dt: Union[dt.date, None] = None) -> Tuple[dt.date, dt.date]:\n self.setup_class()\n if months_offset is not None or from_dt is not None or to_dt is not None:\n if months_offset is not None:\n earlier = date_offset_foll(self.last_idx, calendar=CDay(calendar=self.sweden),\n months_offset=-months_offset)\n assert earlier >= self.first_idx, 'Function calc_range returned earlier date < series start'\n later = self.last_idx\n else:\n if from_dt is not None and to_dt is None:\n assert from_dt >= self.first_idx, 'Function calc_range returned earlier date < series start'\n earlier, later = from_dt, self.last_idx\n elif from_dt is None and to_dt is not None:\n assert to_dt <= self.last_idx, 'Function calc_range returned later date > series end'\n earlier, later = self.first_idx, to_dt\n elif from_dt is not None and to_dt is not None:\n assert to_dt <= self.last_idx and \\\n from_dt >= self.first_idx, 'Function calc_range returned dates outside series range'\n earlier, later = from_dt, to_dt\n else:\n earlier, later = from_dt, to_dt\n\n earlier = date_fix(earlier)\n later = date_fix(later)\n\n while not self.tsdf.index.isin([earlier]).any():\n earlier -= dt.timedelta(days=1)\n\n while not self.tsdf.index.isin([later]).any():\n later += dt.timedelta(days=1)\n\n else:\n earlier, later = self.first_idx, self.last_idx\n\n return earlier, later", "def _exclude_dates(self, X, y, exclude_dates):\n self.exclude_dates = exclude_dates\n if len(self.exclude_dates) != 0:\n for exclude_date_range in self.exclude_dates:\n t0,t1 = [datetimeify(dt) for dt in exclude_date_range]\n inds = (y.index<t0)|(y.index>=t1)\n X = X.loc[inds]\n y = y.loc[inds]\n return X,y", "def dates_inbetween(self, start, end):\n\n return [start + timedelta(days=i) for i in xrange((end - start).days + 1)]", "def dates_between_two_dates(start_date, end_date, frequency='m', complete_period=True):\n year1 = None\n month1 = None\n day1 = None\n year2 = None\n month2 = None\n day2 = None\n if '/' in start_date:\n year1 = str(start_date).split('/')[2]\n month1 = str(start_date).split('/')[1]\n day1 = str(start_date).split('/')[0]\n\n year2 = str(end_date).split('/')[2]\n month2 = str(end_date).split('/')[1]\n day2 = str(end_date).split('/')[0]\n\n\n elif '-' in start_date:\n year1 = str(start_date).split('-')[2]\n month1 = str(start_date).split('-')[1]\n day1 = str(start_date).split('-')[0]\n\n year2 = str(end_date).split('-')[2]\n month2 = str(end_date).split('-')[1]\n day2 = str(end_date).split('-')[0]\n\n list_official_dates = [date(int(year1), int(month1), int(day1))]\n\n sdate = date(int(year1), int(month1), int(day1)) # start date\n edate = date(int(year2), int(month2), int(day2)) # end date\n dates = pandas.date_range(sdate, edate, freq=frequency, normalize=True)\n\n\n for i in range(len(dates)):\n list_official_dates.append(dates[i])\n\n list_official_dates.append(date(int(year2), int(month2), int(day2)))\n\n\n for i in range(len(list_official_dates)):\n list_official_dates[i] = str(list_official_dates[i]).replace(' 00:00:00', '')\n\n\n return list_official_dates", "def get_dates(df, frequency=\"weekly\"):\n if frequency == \"daily\":\n interval = 1\n\n elif frequency == \"monthly\":\n interval = 28\n\n else:\n interval = 7\n\n # Get dates 
from dateframe\n dates = df[\"date\"]\n\n # cast to datetime objects\n dates = pd.to_datetime(dates)\n\n # Create list of dates\n date_list = []\n\n # Get start date of simulation\n start = dates.min()\n date_list.append(start)\n\n # Iterate by interval until end is reached\n next_date = start\n while next_date < dates.max():\n next_date = next_date + timedelta(days=interval)\n\n if next_date <= dates.max():\n date_list.append(next_date)\n\n return date_list", "def check_dataset_dates(self):\n # TODO: graph traverse and date checking\n pass", "def dates_between(sdate, edate):\n\n days = dt.datetime.strptime(edate, '%Y%m%d') - \\\n dt.datetime.strptime(sdate, '%Y%m%d')\n\n all_dates = [dt.datetime.strptime(sdate, '%Y%m%d') + dt.timedelta(days=d)\n for d in range(days.days + 1)]\n\n return all_dates", "def daily_date_range(date1, date2):\n num_days = (date2-date1).days\n return np.array([datetime(date1.year, date1.month, date1.day, 0)+timedelta(days=i) for i in range(num_days)])", "def get_excluded_dates(self):\n raise NotImplementedError", "def get_excluded_dates(self):\n raise NotImplementedError", "def checkValidDate(self, nextDate, startDate):\n\t\tcurrentDay = nextDate.day\n\t\tcurrentMonth = nextDate.month\n\t\tcurrentYear = nextDate.year\n\t\t\n\t\tstartDay = startDate.day\n\t\tcurrentDate = datetime.datetime(currentYear, currentMonth, startDay)\n\t\t\n\t\tminDaysDiff = 99\n\t\tresult = None\n\t\t\n\t\tfor dataPoint in self.dataPoints:\n\t\t\tdataPointDate = dataPoint.getDate()\n\t\t\tdaysDiff = (dataPointDate - currentDate).days\n\t\t\t\t\t\t\n\t\t\tif daysDiff == 0:\n\t\t\t\treturn dataPointDate\n\t\t\telif daysDiff >= 0 and daysDiff < minDaysDiff:\n\t\t\t\tminDaysDiff = daysDiff\n\t\t\t\tresult = dataPointDate\n\t\treturn result", "def getValuesBetween(self, startIndex, endIndex):\r\n assert (type(startIndex) == type(endIndex))\r\n if isinstance(startIndex, datetime.datetime):\r\n startIndex = self._dateTimeHash.get(startIndex, None)\r\n endIndex = self._dateTimeHash.get(endIndex, None)\r\n if startIndex is None or endIndex is None:\r\n return np.asarray([np.nan])\r\n if startIndex < endIndex:\r\n if endIndex + 1 > len(self._listTimePoints):\r\n return np.asarray([np.nan])\r\n vals = self._listTimePoints[startIndex:endIndex + 1, -1:]\r\n else:\r\n if startIndex + 1 > len(self._listTimePoints)-1:\r\n return np.asarray([np.nan])\r\n vals = self._listTimePoints[endIndex:startIndex + 1, -1:]\r\n for mask in self._maskFunctions:\r\n getVals = lambda x : mask(x[0])\r\n vals = np.apply_along_axis(getVals, axis=1, arr=vals)\r\n return vals", "def get_ticker_start_and_end_dates(df_data):\n if df_data.empty:\n start_date, end_date = get_start_and_end_dates()\n else:\n new_start_date = df_data.index.max() + timedelta(days=1)\n logger.debug(f'new start date = {new_start_date}')\n start_date, end_date = get_start_and_end_dates(new_start_date)\n logger.debug(f'returning {start_date} and {end_date} from get_ticker_start_and_end_dates')\n return start_date, end_date", "def working_days(self, dates, blacklisted_dates=None):\n\n if not blacklisted_dates:\n work_dates = [date2 for date2 in dates if date2.weekday() < 5]\n else:\n work_dates = [date2 for date2 in dates \\\n if date2 not in set(blacklisted_dates)]\n non_working_dates = [date2 for date2 in dates \\\n if date2 not in set(work_dates)]\n\n return work_dates, non_working_dates", "def filter_data_by_date(df, ticker, start_date, end_date):\n if start_date is None:\n start_date = MIN_DATE\n\n if end_date is None:\n end_date = MAX_DATE\n\n 
filtered = df[\n (df[\"ticker\"] == ticker) & (df[\"date\"] >= start_date) & (df[\"date\"] <= end_date)\n ]\n return filtered", "def _get_output_date_range_for(self, from_input_dt, to_input_dt):\n return from_input_dt, to_input_dt", "def get_day_range(self, order_date, reg_date):\n\t\tdays = (order_date - reg_date).days\n\t\tfor day_range in self.day_ranges:\n\t\t\tif days >= day_range[0] and days <= day_range[1]:\n\t\t\t\treturn day_range\n\t\treturn []", "def search_by_date_range(self, tl):\n print(\"Search by date range\")\n dates = input(\"Please use YYYYMMDD-YYYYMMDD for date range: \")\n date1_str, date2_str = dates.split('-')\n try:\n date1 = datetime.datetime.strptime(date1_str, utils.fmt)\n date2 = datetime.datetime.strptime(date2_str, utils.fmt)\n except ValueError as err:\n utils.print_error(err)\n return self.search_by_date_range(tl)\n else:\n return tl.findall_date_range(date1, date2)", "def check_daterange(sdate, edate, st_dict):\n \n reflist = pd.date_range(start=sdate, end=edate, freq='MS').to_list()\n checklist = [] \n for y in st_dict['years']:\n for m in st_dict[y]['months']:\n checklist.append(pd.to_datetime(y + '-' + m))\n \n if list(set(checklist) & set(reflist)):\n return True", "def period_check(init_dates, fcst_dates):\n check_dates(init_dates)\n check_dates(fcst_dates)\n \n if max(init_dates) > min(fcst_dates):\n raise ValueError('Forecast date, %s comes before initialisation '\\\n 'date, %s.' % (min(fcst_dates), max(init_dates)))\n\n fcst_dates = change_zeroth_hour(fcst_dates)\n return init_dates, fcst_dates", "def generate_dates(self, event):\n dates = []\n dtstart = self.tz_localize(event['dtstart'].dt)\n if 'dtend' in event:\n dtend = self.tz_localize(event['dtend'].dt)\n # DTEND is exclusive, so the real ending date is one day before\n if is_date(dtend):\n dtend -= datetime.timedelta(days=1)\n else:\n dtend = None\n # Normal case: no repetition\n if not 'rrule' in event:\n dates.append(self.format_dateinterval(dtstart, dtend))\n # Handle recurrent events\n else:\n ruleset = rrule.rruleset()\n rule = rrule.rrulestr(event['rrule'].to_ical().decode('utf-8'),\n dtstart=dtstart)\n ruleset.rrule(rule)\n # Parse all types of recurrence constraints\n for prop in ['rdate', 'exdate']:\n if not prop in event:\n continue\n # This can return either a single value or a list, so it's\n # a mess...\n prop_dates = event[prop]\n if not isinstance(prop_dates, list):\n prop_dates = [prop_dates]\n for prop_date in prop_dates:\n # This is a vDDDLists\n for vddd in prop_date.dts:\n dt = vddd.dt\n # EXDATE and RDATE are allowed to be dates,\n # convert them to datetime. TODO: should the time\n # be midnight, or the time from DTSTART?\n if is_date(dt):\n dt = datetime.datetime.combine(dt, datetime.time())\n dt = self.tz_localize(dt)\n ruleset.__getattribute__(prop)(dt)\n # We now have a ruleset that expands to a list of starting\n # date or datetime, one for each repetition.\n for dtstart_repeat in itertools.islice(ruleset, MAX_RECURRING_EVENTS):\n # Handle case where dtstart is a date, since rrule always\n # returns datetime objects.\n if is_date(dtstart):\n dtstart_repeat = dtstart_repeat.date()\n # Compute matching dtend if applicable\n if dtend == None:\n dtend_repeat = None\n else:\n dtend_repeat = dtend + (dtstart_repeat - dtstart)\n dates.append(self.format_dateinterval(dtstart_repeat, dtend_repeat))\n return dates" ]
[ "0.64756805", "0.6388395", "0.6278105", "0.624782", "0.6238439", "0.617077", "0.6128857", "0.60810596", "0.60660315", "0.60517985", "0.59450334", "0.59196717", "0.58980954", "0.5856224", "0.5839549", "0.5835172", "0.58209354", "0.5815195", "0.5815195", "0.5802644", "0.57646024", "0.57526636", "0.56874585", "0.5677234", "0.56533074", "0.56360507", "0.5630495", "0.5609375", "0.5605391", "0.5582294" ]
0.70858955
0
Get the values for the specified dates.
def get_values(self, dates):
    ret = []
    for d in dates:
        ret.append(self.data[d])
    return ret
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_returns(self, dates):\n return get_price_returns(self, dates)", "def get_returns(self, dates):\n return get_price_returns(self, dates)", "def Dates(self):\n data = self.DictData()\n dates = [ row[ \"Date\"] for row in data ]\n return dates", "def get_date_values(self, all_dates):\n\n if self.day_value == 'all':\n working_dates = all_dates[:]\n non_working_dates = []\n elif self.day_value == 'weekdays':\n working_dates, non_working_dates = self.working_days(all_dates)\n elif self.day_value == 'custom':\n working_dates, non_working_dates = self.working_days(all_dates,\n blacklisted_dates)\n\n # we always want the day before the milestone starts to be a working day\n # regardless if it is a weekday or weekend\n # if it was a non working day the ideal effort curve would not decrease\n # by the end of the actual start date\n day_before = all_dates[0]\n if day_before not in working_dates:\n non_working_dates.remove(day_before)\n working_dates.insert(0, day_before)\n # else it must be in working dates already\n\n return working_dates, non_working_dates", "def get_values_by_date(now, request):\n reg_data = get_reg_data(now, request)\n data = {\n \"is_after_7d_before_last_instruction\":\n is_after_7d_before_last_instruction(now, request),\n \"is_after_grade_submission_deadline\":\n is_before_bof_term(now, request),\n \"is_after_last_day_of_classes\":\n not is_before_last_day_of_classes(now, request),\n \"is_after_start_of_registration_display_period\":\n reg_data[\"after_start\"],\n \"is_after_start_of_summer_reg_display_period1\":\n reg_data[\"after_summer1_start\"],\n \"is_after_start_of_summer_reg_display_periodA\":\n reg_data[\"after_summerA_start\"],\n \"is_before_eof_7days_of_term\":\n is_before_eof_7d_after_class_start(now, request),\n \"is_before_end_of_finals_week\":\n is_before_eof_finals_week(now, request),\n \"is_before_end_of_registration_display_period\":\n reg_data[\"after_start\"],\n \"is_before_end_of_summer_reg_display_periodA\":\n reg_data[\"after_summerA_start\"],\n \"is_before_end_of_summer_reg_display_period1\":\n reg_data[\"after_summer1_start\"],\n \"is_before_first_day_of_term\":\n is_before_bof_term(now, request),\n \"is_before_last_day_of_classes\":\n is_before_last_day_of_classes(now, request),\n \"myplan_peak_load\": during_myplan_peak_load(now, request),\n \"reg_period1_started\": reg_data[\"period1_started\"],\n \"is_summer\": is_in_summer_quarter(request),\n \"is_after_summer_b\": is_in_summer_b_term(request),\n \"in_coursevel_fetch_window\": in_coursevel_fetch_window(request),\n \"within_grading_period\": within_grading_period(request),\n \"comparison_date\": get_comparison_datetime(request)\n }\n try:\n last_term = get_previous_quarter(request)\n data[\"current_summer_term\"] = \"{},summer\".format(last_term.year)\n data[\"last_term\"] = \"{},{}\".format(last_term.year, last_term.quarter)\n except Exception:\n log_err(logger, \"get_previous_quarter\", traceback, request)\n return data", "def get_values_between_dates(self, date_start=None, date_end=None, dt_max=0.0, start_strict=False, end_strict=True):\n \n if start_strict:\n start_diff_operator = '>'\n else:\n start_diff_operator = '>='\n if end_strict:\n end_diff_operator = '<'\n else:\n end_diff_operator = '<='\n \n if dt_max < 0.:\n raise Exception('dt_max must be > 0')\n \n if (date_start is not None) and (date_end is not None):\n return self.read_as_pandas_dataframe(\"SELECT * FROM FILEINFO WHERE datetime(date_data) %s datetime(?) AND datetime(date_data) %s datetime(?) 
ORDER BY datetime(date_data)\"%(start_diff_operator, end_diff_operator), \\\n params=[self.date2str(date_start-timedelta(dt_max)), self.date2str(date_end+timedelta(dt_max))])\n elif (date_start is not None):\n return self.read_as_pandas_dataframe(\"SELECT * FROM FILEINFO WHERE datetime(date_data) %s datetime(?) ORDER BY datetime(date_data)\"%start_diff_operator, \\\n params=[self.date2str(date_start-timedelta(dt_max))])\n elif (date_end is not None):\n return self.read_as_pandas_dataframe(\"SELECT * FROM FILEINFO WHERE datetime(date_data) %s datetime(?) ORDER BY datetime(date_data)\"%end_diff_operator, \\\n params=[self.date2str(date_end+timedelta(dt_max))])\n else:\n return self.read_as_pandas_dataframe(\"SELECT * FROM FILEINFO ORDER BY datetime(date_data)\")", "def get_metric_for_all_dates(inargs, exp_id):\n date_list = []\n # Loop over dates and collect data\n for date in h.make_timelist(inargs.date_start, inargs.date_stop,\n inargs.hours_inc):\n date_list.append(get_metric_for_one_day(inargs, exp_id, date))\n return np.array(date_list)", "def get_returns(self, start_date=None, end_date=None, stocks=None):\n if stocks is None:\n stocks = self.stocks\n\n if start_date is None:\n start_date = self.dates[0]\n\n if end_date is None:\n end_date = self.dates[-1]\n\n if type(end_date) is not datetime.datetime and type(end_date) is not pd.tslib.Timestamp:\n end_date = datetime.datetime.strptime(end_date, \"%Y-%m-%d\")\n\n if type(start_date) is not datetime.datetime and type(start_date) is not pd.tslib.Timestamp:\n start_date = datetime.datetime.strptime(start_date, \"%Y-%m-%d\")\n\n dates_to_check = self.dates[self.dates.index(start_date): self.dates.index(end_date) + 1]\n\n stock_money = []\n\n for date in dates_to_check:\n stock_money += [self.get_day_returns(stocks, date)]\n\n stock_money = pd.DataFrame({\"stock value\": stock_money}).set_index([self.dates])\n\n return_info = join_features(stock_money, self.cash)\n return_info['value'] = return_info['cash'] + return_info['stock value']\n\n return return_info", "def _series_date_value_iter(data_points: List[dict]) -> Generator:\n for data_point in data_points:\n yield data_point[\"generic:ObsDimension\"][\"@value\"], data_point[\"generic:ObsValue\"][\"@value\"]", "def _get_all_data(self, start_date, end_date):\n return [self._prep_data(self._get_input_data(var, start_date,\n end_date),\n self.var.func_input_dtype)\n for n, var in enumerate(self.variables)]", "def get_daily_goals(self, surface, dates):\n iterator = DjuDay.objects.filter(day__in=dates).order_by('day')\n return [\n [x.day, x.average * DJU_TO_KWH * KWH_TO_EUROS * surface] for x in iterator\n ]", "def getPurchaseDates(self):\n\t\treturn self.dateList", "def dates(self):\n dates = []\n d = self.date_a\n while d < self.date_b:\n dates.append(d)\n d += datetime.timedelta(1)\n\n return dates", "def getComparableDateValues(self, days):\n dates = []\n for i in days:\n date = i[:10]\n dates.append(date)\n return dates", "def process_dates(reports: dict):\n confirmed = reports[CONFIRMED]\n deaths = reports[DEATHS]\n recovered = reports[RECOVERED]\n\n dates1 = confirmed.columns.to_numpy()[4:]\n dates2 = deaths.columns.to_numpy()[4:]\n dates3 = deaths.columns.to_numpy()[4:]\n assert np.array_equal(dates1, dates2)\n assert np.array_equal(dates2, dates3)\n\n return dates1.tolist()", "def daily_price():\n for item in data:\n if valid_date(item):\n yield data[item]['daily_value']", "def get_dates(self):\r\n return self.__dates", "def get_dates(self, candidates=None, start=None, end=None):\n if 
candidates is not None:\n return [date for date in candidates if date in self.data]\n if start is None:\n start = self.first_date\n if end is None:\n end = self.last_date\n return [date for date in self.data if start <= date <= end]", "def daily_values(self) -> List[RecipeObjectNutrientsCalories]:\n return self._daily_values", "def getValuesBetween(self, startIndex, endIndex):\r\n assert (type(startIndex) == type(endIndex))\r\n if isinstance(startIndex, datetime.datetime):\r\n startIndex = self._dateTimeHash.get(startIndex, None)\r\n endIndex = self._dateTimeHash.get(endIndex, None)\r\n if startIndex is None or endIndex is None:\r\n return np.asarray([np.nan])\r\n if startIndex < endIndex:\r\n if endIndex + 1 > len(self._listTimePoints):\r\n return np.asarray([np.nan])\r\n vals = self._listTimePoints[startIndex:endIndex + 1, -1:]\r\n else:\r\n if startIndex + 1 > len(self._listTimePoints)-1:\r\n return np.asarray([np.nan])\r\n vals = self._listTimePoints[endIndex:startIndex + 1, -1:]\r\n for mask in self._maskFunctions:\r\n getVals = lambda x : mask(x[0])\r\n vals = np.apply_along_axis(getVals, axis=1, arr=vals)\r\n return vals", "def dates(start, end):\n \n sel4 = [\n func.min(Measurement.tobs),\n func.max(Measurement.tobs),\n func.avg(Measurement.tobs),]\n\n if end is None: \n start_date = dt.datetime.strptime(start , '%Y-%m-%d')\n temp_analysis = session.query(*sel4).filter(Measurement.date >= start_date).all() \n else\n end_date = dt.datetime.strptime(end , '%Y-%m-%d')\n temp_analysis = session.query(*sel4).filter(Measurement.date.between (start_date, end_date)).all() \n\n# Create a dictionary from the row data and append to a list of all_dates\n all_dates = []\n for Measurement.tobs in temp_analysis:\n date_dict = {}\n date_dict['TMIN'] = func.min(Measurement.tobs)\n date_dict['TMAX'] = func.max(Measurement.tobs)\n date_dict['TAVG'] = func.avg(Measurement.tobs)\n all_dates.append(date_dict)\n\n return jsonify(date_dict)", "def dates(self):\n pass", "def get_dates():\n return {\n \"years\": range(datetime.date.today().year, datetime.date.today().year + 5),\n \"months\": range(1, 13),\n \"days\": range(1, 32)\n }", "def get_udis_series(initial_date: str, end_date:str) -> dict:\n\n url = f\"{BANXICO_URL}/{BANXICO_UDIS_SERIE}/datos/{initial_date}/{end_date}\"\n udis_response = _request_handler.get(url, headers=_headers)\n udis_values_per_day = {}\n response = {}\n if udis_response:\n name = udis_response.get(\"bmx\", {}).get(\"series\", [])[0].get(\"titulo\", \"\")\n dates = udis_response.get(\"bmx\", {}).get(\"series\", [])[0].get(\"datos\", \"\")\n if dates:\n for date in dates:\n udis_values_per_day[date.get(\"fecha\", \"\")] = float(date.get(\"dato\"))\n\n max_udi_value = (max(dates, key=lambda x:float(x.get(\"dato\", -1))))\n min_udi_value = (min(dates, key=lambda x:float(x.get(\"dato\", -1))))\n average_udi = float(sum(float(d['dato']) for d in dates)) / len(dates)\n response= {\n \"name\": name,\n \"average_udi_value\": average_udi,\n \"max_udi_value\": {\n \"value\": float(max_udi_value.get(\"dato\", -1)),\n \"date\": max_udi_value.get(\"fecha\", -1)\n },\n \"min_udi_value\":{\n \"value\": float(min_udi_value.get(\"dato\", -1)),\n \"date\": min_udi_value.get(\"fecha\", -1)\n },\n \"dates_udis\": udis_values_per_day\n }\n\n return response\n else:\n return {}", "def _extract_series_date_value_mapping(series):\n return {date: value for date, value in _series_date_value_iter(_extract_data_points_from_series(series))}", "def getSelectedDates(view):\n context = aq_inner(view.context)\n 
request = view.request\n ctx_selected_dates = request.SESSION.get('rendezvous', {})\n uid = context.UID()\n if uid in ctx_selected_dates:\n return ctx_selected_dates[uid].keys()\n else:\n return context.getPropositionsByDates().keys()", "def fetch_daterange(self, start_date, end_date=None, table='fashion'):\n\n if end_date is None:\n end_date = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')\n\n end_date_obj = datetime.strptime(end_date, '%Y-%m-%d %H:%M:%S')\n end_day = '{:04d}-{:02d}-{:02d}'.format(end_date_obj.year, \n end_date_obj.month, \n end_date_obj.day)\n\n start_date_obj = datetime.strptime(start_date, '%Y-%m-%d %H:%M:%S')\n curr_day = '{:04d}-{:02d}-{:02d}'.format(start_date_obj.year, \n start_date_obj.month, \n start_date_obj.day)\n \n record_lookup_stmt = \"SELECT * FROM {} WHERE date=%s AND t>%s and t<%s\".format(table)\n \n record_list = []\n while curr_day <= end_day: \n record_list += self.session.execute(record_lookup_stmt, [curr_day, \n start_date,\n end_date])\n start_date_obj += timedelta(days=1)\n curr_day = '{:04d}-{:02d}-{:02d}'.format(start_date_obj.year, \n start_date_obj.month, \n start_date_obj.day) \n\n return record_list", "def getDates(self, startDate, endDate, endpoint=False):\n\n if self._type == 'M2': # If MERRA2 data type\n return self._merra2Dates(startDate, endDate, endpoint)\n else:\n raise Exception('Data type not supported : {}'.format(self._type))", "def converted_values(self):\n for i in range(11):\n lista = []\n for j in self.clean_double_values():\n if self.get_days_index()[i+1] > j[1] >= \\\n self.get_days_index()[i]:\n lista.append(self.calculate_data_value(j[0].item()))\n if not self.data_type_name == 'rain':\n yield lista\n else:\n yield [sum(lista)]", "def get_records_date(start_date, end_date):\n start = minus_one(start_date)\n temp = pd.read_sql_query(_query['by_date'],\n connect(),\n params=[start, end_date])\n return temp" ]
[ "0.6465752", "0.6465752", "0.6337017", "0.63190037", "0.6302423", "0.63015485", "0.6172592", "0.6114428", "0.6096258", "0.60900587", "0.60705006", "0.6069238", "0.60506237", "0.6049215", "0.6042307", "0.59802204", "0.59703356", "0.59669536", "0.5939113", "0.59387743", "0.5902314", "0.58783895", "0.5876928", "0.58576906", "0.5824534", "0.58121157", "0.57763004", "0.5761013", "0.5753088", "0.5728187" ]
0.8803145
0
Calculate the Pearson correlation coefficient between this series and another on all days when they both have values. Uses scipy.stats.pearsonr to calculate it.
def correlation(self, other):
    dates = self.get_dates(other.get_dates())
    #print(len(self.get_values(dates)))
    #print(len(other.get_values(dates)))
    #print(self.get_values(dates))
    r, p = stats.pearsonr(self.get_values(dates), other.get_values(dates))
    return r
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pearsonCorrelation(x, y):\n\tsum_sq_x = 0\n\tsum_sq_y = 0\n\tsum_coproduct = 0\n\tmean_x = x[0]\n\tmean_y = y[0]\n\tif len(x) != len(y):\n\t\traise StatsError(\"Data sets are of different lengths.\")\n\tn = len(x)\n\tfor i in range(1,n):\n\t\tsweep = i / (i+1.0)\n\t\tdelta_x = x[i] - mean_x\n\t\tdelta_y = y[i] - mean_y\n\t\tsum_sq_x += delta_x * delta_x * sweep\n\t\tsum_sq_y += delta_y * delta_y * sweep\n\t\tsum_coproduct += delta_x * delta_y * sweep\n\t\tmean_x += delta_x / (i+1.0)\n\t\tmean_y += delta_y / (i+1.0)\n\tpop_sd_x = math.sqrt( sum_sq_x / n )\n\tpop_sd_y = math.sqrt( sum_sq_y / n )\n\tcov_x_y = sum_coproduct / n\n\tr = cov_x_y / (pop_sd_x * pop_sd_y)\n\tz = math.fabs(r) * math.sqrt(n) / math.sqrt(2.0)\n\tp = Prob_Z(z)\n\tif not (0.0 <= p <= 1.0):\n\t\traise StatsError(\"Invalid P-value of %r.\" % r)\n\treturn (r, p, n)", "def calculate_correlation(self):\n self.network.index_nodes()\n self._calculate_dist()\n pearson_correlation, pearson_pvalue = scipy.stats.pearsonr(self.dist[:,0], self.dist[:,1])\n spearman_correlation, spearman_pvalue = scipy.stats.spearmanr(self.dist[:,0], self.dist[:,1])\n return pearson_correlation, pearson_pvalue, spearman_correlation, spearman_pvalue", "def cor(x, y):\n scaler = TimeSeriesScalerMeanVariance()\n x_norm = scaler.fit_transform(x)\n y_norm = scaler.fit_transform(y)\n pcc = np.mean(x_norm * y_norm) # Pearson correlation coefficients\n d = np.sqrt(2.0 * (1.0 - pcc + 1e-9)) # correlation-based similarities\n return np.sum(d)", "def _pearsonr(x: xr.DataArray, y: xr.DataArray, monitor: Monitor) -> xr.Dataset:\n with monitor.starting(\"Calculate Pearson correlation\", total_work=6):\n n = len(x['time'])\n\n xm, ym = x - x.mean(dim='time'), y - y.mean(dim='time')\n xm['time'] = [i for i in range(0, len(xm.time))]\n ym['time'] = [i for i in range(0, len(ym.time))]\n xm_ym = xm * ym\n r_num = xm_ym.sum(dim='time')\n xm_squared = np.square(xm)\n ym_squared = np.square(ym)\n r_den = np.sqrt(xm_squared.sum(dim='time') * ym_squared.sum(dim='time'))\n r_den = r_den.where(r_den != 0)\n r = r_num / r_den\n\n # Presumably, if abs(r) > 1, then it is only some small artifact of floating\n # point arithmetic.\n # At this point r should be a lon/lat dataArray, so it should be safe to\n # load it in memory explicitly. This may take time as it will kick-start\n # deferred processing.\n # Comparing with NaN produces warnings that can be safely ignored\n default_warning_settings = np.seterr(invalid='ignore')\n with monitor.child(1).observing(\"task 1\"):\n negativ_r = r.values < -1.0\n with monitor.child(1).observing(\"task 2\"):\n r.values[negativ_r] = -1.0\n with monitor.child(1).observing(\"task 3\"):\n positiv_r = r.values > 1.0\n with monitor.child(1).observing(\"task 4\"):\n r.values[positiv_r] = 1.0\n np.seterr(**default_warning_settings)\n r.attrs = {'description': 'Correlation coefficients between'\n ' {} and {}.'.format(x.name, y.name)}\n\n df = n - 2\n t_squared = np.square(r) * (df / ((1.0 - r.where(r != 1)) * (1.0 + r.where(r != -1))))\n\n prob = df / (df + t_squared)\n with monitor.child(1).observing(\"task 5\"):\n prob_values_in = prob.values\n with monitor.child(1).observing(\"task 6\"):\n prob.values = betainc(0.5 * df, 0.5, prob_values_in)\n prob.attrs = {'description': 'Rough indicator of probability of an'\n ' uncorrelated system producing datasets that have a Pearson'\n ' correlation at least as extreme as the one computed from'\n ' these datsets. 
Not entirely reliable, but reasonable for'\n ' datasets larger than 500 or so.'}\n\n retset = xr.Dataset({'corr_coef': r,\n 'p_value': prob})\n return retset", "def series_corr(series, other, method=\"pearson\", min_periods=None):\n op = DataFrameCorr(other=other, method=method, min_periods=min_periods)\n return op(series)", "def pearsoncor(X, Y, code = 0):\r\n n = len(X)\r\n sx = ssd(X)\r\n sy = ssd(Y)\r\n xbar = float(sum(X)) / n\r\n ybar = float(sum(Y)) / n\r\n if code == 0:\r\n return sum([(x - xbar) * (y - ybar) for x, y in zip (X,Y)])/(sx * sy*(n-1.0))\r\n else:\r\n numer = sum([x*y for x,y in zip(X,Y)]) - n*(xbar * ybar)\r\n denom = sqrt((sum([x*x for x in X]) - n* xbar**2)*(sum([y*y for y in Y]) -n* ybar**2))\r\n return (numer /denom)", "def _pearson_correlation_coeff(x_data, y_data):\n reg = linregress(x_data, y_data)\n return reg.rvalue", "def pearson(x, y):\n data = np.vstack((x, y))\n ms = data.mean(axis=1)[(slice(None, None, None), None)]\n datam = data - ms\n datass = np.sqrt(ss(datam, axis=1))\n temp = np.dot(datam[1:], datam[0].T)\n rs = temp / (datass[1:] * datass[0])\n return rs", "def pearsons(measurements_x, measurements_y):\n print(measurements_x.name + ' to ' + measurements_y.name + ' with Pearson')\n coefficient, p_value = ss.pearsonr(measurements_x, measurements_y)\n return coefficient, p_value, r'$r$'", "def correlation_test(x1, x2):\r\n x = pd.DataFrame([x1, x2]).T.dropna().values\r\n return pearsonr(x[:, 0], x[:, 1])", "def Corr(x,y):\n \n cocoeff1 = np.empty((y.shape[1],y.shape[2]))\n cocoeff2 = np.empty((y.shape[1],y.shape[2]))\n for i in xrange(y.shape[1]):\n for j in xrange(y.shape[2]):\n cocoeff1[i,j],cocoeff2[i,j] = sts.pearsonr(x[:,i,j],y[:,i,j])\n \n print 'Completed: Correlation calculations!'\n \n return cocoeff1, cocoeff2", "def pearson_correlation(X, Y):\n # should not need X_norm_squared because if you could precompute that as\n # well as Y, then you should just pre-compute the output and not even\n # call this function.\n if X is Y:\n X = Y = np.asanyarray(X)\n else:\n X = np.asanyarray(X)\n Y = np.asanyarray(Y)\n\n if X.shape[1] != Y.shape[1]:\n raise ValueError(\"Incompatible dimension for X and Y matrices\")\n\n XY = ssd.cdist(X, Y, 'correlation', 2)\n\n return 1 - XY", "def _pearson_r(x, y):\n if _allequal(x) or _allequal(y):\n return np.nan\n\n return (np.mean(x * y) - np.mean(x) * np.mean(y)) / np.std(x) / np.std(y)", "def _compute_pearson(self, user1_id, user2_id):\n shared_movies = self.get_shared_ratings(user1_id, user2_id)\n ratings1, ratings2 = shared_movies['rating_x'].as_matrix(), shared_movies['rating_y'].as_matrix()\n mean_user1, mean_user2 = self.get_mean_user_rating(user1_id), self.get_mean_user_rating(user2_id)\n return pearson_correlation(ratings1, ratings2, mean_user1, mean_user2)", "def _pearson_r(x, y):\n return (np.mean(x * y) - np.mean(x) * np.mean(y)) / np.std(x) / np.std(y)", "def pearson_correlation(sim, obs, dim=\"time\"):\n # wrap numpy function\n kwargs = dict(\n input_core_dims=[[dim], [dim]], dask=\"parallelized\", output_dtypes=[float]\n )\n pearsonr = xr.apply_ufunc(_pearson_correlation, sim, obs, **kwargs)\n pearsonr.name = \"pearson_coef\"\n return pearsonr", "def pearson_r(x, y):\n # Compute correlation matrix: corr_mat\n corr_mat = np.corrcoef(x,y)\n # Return entry [0,1]\n return corr_mat[0,1]", "def pearson_correlation(pairs: Collection[Tuple[float, float]]) -> float:\n pairs = list(pairs)\n n = len(pairs)\n \n # Compute the means\n x = sum(t[0] for t in pairs) / n\n y = sum(t[1] for t in pairs) / n\n\n # 
Compare the overall sums\n numerator = sum(\n (xi - x) * (yi - y)\n for xi, yi in pairs\n )\n denominator = sum((xi - x) ** 2 for xi, yi in pairs)\n denominator *= sum((yi - y) ** 2 for xi, yi in pairs)\n denominator **= (1/2)\n\n try:\n return (1 + numerator / denominator) * 50\n except ZeroDivisionError:\n return 0", "def pearson_r(x, y):\n # Compute correlation matrix: corr_mat\n corr_mat = np.corrcoef(x, y)\n\n # Return entry [0,1]\n return corr_mat[0,1]", "def pearson_r(x, y):\n # Compute correlation matrix: corr_mat\n corr_mat = np.corrcoef(x, y)\n\n # Return entry [0,1]\n return corr_mat[0,1]", "def pearson_r(x, y):\n # Compute correlation matrix: corr_mat\n corr_mat = np.corrcoef(x, y)\n\n # Return entry [0,1]\n return corr_mat[0,1]", "def pearson_r(x, y):\n # Compute correlation matrix: corr_mat\n corr_mat = np.corrcoef(x,y)\n\n # Return entry [0,1]\n return corr_mat[0,1]", "def pearson_r(x, y):\n # Compute correlation matrix: corr_mat\n corr_mat = np.corrcoef(x,y)\n\n # Return entry [0,1]\n return corr_mat[0,1]", "def pearson_r(x, y):\n # Compute correlation matrix: corr_mat\n corr_mat = np.corrcoef(x,y)\n\n # Return entry [0,1]\n return corr_mat[0,1]", "def pearson_r(x, y):\n # Compute correlation matrix: corr_mat\n corr_mat = np.corrcoef(x,y)\n\n # Return entry [0,1]\n return corr_mat[0,1]", "def pearson_r(x, y):\n # Compute correlation matrix: corr_mat\n corr_mat = np.corrcoef(x, y)\n\n # Return entry [0,1]\n return corr_mat[0, 1]", "def pearson_r(x, y):\r\n # Compute correlation matrix: corr_mat\r\n \r\n corr_mat=np.corrcoef(x,y)\r\n\r\n # Return entry [0,1]\r\n return corr_mat[0,1]", "def correlation(x, y):\n # Use data in natural key-value form\n xs = {}\n for (_, date, value) in x:\n xs[date] = value\n ys = {}\n for (_, date, value) in y:\n ys[date] = value\n\n # Fill 0s for missing dates\n for d in set(ys.keys()) - set(xs.keys()):\n xs[d] = 0\n for d in set(xs.keys()) - set(ys.keys()):\n ys[d] = 0\n\n x_avg = sum(xs.values()) / len(xs.values())\n y_avg = sum(ys.values()) / len(ys.values())\n\n # Pearson correlation coefficient for given sample\n covariance = 0\n x_variance = 0\n y_variance = 0\n for d in xs.keys():\n x_diff = xs[d] - x_avg\n y_diff = ys[d] - y_avg\n covariance += x_diff * y_diff\n x_variance += math.pow(x_diff, 2)\n y_variance += math.pow(y_diff, 2)\n if x_variance == 0:\n return -1\n elif y_variance == 0:\n return -2\n return covariance / (math.sqrt(x_variance) * math.sqrt(y_variance))", "def pearson_cor_co(X, Y, n):\n\n mean_X = get_mean(X,n)\n mean_Y = get_mean(Y,n)\n\n std_X = get_std_dev(X,n)\n std_Y = get_std_dev(Y,n)\n\n err_X = []\n err_Y = [] \n for i in range(0,n):\n err_X.append(X[i] - mean_X)\n err_Y.append(Y[i] - mean_Y)\n\n return sum(map( multiply, err_X, err_Y))/(n*std_X*std_Y)", "def pearson_r2(self):\n y_pred, y_true = self._finalize_labels_and_prediction()\n\n return pearsonr(y_true[:, 0].numpy(), y_pred[:, 0].numpy())[0] ** 2" ]
[ "0.7360399", "0.7208227", "0.7155166", "0.713183", "0.71158606", "0.711191", "0.70218587", "0.6996039", "0.6992368", "0.69837546", "0.6965201", "0.6962038", "0.69537663", "0.6944156", "0.68943065", "0.68788373", "0.6865398", "0.6854346", "0.6850651", "0.6850651", "0.6850651", "0.68437165", "0.68437165", "0.68437165", "0.68437165", "0.6839021", "0.6818596", "0.67923653", "0.6755447", "0.674332" ]
0.8398194
0
Move all nodes in cluster1 to cluster2, then kill cluster1.
def combine_cluster_from_to(cl1, cl2):
    for node_id in clusters[cl1]:
        nodes[node_id] = cl2
        clusters[cl2].append(node_id)
    clusters.pop(cl1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def leave_cluster(\n self,\n nodes_names,\n ):\n for node_name in nodes_names:\n # Gets the node IP address.\n ip = self.get_node_ip(node_name)\n\n # Gets the token.\n docker_utils.swarm_leave(\n hostname=ip,\n ssh_port=SSH_PORT,\n ssh_username=self.get_ssh_username(node_name),\n ssh_private_key_file=self.get_ssh_private_key_file(node_name),\n executor=node_name,\n logger=self._logger,\n )", "def two_clusters_reconfiguration(self):\n\n self.show_step(1)\n self.env.revert_snapshot(\"ready_with_5_slaves\")\n\n self.show_step(2)\n cluster_id_1 = self.fuel_web.create_cluster(\n name=\"env1\",\n mode=settings.DEPLOYMENT_MODE,\n settings={\n \"net_provider\": 'neutron',\n \"net_segment_type\": settings.NEUTRON_SEGMENT_TYPE,\n }\n )\n cluster_id_2 = self.fuel_web.create_cluster(\n name=\"env2\",\n mode=settings.DEPLOYMENT_MODE,\n settings={\n \"net_provider\": 'neutron',\n \"net_segment_type\": settings.NEUTRON_SEGMENT_TYPE,\n }\n )\n\n self.fuel_web.update_nodes(\n cluster_id_1,\n {\n 'slave-01': ['compute'],\n 'slave-02': ['controller']\n })\n\n self.fuel_web.update_nodes(\n cluster_id_2,\n {\n 'slave-03': ['compute'],\n 'slave-04': ['controller']\n })\n\n networks_1 = self.fuel_web.client.get_networks(\n cluster_id_1)[\"networks\"]\n self.change_default_range(networks_1,\n number_excluded_ips=30,\n cut_from_start=True)\n helpers.wait(lambda: not self.is_update_dnsmasq_running(\n self.fuel_web.client.get_tasks()), timeout=60,\n timeout_msg=\"Timeout exceeded while waiting for task \"\n \"'update_dnsmasq' is finished!\")\n floating_list = [self.fuel_web.get_floating_ranges()[0][0]]\n networking_parameters = {\n \"floating_ranges\": floating_list}\n self.fuel_web.client.update_network(\n cluster_id_1,\n networks=networks_1,\n networking_parameters=networking_parameters\n )\n\n networks_2 = self.fuel_web.client.get_networks(\n cluster_id_2)[\"networks\"]\n self.change_default_range(networks_2,\n number_excluded_ips=30,\n cut_from_start=False)\n helpers.wait(lambda: not self.is_update_dnsmasq_running(\n self.fuel_web.client.get_tasks()), timeout=60,\n timeout_msg=\"Timeout exceeded while waiting for task \"\n \"'update_dnsmasq' is finished!\")\n floating_list = [self.fuel_web.get_floating_ranges()[0][1]]\n\n vlan_range_1 = self.fuel_web.client.get_networks(\n cluster_id_1)[\"networking_parameters\"][\"vlan_range\"]\n vlan_range_2 = [vlan_range_1[-1] + 1, vlan_range_1[-1] + 31]\n\n networking_parameters = {\n \"floating_ranges\": floating_list,\n \"vlan_range\": vlan_range_2}\n self.fuel_web.client.update_network(\n cluster_id_2,\n networks=networks_2,\n networking_parameters=networking_parameters\n )\n self.show_step(3)\n self.fuel_web.verify_network(cluster_id_1)\n self.show_step(4)\n self.fuel_web.verify_network(cluster_id_2)\n self.show_step(5)\n self.fuel_web.deploy_cluster_wait(cluster_id_1, check_services=False)\n self.show_step(6)\n self.fuel_web.run_ostf(cluster_id=cluster_id_1)\n self.show_step(7)\n self.fuel_web.deploy_cluster_wait(cluster_id_2, check_services=False)\n self.show_step(8)\n self.fuel_web.run_ostf(cluster_id=cluster_id_2)\n\n self.show_step(9)\n config_new = utils.get_config_template('nova_cpu')\n structured_config = get_structured_config_dict(config_new)\n self.fuel_web.client.upload_configuration(config_new,\n cluster_id_1,\n role=\"controller\")\n\n service_name = \"nova-scheduler\"\n\n controller_env_1 = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n cluster_id_1, ['controller'])\n controller_env_2 = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n 
cluster_id_2, ['controller'])\n uptimes = self.get_service_uptime(controller_env_1, service_name)\n task = self.fuel_web.client.apply_configuration(cluster_id_1,\n role=\"controller\")\n\n self.show_step(10)\n self.fuel_web.assert_task_success(task, timeout=900, interval=5)\n\n self.show_step(11)\n self.check_service_was_restarted(controller_env_1,\n uptimes,\n service_name)\n\n self.show_step(12)\n self.check_config_on_remote(controller_env_1, structured_config)\n\n self.show_step(13)\n os_conn = os_actions.OpenStackActions(\n self.fuel_web.get_public_vip(cluster_id_1))\n\n self.check_overcommit_ratio(os_conn, cluster_id_1)\n\n self.show_step(14)\n config_revert = utils.get_config_template('nova_cpu_old')\n structured_config_revert = get_structured_config_dict(config_revert)\n self.fuel_web.client.upload_configuration(config_revert,\n cluster_id_2,\n role=\"controller\")\n uptimes = self.get_service_uptime(controller_env_2, service_name)\n task = self.fuel_web.client.apply_configuration(cluster_id_2,\n role=\"controller\")\n self.show_step(15)\n self.fuel_web.assert_task_success(task, timeout=900, interval=5)\n\n self.show_step(16)\n self.check_service_was_restarted(controller_env_2,\n uptimes,\n service_name)\n\n self.show_step(17)\n self.check_config_on_remote(controller_env_2,\n structured_config_revert)\n\n self.env.make_snapshot(\"two_clusters_reconfiguration\")", "def _merge_clusters(self, cl1, cl2):\n label = ''\n to_delete = ''\n if cl1 < cl2:\n label = cl1\n to_delete = cl2\n else:\n label = cl2\n to_delete = cl1\n to_keep = self.get_cluster(label)\n to_remove = self._clusters.pop(to_delete)\n to_keep.merge(to_remove)", "def shutdown_cluster(self):\n self.cluster.shutdown()", "def delete_cluster(self):", "def remove_nodes_from_cluster(self, nodes, redeploy=True,\n check_services=False):\n self.fuel_web.update_nodes(\n self.cluster_id,\n nodes,\n pending_addition=False, pending_deletion=True,\n )\n if redeploy:\n self.fuel_web.deploy_cluster_wait(self.cluster_id,\n check_services=check_services)", "def cluster_shutdown():\n map(shutdown, cluster)", "def _rename_clusters(self):\n all_clusters = []\n temp_clusters = self._clusters.copy()\n for clu in temp_clusters:\n all_clusters.append(self._clusters.pop(clu))\n idx = 0\n for clu in all_clusters:\n label = 'S' + str(idx)\n clu.rename(label)\n self._clusters[label] = clu\n idx += 1", "def cluster_nodes_recycle(self):\n\n self._client.post(\n \"{}/recycle\".format(LKECluster.api_endpoint), model=self\n )", "def fail_without_replace_test(self):\n debug(\"Starting cluster with 3 nodes.\")\n cluster = self.cluster\n cluster.populate(3)\n node1, node2, node3 = cluster.nodelist()\n cluster.seeds.remove(node3)\n NUM_TOKENS = os.environ.get('NUM_TOKENS', '256')\n if DISABLE_VNODES:\n cluster.set_configuration_options(values={'initial_token': None, 'num_tokens': 1})\n else:\n cluster.set_configuration_options(values={'initial_token': None, 'num_tokens': NUM_TOKENS})\n cluster.start()\n\n debug(\"Inserting Data...\")\n node1.stress(['write', 'n=10K', 'no-warmup', '-schema', 'replication(factor=3)'])\n\n mark = None\n for auto_bootstrap in (True, False):\n debug(\"Stopping node 3.\")\n node3.stop(gently=False)\n\n # completely delete the data, commitlog, and saved caches\n for d in chain([os.path.join(node3.get_path(), \"commitlogs\")],\n [os.path.join(node3.get_path(), \"saved_caches\")],\n node3.data_directories()):\n if os.path.exists(d):\n rmtree(d)\n\n node3.set_configuration_options(values={'auto_bootstrap': auto_bootstrap})\n 
debug(\"Starting node 3 with auto_bootstrap = {val}\".format(val=auto_bootstrap))\n node3.start(wait_other_notice=False)\n node3.watch_log_for('Use cassandra.replace_address if you want to replace this node', from_mark=mark, timeout=20)\n mark = node3.mark_log()", "def test_cluster_works_fine_after_deleting_CA_folder(self):\n self.x509.generate_multiple_x509_certs(servers=self.servers[:self.nodes_init])\n random_nodes = random.sample(self.servers[1:self.nodes_init], 1)\n self.log.info(\"Uploading root certs from {0}\".format(random_nodes[0]))\n self.x509.upload_root_certs(random_nodes[0])\n self.x509.upload_node_certs(servers=self.servers[:self.nodes_init])\n self.x509.delete_unused_out_of_the_box_CAs(server=self.master)\n self.x509.upload_client_cert_settings(server=self.master)\n shell = RemoteMachineShellConnection(random_nodes[0])\n shell.remove_directory(self.x509.install_path + x509main.CHAINFILEPATH +\n \"/\" + x509main.TRUSTEDCAPATH)\n shell.disconnect()\n\n failover_nodes = random_nodes\n nodes_in_cluster = self.servers[:self.nodes_init]\n for operation in [\"recovery\", \"out\"]:\n shell = RemoteMachineShellConnection(failover_nodes[0])\n shell.stop_server()\n self.cluster.async_failover(self.servers[:self.nodes_init],\n failover_nodes,\n graceful=False)\n self.wait_for_failover_or_assert(1)\n if operation == \"out\":\n https_val = CbServer.use_https # so that add_node uses https\n CbServer.use_https = True\n rest = RestConnection(self.master)\n otp_nodes = []\n ejected_nodes = []\n for node in nodes_in_cluster:\n otp_nodes.append('ns_1@'+node.ip)\n for node in failover_nodes:\n ejected_nodes.append('ns_1@' + node.ip)\n status = rest.rebalance(otpNodes=otp_nodes, ejectedNodes=ejected_nodes)\n if not status:\n shell.start_server(failover_nodes[0])\n self.fail(\"rebalance/failover failed\")\n CbServer.use_https = https_val\n nodes_in_cluster = nodes_in_cluster.remove(failover_nodes[0])\n shell.start_server(failover_nodes[0])\n if operation == \"recovery\":\n rest = RestConnection(self.master)\n for node in failover_nodes:\n rest.set_recovery_type(\"ns_1@\" + node.ip, recoveryType=\"delta\")\n https_val = CbServer.use_https # so that add_node uses https\n CbServer.use_https = True\n task = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [])\n self.wait_for_rebalance_to_complete(task)\n CbServer.use_https = https_val\n self.auth(servers=nodes_in_cluster)", "def cluster_leaves(self, node1, node2, new_cluster):\n self.tree, self.cluster_map = _cluster_leaves(self.tree, self.cluster_map, \n self.work_dist_matrix, node1, node2, self.class_map, new_cluster)\n\n self.work_dist_matrix = _update_distances(self.work_dist_matrix, node1, node2, new_cluster)", "def _replace_node_test(self, gently):\n debug(\"Starting cluster with 3 nodes.\")\n cluster = self.cluster\n cluster.populate(3).start()\n node1, node2, node3 = cluster.nodelist()\n\n if DISABLE_VNODES:\n num_tokens = 1\n else:\n # a little hacky but grep_log returns the whole line...\n num_tokens = int(node3.get_conf_option('num_tokens'))\n\n debug(\"testing with num_tokens: {}\".format(num_tokens))\n\n debug(\"Inserting Data...\")\n node1.stress(['write', 'n=10K', 'no-warmup', '-schema', 'replication(factor=3)'])\n\n session = self.patient_cql_connection(node1)\n session.default_timeout = 45\n stress_table = 'keyspace1.standard1'\n query = SimpleStatement('select * from %s LIMIT 1' % stress_table, consistency_level=ConsistencyLevel.THREE)\n initial_data = rows_to_list(session.execute(query))\n\n # stop node, query 
should not work with consistency 3\n debug(\"Stopping node 3.\")\n node3.stop(gently=gently, wait_other_notice=True)\n\n debug(\"Testing node stoppage (query should fail).\")\n with self.assertRaises(NodeUnavailable):\n try:\n query = SimpleStatement('select * from %s LIMIT 1' % stress_table, consistency_level=ConsistencyLevel.THREE)\n session.execute(query)\n except (Unavailable, ReadTimeout):\n raise NodeUnavailable(\"Node could not be queried.\")\n\n # replace node 3 with node 4\n debug(\"Starting node 4 to replace node 3\")\n node4 = Node('node4', cluster=cluster, auto_bootstrap=True, thrift_interface=('127.0.0.4', 9160),\n storage_interface=('127.0.0.4', 7000), jmx_port='7400', remote_debug_port='0',\n initial_token=None, binary_interface=('127.0.0.4', 9042))\n cluster.add(node4, False)\n node4.start(replace_address='127.0.0.3', wait_for_binary_proto=True)\n\n debug(\"Verifying tokens migrated sucessfully\")\n moved_tokens = node4.grep_log(\"Token .* changing ownership from /127.0.0.3 to /127.0.0.4\")\n debug(\"number of moved tokens: {}\".format(len(moved_tokens)))\n self.assertEqual(len(moved_tokens), num_tokens)\n\n # check that restarting node 3 doesn't work\n debug(\"Try to restart node 3 (should fail)\")\n node3.start(wait_other_notice=False)\n collision_log = node1.grep_log(\"between /127.0.0.3 and /127.0.0.4; /127.0.0.4 is the new owner\")\n debug(collision_log)\n self.assertEqual(len(collision_log), 1)\n node3.stop(gently=False)\n\n # query should work again\n debug(\"Stopping old nodes\")\n node1.stop(gently=False, wait_other_notice=True)\n node2.stop(gently=False, wait_other_notice=True)\n\n debug(\"Verifying data on new node.\")\n session = self.patient_exclusive_cql_connection(node4)\n assert_all(session, 'SELECT * from {} LIMIT 1'.format(stress_table),\n expected=initial_data,\n cl=ConsistencyLevel.ONE)", "def test_three_nodes_cluster_teardown(three_nodes_cluster, ssh_key,\n test_config, module_tmpdir, logger):\n node1, node2, node3 = three_nodes_cluster\n nodes_list = [node1, node2, node3]\n logger.info('Asserting cluster status')\n _assert_cluster_status(node1.client, logger)\n\n logger.info('Installing example deployment')\n example = get_example_deployment(node1, ssh_key, logger,\n 'cluster_teardown', test_config)\n example.inputs['server_ip'] = node1.ip_address\n example.upload_and_verify_install()\n\n logger.info('Removing example deployment')\n example.uninstall()\n logger.info('Removing cluster')\n for node in nodes_list:\n for config_name in ['manager', 'rabbit', 'db']:\n node.run_command('cfy_manager remove -v -c /etc/cloudify/'\n '{0}_config.yaml'.format(config_name))\n\n credentials = _get_new_credentials()\n logger.info('New credentials: %s', credentials)\n\n for node in nodes_list:\n node.install_config = copy.deepcopy(node.basic_install_config)\n\n logger.info('Installing Cloudify cluster again')\n run_cluster_bootstrap(nodes_list, nodes_list, nodes_list,\n skip_bootstrap_list=[], pre_cluster_rabbit=True,\n high_security=True, use_hostnames=False,\n tempdir=module_tmpdir, test_config=test_config,\n credentials=credentials)\n node1.download_rest_ca(force=True)\n\n logger.info('Asserting cluster status')\n _assert_cluster_status(node1.client, logger)", "def test_05_node_down_and_resync_hard(self):\n for cluster in test_rest.cluster.clusters:\n if cluster == 'index':\n continue\n test_rest.db_simulate(cluster, 240)\n port = test_rest.cluster.clusters[cluster][0][0]\n test_rest.step(f'stopping cluster {cluster} node with port {port} - during load')\n 
test_rest.docker_stop(cluster, port)\n # restart nodes\n for cluster in test_rest.cluster.clusters:\n if cluster == 'index':\n continue\n port = test_rest.cluster.clusters[cluster][0][0]\n test_rest.step(f'restarting cluster {cluster} node with port {port}')\n test_rest.expand_data_cluster(cluster, port=port)\n test_rest.step(\"restarted nodes, waiting 10 seconds to begin monitoring table state & running sync jobs\")\n time.sleep(10)\n test_rest.cluster.insync_and_state_check()\n while test_rest.is_running_simulations():\n print(\"waiting on running simulations to complete\")\n time.sleep(10)\n test_rest.cluster.insync_and_state_check()\n test_rest.cluster.verify_data()", "def multiple_repair_test(self):\n cluster = self.cluster\n cluster.populate(3).start()\n node1, node2, node3 = cluster.nodelist()\n\n session = self.patient_cql_connection(node1)\n create_ks(session, 'ks', 3)\n create_cf(session, 'cf', read_repair=0.0, columns={'c1': 'text', 'c2': 'text'})\n\n debug(\"insert data\")\n\n insert_c1c2(session, keys=range(1, 50), consistency=ConsistencyLevel.ALL)\n node1.flush()\n\n debug(\"bringing down node 3\")\n node3.flush()\n node3.stop(gently=False)\n\n debug(\"inserting additional data into node 1 and 2\")\n insert_c1c2(session, keys=range(50, 100), consistency=ConsistencyLevel.TWO)\n node1.flush()\n node2.flush()\n\n debug(\"restarting and repairing node 3\")\n node3.start(wait_for_binary_proto=True)\n\n if cluster.version() >= \"2.2\":\n node3.repair()\n else:\n node3.nodetool(\"repair -par -inc\")\n\n # wait stream handlers to be closed on windows\n # after session is finished (See CASSANDRA-10644)\n if is_win:\n time.sleep(2)\n\n debug(\"stopping node 2\")\n node2.stop(gently=False)\n\n debug(\"inserting data in nodes 1 and 3\")\n insert_c1c2(session, keys=range(100, 150), consistency=ConsistencyLevel.TWO)\n node1.flush()\n node3.flush()\n\n debug(\"start and repair node 2\")\n node2.start(wait_for_binary_proto=True)\n\n if cluster.version() >= \"2.2\":\n node2.repair()\n else:\n node2.nodetool(\"repair -par -inc\")\n\n debug(\"replace node and check data integrity\")\n node3.stop(gently=False)\n node5 = Node('node5', cluster, True, ('127.0.0.5', 9160), ('127.0.0.5', 7000), '7500', '0', None, ('127.0.0.5', 9042))\n cluster.add(node5, False)\n node5.start(replace_address='127.0.0.3', wait_other_notice=True)\n\n assert_one(session, \"SELECT COUNT(*) FROM ks.cf LIMIT 200\", [149])", "def test_replace_cluster_network(self):\n pass", "def multi_dc_replace_with_rf1_test(self):\n cluster = self.cluster\n cluster.populate([1, 1])\n cluster.start()\n node1, node2 = cluster.nodelist()\n\n node1 = cluster.nodes['node1']\n yaml_config = \"\"\"\n # Create the keyspace and table\n keyspace: keyspace1\n keyspace_definition: |\n CREATE KEYSPACE keyspace1 WITH replication = {'class': 'NetworkTopologyStrategy', 'dc1': 1, 'dc2': 1};\n table: users\n table_definition:\n CREATE TABLE users (\n username text,\n first_name text,\n last_name text,\n email text,\n PRIMARY KEY(username)\n ) WITH compaction = {'class':'SizeTieredCompactionStrategy'};\n insert:\n partitions: fixed(1)\n batchtype: UNLOGGED\n queries:\n read:\n cql: select * from users where username = ?\n fields: samerow\n \"\"\"\n with tempfile.NamedTemporaryFile(mode='w+') as stress_config:\n stress_config.write(yaml_config)\n stress_config.flush()\n node1.stress(['user', 'profile=' + stress_config.name, 'n=10k', 'no-warmup',\n 'ops(insert=1)', '-rate', 'threads=50'])\n\n session = self.patient_cql_connection(node1)\n\n # change 
system_auth keyspace to 2 (default is 1) to avoid\n # \"Unable to find sufficient sources for streaming\" warning\n if cluster.cassandra_version() >= '2.2.0':\n session.execute(\"\"\"\n ALTER KEYSPACE system_auth\n WITH replication = {'class':'SimpleStrategy', 'replication_factor':2};\n \"\"\")\n\n # Save initial data\n stress_table = 'keyspace1.users'\n query = SimpleStatement('select * from %s LIMIT 1' % stress_table, consistency_level=ConsistencyLevel.TWO)\n initial_data = rows_to_list(session.execute(query))\n\n # stop node to replace\n debug(\"Stopping node to replace.\")\n node2.stop(wait_other_notice=True)\n\n node3 = new_node(cluster, data_center='dc2')\n node3.start(replace_address='127.0.0.2', wait_for_binary_proto=True)\n\n assert_bootstrap_state(self, node3, 'COMPLETED')\n\n # Check that keyspace was replicated from dc1 to dc2\n self.assertFalse(node3.grep_log(\"Unable to find sufficient sources for streaming range\"))\n\n # query should work again with node1 stopped\n node1.stop(wait_other_notice=True)\n debug(\"Verifying data on new node.\")\n session = self.patient_exclusive_cql_connection(node3)\n assert_all(session, 'SELECT * from {} LIMIT 1'.format(stress_table),\n expected=initial_data,\n cl=ConsistencyLevel.LOCAL_ONE)", "def decommission(self):\n print \"NODE: Decommissioning node: \" + self.name\n keyspace = env_vars['keyspace']\n timer = Timer.get_timer()\n self.vm.run_command(\"nodetool repair -h %s %s\" % (self.name, keyspace))\n self.vm.run_command(\"nodetool decommission\")\n print \"NODE: %s is decommissioned (took %d secs)\" % (self.name, timer.stop())\n #self.vm.shutdown()", "def update_cluster_merge_across_nodes(request):\n ksm_merge_across_nodes = getattr(\n request.node.cls, \"ksm_merge_across_nodes\"\n )\n\n def fin():\n \"\"\"\n 1) Disable KSM\n \"\"\"\n ll_clusters.updateCluster(\n positive=True, cluster=sla_conf.CLUSTER_NAME[0], ksm_enabled=False\n )\n request.addfinalizer(fin)\n\n assert ll_clusters.updateCluster(\n positive=True,\n cluster=sla_conf.CLUSTER_NAME[0],\n ksm_enabled=True,\n ksm_merge_across_nodes=ksm_merge_across_nodes\n )", "def cluster_reboot(cluster):\n map(reboot, cluster)", "def transfer_skincluster(source_object, target_objects, prune_after = False):\n source_skin_node = get_skincluster_node(source_object)\n assert source_skin_node, 'Skincluster not found in source object.'\n skin_info = get_skincluster_info(source_skin_node)\n joint_list = skin_info['joint_list']\n skin_method = skin_info['skin_method']\n for tgt_obj in target_objects:\n old_tgt_skin_node = get_skincluster_node(tgt_obj)\n if old_tgt_skin_node:\n old_tgt_skin_node.unbind()\n try:\n tgt_skin_node = pm.skinCluster(joint_list, tgt_obj, skinMethod = skin_method)\n except:\n tgt_skin_node = pm.skinCluster(joint_list, tgt_obj)\n pm.copySkinWeights(\n sourceSkin = source_skin_node,\n destinationSkin = tgt_skin_node,\n noMirror = True,\n surfaceAssociation = 'closestPoint',\n influenceAssociation = ['name', 'oneToOne', 'closestJoint'],\n )\n remove_unused_influence(tgt_skin_node)\n\n if prune_after:\n prune_skincluster(tgt_skin_node)", "def multiple_subsequent_repair_test(self):\n cluster = self.cluster\n cluster.populate(3).start()\n node1, node2, node3 = cluster.nodelist()\n\n debug(\"Inserting data with stress\")\n node1.stress(['write', 'n=5M', 'no-warmup', '-rate', 'threads=10', '-schema', 'replication(factor=3)'])\n\n debug(\"Flushing nodes\")\n cluster.flush()\n\n debug(\"Waiting compactions to finish\")\n cluster.wait_for_compactions()\n\n if self.cluster.version() 
>= '2.2':\n debug(\"Repairing node1\")\n node1.nodetool(\"repair\")\n debug(\"Repairing node2\")\n node2.nodetool(\"repair\")\n debug(\"Repairing node3\")\n node3.nodetool(\"repair\")\n else:\n debug(\"Repairing node1\")\n node1.nodetool(\"repair -par -inc\")\n debug(\"Repairing node2\")\n node2.nodetool(\"repair -par -inc\")\n debug(\"Repairing node3\")\n node3.nodetool(\"repair -par -inc\")\n\n # Using \"print\" instead of debug() here is on purpose. The compactions\n # take a long time and don't print anything by default, which can result\n # in the test being timed out after 20 minutes. These print statements\n # prevent it from being timed out.\n print \"compacting node1\"\n node1.compact()\n print \"compacting node2\"\n node2.compact()\n print \"compacting node3\"\n node3.compact()\n\n # wait some time to be sure the load size is propagated between nodes\n debug(\"Waiting for load size info to be propagated between nodes\")\n time.sleep(45)\n\n load_size_in_kb = float(sum(map(lambda n: n.data_size(), [node1, node2, node3])))\n load_size = load_size_in_kb / 1024 / 1024\n debug(\"Total Load size: {}GB\".format(load_size))\n\n # There is still some overhead, but it's lot better. We tolerate 25%.\n expected_load_size = 4.5 # In GB\n assert_almost_equal(load_size, expected_load_size, error=0.25)", "def test_04_node_down_and_resync_soft(self):\n for cluster in test_rest.cluster.clusters:\n if cluster == 'index':\n continue\n port = test_rest.cluster.clusters[cluster][0][0]\n test_rest.step(f'stopping cluster {cluster} node with port {port}')\n test_rest.docker_stop(cluster, port)\n test_rest.step(f\"starting db_simulator on cluster {cluster}\")\n test_rest.db_simulate(cluster, 180)\n # restart nodes\n for cluster in test_rest.cluster.clusters:\n if cluster == 'index':\n continue\n port = test_rest.cluster.clusters[cluster][0][0]\n test_rest.step(f'restarting cluster {cluster} node with port {port}')\n test_rest.expand_data_cluster(cluster, port=port)\n test_rest.step(\"restarted nodes, waiting 10 seconds to begin monitoring table state & running sync jobs\")\n time.sleep(10)\n test_rest.cluster.insync_and_state_check()\n while test_rest.is_running_simulations():\n print(\"waiting on running simulations to complete\")\n time.sleep(10)\n test_rest.cluster.verify_data()", "def multiple_full_repairs_lcs_test(self):\n cluster = self.cluster\n cluster.populate(2).start(wait_for_binary_proto=True)\n node1, node2 = cluster.nodelist()\n for x in xrange(0, 10):\n node1.stress(['write', 'n=100k', 'no-warmup', '-rate', 'threads=10', '-schema', 'compaction(strategy=LeveledCompactionStrategy,sstable_size_in_mb=10)', 'replication(factor=2)'])\n cluster.flush()\n cluster.wait_for_compactions()\n node1.nodetool(\"repair -full keyspace1 standard1\")", "def test_add_remove_cluster_resources(cluster_start):\n cluster = cluster_start\n assert ray.global_state.cluster_resources()[\"CPU\"] == 1\n nodes = []\n nodes += [cluster.add_node(num_cpus=1)]\n cluster.wait_for_nodes()\n assert ray.global_state.cluster_resources()[\"CPU\"] == 2\n\n cluster.remove_node(nodes.pop())\n cluster.wait_for_nodes()\n assert ray.global_state.cluster_resources()[\"CPU\"] == 1\n\n for i in range(5):\n nodes += [cluster.add_node(num_cpus=1)]\n cluster.wait_for_nodes()\n assert ray.global_state.cluster_resources()[\"CPU\"] == 6", "def remove_nodes(self, count=1):\n for i in range(count):\n dead_guy = self.all_nodes.pop()\n self.log.info(\"Removing node %s\" % dead_guy.name)\n dead_guy.decommission()\n self.log.info(\"Client %s is 
removed\" % dead_guy.name)\n self.save_cluster()\n self.inject_hosts_files()", "def changes_while_node_down_test(self):\n debug(\"changes_while_node_down_test()\")\n cluster = self.cluster\n cluster.populate(2).start()\n node1, node2 = cluster.nodelist()\n wait(2)\n session = self.patient_cql_connection(node2)\n\n self.prepare_for_changes(session, namespace='ns2')\n node1.stop()\n wait(2)\n self.make_schema_changes(session, namespace='ns2')\n wait(2)\n node2.stop()\n wait(2)\n node1.start()\n node2.start()\n wait(20)\n self.validate_schema_consistent(node1)", "def cleanup_cluster(self, cluster):\n self.log.info(\"removing xdcr/nodes settings\")\n rest = RestConnection(cluster.get_master_node())\n rest.remove_all_replications()\n rest.remove_all_remote_clusters()\n rest.remove_all_recoveries()\n cluster.cleanup_cluster(\"upgradeXDCR\")", "def automerge_clusters(self):\n all_clusters = self.get_clusters().copy()\n\n if not self._single: # if not in single mode mode\n # initialize the variable to check if some change has happened \n changed = False\n for cl_1 in all_clusters: # cycle over clusters\n c_c1 = all_clusters[cl_1]\n for cl_2 in all_clusters: # inner cycle over clusters\n c_c2 = all_clusters[cl_2]\n # if two clusters have the same speaker and have different \n # cluster identifiers\n if cl_1 != cl_2 and c_c1.get_speaker() != 'unknown' and c_c1.get_speaker() == c_c2.get_speaker() and self._clusters.has_key(cl_1) and self._clusters.has_key(cl_2):\n changed = True\n # merge the clusters an record that something changed\n self._merge_clusters(cl_1, cl_2)\n if changed: # if something has changed\n # rename all the clusters starting from S0\n self._rename_clusters()\n # remove also the old waves and seg files of the old clusters\n shutil.rmtree(self.get_file_basename())\n # rebuild all seg files\n self.generate_seg_file(set_speakers=False)\n # resplit the original wave file according to the new clusters\n self._to_trim()" ]
[ "0.6145484", "0.61324465", "0.5954599", "0.5924748", "0.58999866", "0.5896793", "0.5887858", "0.56757903", "0.56663275", "0.5655591", "0.5650627", "0.5638231", "0.5611794", "0.56020385", "0.559868", "0.5595428", "0.5595001", "0.55839825", "0.55317724", "0.55285084", "0.5525447", "0.55151755", "0.5509048", "0.54984593", "0.54955155", "0.5483921", "0.54733336", "0.5469633", "0.5464469", "0.5454448" ]
0.6545622
0
get all neighbours that are 1 or 2 steps away from a node.
def get_2_step_neighbours(node):
    for i in range(len(node)):
        yield node[0:i] + (flip(node[i]),) + node[i+1:]
    for i, j in itertools.permutations(range(len(node)), 2):
        if i < j:
            yield node[0:i] + (flip(node[i]),) + node[i+1:j] + (flip(node[j]),) + node[j+1:]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_further_neighbours(self, cell):\n\t\tneighs = self.get_neighbours(cell)\n\t\ti, j = cell.find_id()\n\t\tneighbours = []\n\t\tfor neigh in neighs:\n\t\t\tx, y = neigh.find_id()\n\t\t\tif abs(x-i)+abs(y-j) > 1 or abs(x-i)+abs(y-j) == 0: \n\t\t\t\tneighbours.append(self.space[y,x])\n\t\treturn neighbours", "def neighbours(self, node: list) -> list:\n dirs = [[1, 0], [0, 1], [-1, 0], [0, -1]]\n neighbours = []\n for direction in dirs:\n neighbour = (node[0] + direction[0], node[1] + direction[1])\n if neighbour in self.all_nodes:\n neighbours.append(neighbour)\n return neighbours", "def get_neighbours(self, grid):\n\t\tfor diff in ((-1, 0), (1, 0), (0, -1), (0, 1)):\n\t\t\tres = Vector((self.row, self.col)) + diff\n\t\t\tif res[0] >= 0 and res[1] >= 0 and res[0] < len(grid) and res[1] < len(grid[0]):\n\t\t\t\tyield grid[res[0]][res[1]]", "def neighbours(x, y):\n n = []\n for c in ((y-1, x-1), (y-1, x), (y-1, x+1), (y, x-1), (y, x+1), (y+1, x-1), (y+1, x), (y+1, x+1)):\n n.append(c)\n return n", "def __get_neighbors(self, goal):\n neighbors = set()\n start = self.__get_position(0, self.puzzle)\n # start_x = start[0]\n # start_y = start[1]\n # Get the below neighbor.\n if(start[0] - 1 >= 0):\n temp = self.__swap(start[0], start[1], start[0] - 1, start[1])\n neighbors.add(State(temp, self.g + 1, 'D', goal))\n # Get the above neighbor\n if(start[0] + 1 <= len(self.puzzle) -1):\n temp = self.__swap(start[0], start[1], start[0] + 1, start[1])\n neighbors.add(State(temp, self.g + 1, 'U', goal))\n # Get the right neighbor\n if(start[1] - 1 >= 0):\n temp = self.__swap(start[0], start[1], start[0], start[1] - 1)\n neighbors.add(State(temp, self.g + 1, 'R', goal))\n # Get the left neighbor\n if(start[1] + 1 <= len(self.puzzle[0]) -1):\n temp = self.__swap(start[0], start[1], start[0], start[1] + 1)\n neighbors.add(State(temp, self.g + 1, 'L', goal))\n\n return neighbors", "def get_neighbours(self):\n return []", "def get_neighbors(self, node):\n neighbors = []\n\n for state in node.neighboring_states():\n neighbor = self.get_node(state)\n\n if neighbor:\n neighbors.append(neighbor)\n else:\n neighbor = self.add_node(state)\n\n neighbors.append(neighbor)\n \n return neighbors", "def neighbours(self):\n return [x.node for x in self.edges]", "def get_other_neighbors(self, node):\n neighbors = self.get_neighbors()\n return list(set(neighbors) - set([node]))", "def neighbours(self):\n\n neighbours = []\n root = self.root\n if self == root:\n return neighbours\n\n ########################\n # IMMEDIATELY ADJACENT #\n sizes = [self.maxs[0] - self.mins[0], self.maxs[1] - self.mins[1]]\n coords = [(self.mins[0] + sizes[0] / 2, self.maxs[1] + sizes[1] / 2,),\n (self.maxs[0] + sizes[0] / 2, self.mins[1] + sizes[1] / 2,),\n (self.mins[0] + sizes[0] / 2, self.mins[1] - sizes[1] / 2,),\n (self.maxs[0] - sizes[0] / 2, self.mins[1] + sizes[1] / 2,),]\n # loop through top, right, bottom, left\n for i in range(4):\n x, y = coords[i]\n query_quad = root.query_xy(x, y)\n if query_quad is not None:\n same_size_idx = query_quad.location[: self.tree_depth]\n same_size_quad = root[same_size_idx]\n neighbours += list(self._get_border_children(same_size_quad, i))\n\n #############\n # DIAGONALS #\n root_sizes = [root.maxs[0] - root.mins[0], root.maxs[1] - root.mins[1]]\n xs, ys = (root_sizes / 2 ** root.max_tree_depth) / 2\n neighbours += [\n root.query_xy(self.mins[0] - xs, self.mins[1] - ys), # TL\n root.query_xy(self.maxs[0] + xs, self.mins[1] - ys), # TR\n root.query_xy(self.mins[0] - xs, self.maxs[1] + ys), # BL\n 
root.query_xy(self.maxs[0] + xs, self.maxs[1] + ys), # BR\n ]\n\n unique_neighbours = list(set(neighbours))\n try:\n unique_neighbours.remove(self)\n except ValueError:\n pass\n\n return unique_neighbours", "def neighbor_nodes(self,node):\n\n neighbors = []\n if node > self.cols:\n neighbors.append(node-self.cols)\n if node <= self.cols*(self.rows-1):\n neighbors.append(node+self.cols)\n if node % self.cols != 1:\n neighbors.append(node-1)\n if node % self.cols != 0:\n neighbors.append(node+1)\n\n return neighbors", "def get_neighbours_8(x, y):\n return [(x - 1, y - 1), (x, y - 1), (x + 1, y - 1), \\\n (x - 1, y), (x + 1, y), \\\n (x - 1, y + 1), (x, y + 1), (x + 1, y + 1)]", "def get_neighbours(x, y, board):\n return [get_left(x, y, board), get_upper(x, y, board), get_right(x, y, board), get_lower(x, y, board)]", "def trace_neighbours(self, x, y):\r\n return list(filter(lambda n: n != None, (self.see_neighbour(x, y, i, j) for i in [-1, 0, 1] for j in [-1, 0, 1])))", "def get_neighbours(self, cell: Position) -> Iterable[Position]:\n x, y = cell\n\n return [\n (x - 1, y - 1), (x, y - 1), (x + 1, y - 1),\n (x - 1, y), (x + 1, y),\n (x - 1, y + 1), (x, y + 1), (x + 1, y + 1),\n ]", "def neighbor_nodes(self, node):\n row = node[0]\n col = node[1]\n if row == -1 and col == -1:\n # The nodes that can be accessed from the start node\n # (i.e. all the nodes in the first column)\n for r in range(self.num_rows):\n yield (r, 0)\n else:\n if row < (self.num_rows - 1):\n # We can still go down\n yield (row + 1, col)\n if row > 0:\n # We can still go up\n yield (row - 1, col)\n if col < (self.num_cols - 1):\n # We can still go to the right\n yield (row, col + 1)", "def neighbours(self):\n seen = set()\n return [l.other(self) for l in self.dovetails \\\n if id(l) not in seen and not seen.add(id(l))]", "def get_neighbours(self):\n return self.neighbours", "def __getNeighbours(self, x: int, y: int) -> List:\n\t\tneighbours = []\n\t\tneighbours.append((x, y + 1))\n\t\tneighbours.append((x, y - 1))\n\t\tneighbours.append((x + 1, y))\n\t\tneighbours.append((x - 1, y))\n\t\tneighbours.append((x + 1, y + 1))\n\t\tneighbours.append((x - 1, y + 1))\n\t\tneighbours.append((x - 1, y - 1))\n\t\tneighbours.append((x + 1, y - 1))\n\n\t\tvalid_neighbours = [x for x in neighbours if x[0] > 0 and x[0] <= 5 and x[1] > 0 and x[1] <= 5]\n\n\t\treturn valid_neighbours", "def neighbors(self, node):\r\n return list(self.graph.neighbors(node))", "def get_neighbours(self):\n return self._neighbours", "def get_neighbors(grid, x, y):\n out = []\n if x > 0:\n out.append(grid[x-1, y])\n if y > 0:\n out.append(grid[x, y-1])\n if y < grid.shape[1] - 1:\n out.append(grid[x, y+1])\n if x < grid.shape[0] - 1:\n out.append(grid[x+1, y])\n return out", "def iter_neighbors(x: int, y: int) -> t.Generator[COORDINATE, None, None]:\n yield x - 1, y\n yield x + 1, y\n yield x, y - 1\n yield x, y + 1", "def get_neighbouring_nodes(node) :\r\n\r\n connected_nodes = [] #A list of the connected nodes\r\n\r\n #Checking if the node belongs to the 1st row\r\n if(node.coords[0] != 0) :\r\n connected_node = Node((node.coords[0] - 1, node.coords[1]), goal_pos, node.gn_value - 1)\r\n #Checking if the node is an obstacle\r\n if(not connected_node.coords in obstacle_coords) :\r\n connected_nodes.append(connected_node)\r\n\r\n #Checking if the node belongs to the last row\r\n if(node.coords[0] != grid_dims[0] - 1) :\r\n connected_node = Node((node.coords[0] + 1, node.coords[1]), goal_pos, node.gn_value - 1)\r\n #Checking if the node is an obstacle\r\n if(not 
connected_node.coords in obstacle_coords) :\r\n connected_nodes.append(connected_node)\r\n\r\n #Checking if the node belongs to the 1st column\r\n if(node.coords[1] != 0) :\r\n connected_node = Node((node.coords[0], node.coords[1] - 1), goal_pos, node.gn_value - 1)\r\n #Checking if the node is an obstacle\r\n if(not connected_node.coords in obstacle_coords) :\r\n connected_nodes.append(connected_node)\r\n\r\n #Checking if the node belongs to the 1st column\r\n if(node.coords[1] != grid_dims[1] - 1) :\r\n connected_node = Node((node.coords[0], node.coords[1] + 1), goal_pos, node.gn_value - 1)\r\n #Checking if the node is an obstacle\r\n if(not connected_node.coords in obstacle_coords) :\r\n connected_nodes.append(connected_node)\r\n\r\n return connected_nodes", "def neighbors(\n self, state: Grid2D.State\n ) -> Iterable[Tuple[Grid2D.Action, Grid2D.State]]:\n # pylint: disable=invalid-name\n for a, cell in self.adjacent_coordinates(cell=state.agent_position):\n if not self.is_wall(cell):\n yield (a, Grid2D.State(cell))", "def neighbours_L(self):\n seen = set()\n return [l.other(self) for l in self.dovetails_L \\\n if id(l) not in seen and not seen.add(id(l))]", "def get_neighbours(self, row, col):\n neighbour_location_diffs = [(-1, -1),\n ( 0, -1),\n ( 1, -1),\n ( 1, 0),\n ( 1, 1),\n ( 0, 1),\n (-1, 1),\n (-1, 0)]\n neighbours = []\n for diff in neighbour_location_diffs:\n if (row + diff[0] >= 0 and\n row + diff[0] < self.height and\n col + diff[1] >= 0 and\n col + diff[1] < self.width):\n neighbours.append(self.cells[row + diff[0]][col + diff[1]])\n return neighbours", "def get_neighbors(self, x, y):\n\n if not self.has_vertex(x, y): return []\n neighbors = [(x + 1, y), (x, y - 1), (x - 1, y), (x, y + 1)]\n return [(x, y) for (x, y) in neighbors if self.has_vertex(x, y)]", "def neighbours((u,v)):\r\n return ((u,v+1), (u+1,v), (u,v-1), (u-1,v))", "def _set_node_neighbours(self, node):\n all_neighbours = [self.BOARD[node.y + y][node.x + x] for x in reversed(range(-1, 2)) for y in\n reversed(range(-1, 2))\n if 0 <= node.x + x < self.len_x and 0 <= node.y + y < self.len_y]\n non_traversable_neighbours = []\n for neighbour in all_neighbours:\n if not neighbour.traversable:\n non_traversable_neighbours.append(neighbour)\n elif neighbour.x != node.x and neighbour.y != node.y:\n x_diff = neighbour.x - node.x\n y_diff = neighbour.y - node.y\n if not self.BOARD[node.y + y_diff][node.x].traversable and \\\n not self.BOARD[node.y][node.x + x_diff].traversable:\n non_traversable_neighbours.append(neighbour)\n node.neighbours = [neighbour for neighbour in all_neighbours if neighbour not in non_traversable_neighbours]" ]
[ "0.7377711", "0.72533816", "0.717392", "0.7123747", "0.7041916", "0.70339006", "0.6999877", "0.69711643", "0.69493777", "0.69083476", "0.69074553", "0.6895527", "0.68819803", "0.6857781", "0.6856988", "0.684746", "0.6839912", "0.6798075", "0.6754088", "0.67450863", "0.67410076", "0.6735187", "0.6696648", "0.6692018", "0.666742", "0.66599673", "0.6650316", "0.66501236", "0.6620846", "0.6609022" ]
0.7481798
0
Translate with a given direction scaled by dist
def translate(self, axis: Vector, dist: float):
    self.origin = self.origin + axis * dist
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _define_transdist(self, loc: torch.Tensor, scale: torch.Tensor):\n\n return _define_transdist(loc, scale, self.increment_dist, self.ndim)", "def ty(self, dist: float) -> \"Mate\":\n self.translate(self.y_dir, dist)\n return self", "def zoom(self, dr):\n d = self.getDistance()\n vn = self.getViewNormal()\n vn *= dr*d\n GL.glTranslatef(vn[0], vn[1], vn[2])", "def translate(self, tr):\n c = self.c -self.a*tr[0] -self.b*tr[1]\n self.c =c\n self.pointN = self.pointN+tr\n self.point1 = self.point1+tr\n self.points +=tr", "def translate(surf, center=(0.0, 0.0, 0.0), direction=(1.0, 0.0, 0.0)):\n normx = np.array(direction) / np.linalg.norm(direction)\n # assume temporary normy to calculate normz\n norm_y_temp = [0.0, 1.0, 0.0]\n normz = np.cross(normx, norm_y_temp)\n if np.array_equal(normz, (0.0, 0.0, 0.0)):\n # the assumed normy axis is parallel to normx, so shift its\n # axis and recalculate normz\n norm_y_temp = np.roll(norm_y_temp, 1)\n normz = np.cross(normx, norm_y_temp)\n normz /= np.linalg.norm(normz)\n normy = np.cross(normz, normx)\n\n trans = np.zeros((4, 4))\n trans[:3, 0] = normx\n trans[:3, 1] = normy\n trans[:3, 2] = normz\n trans[3, 3] = 1\n\n surf.transform(trans)\n if not np.allclose(center, [0.0, 0.0, 0.0]):\n surf.points += np.array(center)", "def translate(self, displacement):\n\n self.center = (self.center[0] + displacement[0],\n self.center[1] + displacement[1])", "def tx(self, dist: float) -> \"Mate\":\n self.translate(self.x_dir, dist)\n return self", "def TransformDistance(*args, **kwargs):\n return _gdi_.GraphicsMatrix_TransformDistance(*args, **kwargs)", "def translate(self, dx, dy):\n self.origin = (self.origin[0] + dx, self.origin[1] + dy)\n return self", "def translate(self, dx, dy):\n self.origin = (self.origin[0] + dx, self.origin[1] + dy)\n return self", "def zoom(self, dr):\n d = self.getDistance()\n vn = self.getViewNormal()\n vn *= dr*d\n GL.glTranslate(vn[0], vn[1], vn[2])", "def move_forward(self, dist):\r\n self.send_command_without_response(f'forward {dist}')", "def translate_point(pt, length, direction):\n if isinstance(direction,float):\n # direction is a float (in radians)\n return (pt[0]+length*np.cos(direction), pt[1]+length*np.sin(direction))\n elif str(direction)==\"NORTH\":\n return (pt[0], pt[1]+length)\n elif str(direction)==\"SOUTH\":\n return (pt[0], pt[1]-length)\n elif str(direction)==\"WEST\":\n return (pt[0]-length, pt[1])\n elif str(direction)==\"EAST\":\n return (pt[0]+length, pt[1])", "def apply_direction_scale( vectors, direction, scale ):\n \"\"\"\n scaling is defined as:\n \n [p'][1 + (k - 1)n.x^2, (k - 1)n.x n.y^2, (k - 1)n.x n.z ]\n S(n,k) = [q'][(k - 1)n.x n.y, 1 + (k - 1)n.y, (k - 1)n.y n.z ]\n [r'][(k - 1)n.x n.z, (k - 1)n.y n.z, 1 + (k - 1)n.z^2 ]\n \n where:\n v' is the resulting vector after scaling\n v is the vector to scale\n n is the direction of the scaling\n n.x is the x component of n\n n.y is the y component of n\n n.z is the z component of n\n k is the scaling factor\n \"\"\"\n scaleMinus1 = scale - 1\n matrix = numpy.array(\n [\n # m1\n [\n # m11 = 1 + (k - 1)n.x^2\n 1 + scaleMinus1 * (direction[ 0 ]**2),\n # m12 = (k - 1)n.x n.y^2\n scaleMinus1 * direction[ 0 ] * direction[ 1 ]**2,\n # m13 = (k - 1)n.x n.z\n scaleMinus1 * direction[ 0 ] * direction[ 2 ]\n ],\n # m2\n [\n # m21 = (k - 1)n.x n.y\n scaleMinus1 * direction[ 0 ] * direction[ 1 ],\n # m22 = 1 + (k - 1)n.y\n 1 + scaleMinus1 * direction[ 1 ],\n # m23 = (k - 1)n.y n.z\n scaleMinus1 * direction[ 1 ] * direction[ 2 ]\n ],\n # m3\n [\n # m31 = (k - 1)n.x 
n.z\n scaleMinus1 * direction[ 0 ] * direction[ 2 ],\n # m32 = (k - 1)n.y n.z\n scaleMinus1 * direction[ 1 ] * direction[ 2 ],\n # m33 = 1 + (k - 1)n.z^2\n 1 + scaleMinus1 * direction[ 2 ]**2\n ]\n ],\n dtype = numpy.float\n )\n \n return numpy.dot( vectors, matrix )", "def translate ( self, dx, dy, dz):\n self.x = self.x + dx\n self.y = self.y + dy\n self.z = self.z + dz\n self.xyz = np.array((self.x, self.y, self.z))", "def translate(self, tr):\n self.points = self.points + tr", "def translate(self, displacement):\n self._center = self._center + np.array(displacement)\n self._position = self._position + np.array(displacement)", "def forward(self, dist):\n start = (self.pos_x, self.pos_y)\n self.pos_x += dist * math.cos(math.radians(self.angle))\n self.pos_y += dist * math.sin(math.radians(self.angle))\n self._update_limits()\n end = (self.pos_x, self.pos_y)\n if self.pen_down:\n self.draw.line([start, end], fill=self.colour, width=self.width)", "def move(self, axis, dist):\n t = self.moveTime\n N = self.moveSamples\n # read initial position for all channels\n texts = [getattr(self, ax + \"Label\").text()\n for ax in self.activeChannels]\n initPos = [re.findall(r\"[-+]?\\d*\\.\\d+|[-+]?\\d+\", t)[0] for t in texts]\n initPos = np.array(initPos, dtype=float)[:, np.newaxis]\n fullPos = np.repeat(initPos, self.nSamples, axis=1)\n\n # make position ramp for moving axis\n ramp = makeRamp(0, dist, self.nSamples)\n fullPos[self.activeChannels.index(axis)] += ramp\n\n# factors = np.array([convFactors['x'], convFactors['y'],\n# convFactors['z']])[:, np.newaxis]\n# fullSignal = fullPos/factors\n toc = ptime.time()\n for i in range(self.nSamples):\n# self.aotask.write(fullSignal, auto_start=True)\n# time.sleep(t / N)\n borrar = 1+1\n\n print(\"se mueve en\", np.round(ptime.time() - toc, 3), \"segs\")\n\n # update position text\n newPos = fullPos[self.activeChannels.index(axis)][-1]\n# newText = \"<strong>\" + axis + \" = {0:.2f} µm</strong>\".format(newPos)\n newText = \"{}\".format(newPos)\n getattr(self, axis + \"Label\").setText(newText)\n self.paramChanged()", "def translate(self, dx, dy):\n self.position = numpy.array((dx + self.position[0],\n dy + self.position[1]))\n\n return self", "def Translate(self, *args):\n return _itkTranslationTransformPython.itkTranslationTransformD2_Translate(self, *args)", "def move_right(self, dist):\r\n self.send_command_without_response(f'right {dist}')", "def move(distribution, delta):\r\n\r\n # --->>> Copy your previous code here.\r\n new_center = distribution.offset + delta\r\n new_values = distribution.values\r\n new_distribution = Distribution(new_center,new_values)\r\n \r\n return new_distribution # Replace this by your own result.\r", "def setDirectionTowardPoint(self, x, y, speed):\n currX = self.xcor()\n currY = self.ycor()\n # get actual vector from t to x,y\n dXactual = x - currX\n dYactual = y - currY\n\n # get the length of that vector. 
Can also use turtle.distance\n length = math.hypot(dXactual, dYactual)\n\n # now scale the vector\n try:\n self.dx = dXactual / length * speed\n self.dy = dYactual / length * speed\n except:\n self.dx = 0\n self.dy = 0", "def translate_direction(self):\n xpart = math.sin(self.direction)\n ypart = math.cos(self.direction)\n if ypart > 0:\n print(\"oben \", end='')\n else:\n print(\"unten \", end='')\n if xpart > 0:\n print(\"rechts\")\n else:\n print(\"links\")", "def tf_dist2deg(dist):\n x_rad = tf_dist2rad(dist)\n return tf_rad2deg(x_rad)", "def pan(self, dx, dy):\n d = self.getDistance()\n vr = self.getViewRight()\n vr *= dx*d\n GL.glTranslatef(vr[0], vr[1], vr[2])\n vu = self.getViewUp()\n vu *= dy*d\n GL.glTranslatef(vu[0], vu[1], vu[2])", "def translate_center(self, dx, dy, dz):\n center = self.center\n center[0] -= dx\n center[1] -= dy\n center[2] -= dz\n center[0] = min(max(center[0], self.bounds[0]), self.bounds[1])\n center[1] = min(max(center[1], self.bounds[0]), self.bounds[1])\n center[2] = min(max(center[2], self.bounds[0]), self.bounds[1])\n self.program[\"center\"] = self.center = center", "def movement(scale, direction):\n try:\n if direction == left:\n args[0].umvr(-scale, log=False, newline=False)\n elif direction == right:\n args[0].umvr(scale, log=False, newline=False)\n elif direction == up:\n args[1].umvr(scale, log=False, newline=False)\n elif direction == down:\n args[1].umvr(-scale, log=False, newline=False)\n except Exception as exc:\n logger.error('Error in tweak move: %s', exc)\n logger.debug('', exc_info=True)", "def move_point(p, direction, d=1):\n direction_guard(direction)\n x, y = p\n dx, dy = directions[direction]\n return (x + dx * d, y + dy * d)" ]
[ "0.64987594", "0.63573056", "0.6215362", "0.619352", "0.61720383", "0.6116733", "0.6090099", "0.6078002", "0.60694516", "0.60694516", "0.60559475", "0.60549414", "0.601227", "0.6008165", "0.5907597", "0.58837587", "0.5868495", "0.5858269", "0.5742285", "0.57237685", "0.57149464", "0.5713418", "0.5711941", "0.56370926", "0.56313366", "0.56279093", "0.56229645", "0.5569838", "0.55299443", "0.552977" ]
0.7711936
0
Return a new mate moved by the given Location
def moved(self, loc: Location) -> "Mate":
    def move(origin: Vector, vec: Vector, loc: Location) -> Tuple[Vector, Vector]:
        reloc = cast(Edge, Edge.makeLine(origin, origin + vec).moved(loc))
        v1, v2 = reloc.startPoint(), reloc.endPoint()
        return v1, v2 - v1

    origin, x_dir = move(self.origin, self.x_dir, loc)
    _, z_dir = move(self.origin, self.z_dir, loc)
    return Mate(origin, x_dir, z_dir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def move(self, deltaX, deltaY):\n\t\treturn Location(self.x + deltaX, self.y + deltaY)", "def move(self, deltaX, deltaY):\n return Location(self.x + deltaX, self.y + deltaY)", "def navigateNew(self, location):\r\n self.history = self.history[:self.currLoc+1]\r\n self.currLoc += 1\r\n self.history.append(location)\r\n return self.history[self.currLoc]", "def getNewLocation(self, currentLocation, directionalMovement):\n x = currentLocation[0] + directionalMovement[0]\n y = currentLocation[1] + directionalMovement[1]\n return (x, y)", "def _move_molecule_to(self, location):\n assert len(location) == 3, \"location must have len 3\"\n displacement = np.array(location, dtype=float) - self._get_molecule_center_of_mass()\n for atom_ind in range(len(self._crd)):\n self._crd[atom_ind] += displacement\n return None", "def move_toward(state, location):\n return move_relative(state, location, True)", "def move_away(state, location):\n\n return move_relative(state, location, False)", "def move(self, location):\n disp_x = location[0] - self._x_coord\n disp_y = location[1] - self._y_coord\n board = self._board\n\n # Instantiate dictionary of displaced locations to value they will take\n mov_map = dict()\n for position in self._area:\n mov_map[(position[0] + disp_x, position[1] + disp_y)] = board[position[0]][position[1]]\n\n # Clear previous locations\n for position in self._area:\n board[position[0]][position[1]] = \" \"\n\n # Place stones to displaced location\n for position in self._area:\n board[position[0] + disp_x][position[1] + disp_y] = \\\n mov_map[(position[0] + disp_x, position[1] + disp_y)]\n\n # Return the new stone locations for processing\n return set(mov_map.keys())", "def move(self, new_location):\n self.current_location = new_location", "def get_location(self, currentlocation, action):\n\t\t# Retrieve movement tuple from dictionary\n\t\tmovement = self.actions_dict[action]\n\t\t# Get new location using modulo of gridsize\n\t\tnewlocation = ((currentlocation[0]+movement[0]) % self.gridsize[0], (currentlocation[1]+movement[1]) % self.gridsize[1])\n\n\t\treturn newlocation", "def move(self, delta_x, delta_y):\n return Location(self._x + delta_x, self._y + delta_y)", "def move(self, point: Point) -> \"Location\":\n\n return Location(point=self.point + point, labware=self._given_labware)", "def __move(self):\n if self.goal is None:\n if self.tasks:\n self.goal = self.tasks.pop()\n self.goal_history.append(self.goal)\n self.logger.log(\n f\"Crewmate {self.agent_id} set as goal: {self.goal.name} in\" +\n f\" {self.game_map.room_names[self.goal.room_id]}\",\n Logger.LOG | Logger.PRINT_VISUAL)\n else:\n self.room = self.game_map.move_random(self)\n self.location_history.append(self.room)\n return\n\n if self.room is not self.goal.room_id:\n self.room = self.game_map.next_toward(self, self.goal.room_id)\n\n # Log the current room we are in: Either the room we moved to, or the room that happens to be the goal room\n self.location_history.append(self.room)", "def moveTo(self, location):\n self.currentLocation = location", "def move(self, cardinal, maze):\n\n adjacent_room = getattr(self.location, cardinal)\n\n if not adjacent_room:\n out = f\"You cannot go {cardinal} from here.\"\n else:\n adjacent_room.data[\"players\"][self.name] = \\\n self.location.data[\"players\"].pop(self.name)\n\n adjacent_room.players[self.name] = \\\n self.location.players.pop(self.name)\n\n maze[self.location.x][self.location.y] = MazeFactory.room_color\n self.location = adjacent_room\n 
maze[self.location.x][self.location.y] = MazeFactory.player_color\n\n out = \"You have entered \" + self.location.description\n MazeFactory.update(maze)\n return out", "def get_location_metres(original_location, dNorth, dEast): \n [r_center, r_level] = eclipse_compensate(original_location)\n \n # coordinate offsets in radians\n dLat = dNorth / r_center\n dLon = dEast / r_level\n \n # new position in decimal degrees\n newlat = original_location.lat + math.degrees(dLat)\n newlon = original_location.lon + math.degrees(dLon)\n \n # return according to the input coordinate Class\n if isinstance(original_location,LocationGlobal):\n targetlocation = LocationGlobal(newlat, newlon,original_location.alt)\n \n elif isinstance(original_location,LocationGlobalRelative):\n targetlocation = LocationGlobalRelative(newlat, newlon,original_location.alt)\n \n else:\n raise Exception(\"Invalid Location object passed\")\n \n return targetlocation", "def move1(self):\n\n options = self.location.exits.keys()\n self.location.objects.remove(a)\n print('fred is moving..')\n self.location = self.location.exits[random.choice(list(options))]\n self.location.objects.append(a)", "def update_location(self):\n if self.simulation:\n return (self.y, self.x)\n else:\n raise NotImplementedError\n\n self.y = new_y\n self.x = new_x\n\n return (new_y, new_x)", "def _calculate_move_location(self, direction):\n current_row = self._current_loc.get_row()\n current_column = self._current_loc.get_column()\n\n # Calculate the new location for a left move\n if (direction == \"l\"):\n return Location(current_row, current_column - 1)\n # Calculate the new location for an up move\n elif (direction == \"u\"):\n return Location(current_row - 1, current_column)\n # Calculate the new location for a right move\n elif (direction == \"r\"):\n return Location(current_row, current_column + 1)\n # Calculate the new location for a down move\n elif (direction == \"d\"):\n return Location(current_row + 1, current_column)\n return Location()", "def make_random_move(self):\n #raise NotImplementedError\n # Take out moves_made as well as mines detected\n self.available_cells = self.available_cells - self.moves_made - self.mines\n available_cells = self.available_cells.copy()\n\n # I'll first try and see if there's any move not within the nearby of\n # The mines, I think this can maximise survivability in some cases\n # It'll still work even if didn't do the below\n for sentence in self.knowledge:\n available_cells -= sentence.cells\n #print(sentence)\n #print(self.mines)\n\n # Making a random move\n length = len(available_cells)\n if length != 0:\n index = random.randint(0, length - 1)\n move = list(available_cells)[index]\n self.moves_made.add(move)\n self.mark_safe(move)\n return move\n\n length = len(self.available_cells)\n if length != 0:\n index = random.randint(0, length - 1)\n move = list(self.available_cells)[index]\n self.moves_made.add(move)\n self.mark_safe(move)\n return move\n return None", "def getMovement(self):\n # store the robot's current location and set the directional movement to 0,0 so that the robot won't move by default\n currentLocation = (self.me['x'], self.me['y'])\n directionalMovement = (0,0)\n\n # ensure that target location is not none and not equal to the current location\n if self.targetLocation and not currentLocation == self.targetLocation:\n\n # store the direction, directional movement, and the new map location we will trying to move the robot to this round\n direction = self.getDirection(currentLocation, 
self.targetLocation)\n directionalMovement = self.getDirectionalMovement(currentLocation, direction)\n newLocation = self.getNewLocation(currentLocation, directionalMovement)\n\n # store the current direction for use later\n initialDirection = direction\n\n # by default, the robot is ready to move in the event that the new map location is already passable\n readyToMove = True\n\n # while the new map location is not passable\n while not self.isPassable(newLocation):\n # if unit is a crusader moving diagonally at their fastest pace, set their directional movement to (1,1)\n if self.isCrusader and directionalMovement[0] == 2 and directionalMovement[1] == 2:\n directionalMovement[0] = 1\n directionalMovement[1] = 1\n # or if the unit is traveling faster than 1 block East\n elif directionalMovement[0] > 1:\n # lower the unit's movement East by 1 block\n directionalMovement[0] -= 1\n # or if the unit is traveling faster than 1 block West\n elif directionalMovement[0] < -1:\n # lower the unit's movement West by 1 block\n directionalMovement[0] += 1\n # or if the unit is traveling faster than 1 block South\n elif directionalMovement[1] > 1:\n # lower the unit's movement South by 1 block\n directionalMovement[1] -= 1\n # or if the unit is traveling faster than 1 block North\n elif directionalMovement[1] < -1:\n # lower the unit's movement North by 1 block\n directionalMovement[1] += 1\n # else the unit is already moving the shortest distance they can in the current direction\n else:\n # rotate the robots direction clockwise and proceed\n direction = self.getRotatedDirection(direction, 1)\n\n # if we ened up facing the same direction we started in\n if direction == initialDirection:\n # let the code know we're not ready to move\n readyToMove = False\n # break out of the while loop\n break\n\n # overwrite the directional movement with a new one based on the direction we just got\n directionalMovement = self.getDirectionalMovement(currentLocation, direction)\n\n # overwrite the new location with the location we get from the directional movement we just got\n newLocation = self.getNewLocation(currentLocation, directionalMovement)\n\n # if the robot ended up not being ready to move\n if not readyToMove:\n # change the directional movement back to (0,0) so that it doesn't move\n directionalMovement = (0,0)\n else :\n self.targetLocation = self.getRandomPassableLocation()\n # return the directional movement\n return directionalMovement", "def move(self):\n \n # checks for bots nearby\n next_move = self.follow()\n \n # finds a random move if no bot\n if next_move is self.position:\n self.position = self.wander()\n else:\n self.position = next_move", "def get_location_metres(original_location, dNorth, dEast):\n earth_radius = 6378137.0 #Radius of \"spherical\" earth\n #Coordinate offsets in radians\n dLat = dNorth/earth_radius\n dLon = dEast/(earth_radius*math.cos(math.pi*original_location.lat/180))\n\n #New position in decimal degrees\n newlat = original_location.lat + (dLat * 180/math.pi)\n newlon = original_location.lon + (dLon * 180/math.pi)\n if type(original_location) is LocationGlobal:\n targetlocation=LocationGlobal(newlat, newlon,original_location.alt)\n elif type(original_location) is LocationGlobalRelative:\n targetlocation=LocationGlobalRelative(newlat, newlon,original_location.alt)\n else:\n raise Exception(\"Invalid Location object passed\")\n \n return targetlocation;", "def get_location_metres(original_location, dNorth, dEast):\n earth_radius = 6378137.0 # Radius of \"spherical\" earth\n # 
Coordinate offsets in radians\n dLat = dNorth / earth_radius\n dLon = dEast / (earth_radius * math.cos(math.pi * original_location.lat / 180))\n\n # New position in decimal degrees\n newlat = original_location.lat + (dLat * 180 / math.pi)\n newlon = original_location.lon + (dLon * 180 / math.pi)\n if type(original_location) is LocationGlobal:\n targetlocation = LocationGlobal(newlat, newlon, original_location.alt)\n elif type(original_location) is LocationGlobalRelative:\n targetlocation = LocationGlobalRelative(newlat, newlon, original_location.alt)\n else:\n raise Exception(\"Invalid Location object passed\")\n\n return targetlocation;", "def move_to_location(cardinal_point):\r\n\r\n\ttry:\r\n\r\n\t\told_room = config[\"GAMEDATA\"][\"CURRENTZONE\"]\r\n\t\tnew_room = world.WORLD_ROOMS[old_room][cardinal_point]\r\n\t\t\r\n\t\tif new_room == None:\r\n\t\t\ttprint(\"You cannot go there.\")\r\n\t\t\treturn\r\n\r\n\t\t\r\n\t\tnew_room_name = getstring(world.WORLD_ROOMS[new_room][\"NAME\"])\r\n\t\t\r\n\t\tdebug(\"new_room = \" + str(new_room))\r\n\t\tdebug(\"new_room_name = \" + str(new_room_name))\r\n\r\n\t\tif world.WORLD_ROOMS[new_room][\"NEEDITEM\"] != None: # If an item is required to go there...\r\n\t\t\tcurrent_inventory = config[\"GAMEDATA\"][\"INVENTORY\"]\r\n\t\t\tneeded_item_id = world.WORLD_ITEMS[world.WORLD_ROOMS[new_room][\"NEEDITEM\"]][\"ID\"]\r\n\t\t\tneeded_item_name = world.WORLD_ITEMS[world.WORLD_ROOMS[new_room][\"NEEDITEM\"]][\"NAME\"]\r\n\t\t\t\r\n\t\t\tif current_inventory == None:\r\n\t\t\t\ttprint(\"You do not have the required item in your inventory,\")\r\n\t\t\t\ttprint(\"You need to have '\" + needed_item_name + \"'\")\r\n\t\t\t\treturn\r\n\t\t\t\t\r\n\t\t\telse: # Inventory isn't blank\r\n\t\t\t\tfor item_id in current_inventory:\r\n\t\t\t\t\tif item_id == needed_item_id: # If the player have the needed item in his inventory...\r\n\t\t\t\t\t\ttprint(\"You entered by using \" + needed_item_name)\r\n\t\t\t\t\t\ttprint(\"you are now at : \" + new_room_name)\r\n\t\t\t\t\t\tconfig[\"GAMEDATA\"][\"CURRENTZONE\"] = new_room\r\n\t\t\t\t\t\treturn # Exits the function\r\n\t\t\t\t\t\r\n\t\t\t\t# If we arrive here, this means that the player doesn't have the needed item.\r\n\t\t\t\ttprint(\"You do not have the required item in your inventory,\")\r\n\t\t\t\ttprint(\"You need to have '\" + needed_item_name + \"'\")\r\n\t\t\t\treturn\r\n\t\t\t\r\n\t\telse: # The room doesn't requires an item...\r\n\t\t\tconfig[\"GAMEDATA\"][\"CURRENTZONE\"] = new_room\r\n\t\t\ttprint(\"You are now at : \" + new_room_name)\r\n\t\t\treturn\r\n\t\r\n\texcept Exception as error: # If we arrive here, this means that there is a bug in there, oops.\r\n\t\tprint(\"ERROR! 
in function move_to_location() try block raised an exception !\")\r\n\t\tprint(str(error))\r\n\t\ttraceback.print_exc()\r\n\t\treturn", "def set_new_location(self, xPos, yPos):", "def move(babka, current_location, destination):\r\n global npc\r\n if destination == -1:\r\n print('Текущая локация:', helper.locations[current_location][0])\r\n else:\r\n print('Бабка', babka.name, 'перемещается...')\r\n print('Текущая локация:', helper.locations[destination][0])\r\n npc = func.loc_gen(babka, destination)\r\n return destination", "def tryout_new_location(self):\n try_location = [0, 0]\n \n # try locations until a not-occupied location is found and not all folds are checked\n while try_location in self.occupied:\n\n # folds north everytime\n current_type = 2\n \n # check if location is possible \n try_location = self.assign_location(current_type)\n\n # if location is not possible, try next fold\n if try_location in self.occupied:\n continue\n # if location is possible, use location\n else:\n self.next_location = try_location\n return", "def neighbor(self, move):\n move_coord = Maze.dirs_to_moves[move]\n return Maze(self.grid, (self.location[0]+move_coord[0], self.location[1]+move_coord[1]))", "def change_to_spawnbox_coords(loc: np.ndarray) -> np.ndarray:\r\n spawnbox: bpy.types.Object = bpy.data.objects[cng.SPAWNBOX_OBJ]\r\n new_origo = np.array(spawnbox.location) # spawnbox location is center point\r\n new_loc = loc - np.array(new_origo)\r\n return new_loc / np.array(spawnbox.dimensions) * 2" ]
[ "0.6578063", "0.6445667", "0.64384043", "0.63514256", "0.6349512", "0.6325681", "0.62559164", "0.62311095", "0.61734366", "0.6162421", "0.6156318", "0.59647125", "0.5897715", "0.58834577", "0.58276635", "0.5812422", "0.57135344", "0.5685478", "0.56570804", "0.5655626", "0.5641067", "0.56272596", "0.5610346", "0.559594", "0.5513689", "0.5492951", "0.5476827", "0.54742134", "0.54498655", "0.5446333" ]
0.77116907
0
Add a document to the term-document matrix.
def add_doc(self, document):
    # Split document up into list of strings
    #words = self.tokenize(document)
    words = document
    # Count word frequencies in this document
    word_counts = {}
    for word in words:
        word_counts[word] = word_counts.get(word, 0) + 1
    # Add word counts as new row to sparse matrix
    self.sparse.append(word_counts)
    # Add to total document count for each word
    for word in word_counts:
        self.doc_count[word] = self.doc_count.get(word, 0) + 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_document(self, document: Document) -> None:\n self._index[document.url] = document", "def add(self, document):\n #words=[word.lower() for word in words if word.isalpha()] #added on 0415\n for token in [t.lower() for t in nltk.word_tokenize(document)]:\n if not token.isalpha():\n continue\n\n if token in self.stopwords:\n continue\n \n if self.stemmer:\n token = self.stemmer.stem(token)\n \n if self.__unique_id not in self.index[token]:\n self.index[token].append(self.__unique_id)\n \n self.documents[self.__unique_id] = document\n self.__unique_id += 1", "def append(self, doc):\n pass", "def append(self, document):\n raise NotImplemented(\"Corpus does not allow appending\")", "def add_document(self, doc_info):\n docname = doc_info[u'name']\n docid = doc_info[u'url']\n terms = doc_info[u'terms']\n text = doc_info[u'text']\n reader_name = doc_info[u'reader']\n doc = self.get_document(docid)\n if doc is not None:\n if doc.text == text and doc.reader == reader_name:\n # nothing has changed, so return\n return\n self._clear_document(docid)\n \n term_counts = defaultdict(int)\n for term in terms:\n if isinstance(term, tuple):\n # this is a (term, value) tuple\n term, value = term\n else:\n value = 1\n term_counts[term] += value\n term_items = term_counts.items()\n total = 0\n for term, value in term_counts.items():\n self._increment_term_count(term, abs(value), True)\n total += abs(value)\n self._increment_term_count(ANY, total, True)\n\n for key, value in doc_info.get('tags', []):\n self.set_tag_on_document(docid, key, value)\n \n doc = Document(docid, docname, reader_name, text, term_items)\n self.sql_session.add(doc)\n #self.commit()", "def store(self, doc):\n if doc is None:\n return\n assert isinstance(doc, Document)\n idx = doc.features.get(self.idxfeatname())\n if idx is None:\n raise Exception(\"Cannot append document, no __idx_ID feature\")\n self.__setitem__(idx, doc)", "def add(self, document):\n return self.db.update({document['id']: document})", "def build_term_doc_matrix(self):\n\n print(\"Inside build_term_doc_matrix >>> \")\n self.term_doc_matrix = np.zeros([self.number_of_documents,self.vocabulary_size])\n for kVal in range(0, self.number_of_documents):\n for lVal,wordVocab in enumerate(self.vocabulary):\n wrd_doc = 0\n for oVal in range(0, len(self.documents[kVal])):\n if (wordVocab == self.documents[kVal][oVal]):\n wrd_doc = wrd_doc +1\n self.term_doc_matrix[kVal][lVal] = wrd_doc\n #print(\"term_doc_matrix >>> \" + self.term_doc_matrix)", "def add(self, documents):\n\n if self.cluster:\n self.cluster.add(documents)\n else:\n super().add(documents)\n\n return documents", "def add_document(self, index: str, doc_id: str, document: Dict[str, Any]):\n self.__client__.index(index=index, body=document, id=doc_id, refresh=\"wait_for\")", "def add(cls, doc):\n cls.get_collection().add(doc)", "def append(self, doc):\n if doc is None:\n return\n assert isinstance(doc, Document)\n self.fh.write(doc.save_mem(fmt=\"json\"))\n self.fh.write(\"\\n\")\n self.n += 1", "def index_document(self, document):\n # Recursively collect records\n records = []\n if document.get_type() is document.TYPE_DIR:\n dirname = document.get_filename()\n subdirs, files = document.get_contents()\n for subdir in subdirs:\n document.set_filename(os.path.join(dirname, subdir))\n self.index_document(document)\n for filename in files:\n document.set_filename(os.path.join(dirname, filename))\n record = self.create_record(document)\n if record is not None:\n records.append(record)\n\n if len(records) == 0:\n 
return\n\n # Store records\n writer = self.get_index().writer()\n for record in records:\n writer.add_document(**record)\n writer.commit()", "def CreateNewFile(self):\n\t\tself.acad.Documents.Add()", "def add_documents_to_gensim_dictionary(gensim_dictionary_model, text):\n gensim_dictionary_model.add_documents(text)", "def add_doc(self, name, boring):\n\n self.documents[name] = boring", "def add_new_doc(self, document, end_of_corpus):\n max_tf = 0\n unique_terms_counter = 0\n document_dictionary = document.term_doc_dictionary\n # Go over each term in the doc\n for term in document_dictionary:\n try:\n # Update inverted index and posting\n if term not in self.inverted_idx:\n self.inverted_idx[term] = 1\n unique_terms_counter += 1\n else:\n self.inverted_idx[term] += 1\n if term not in self.posting_dict:\n self.posting_dict[term] = []\n\n self.posting_dict[term].append(\n (document.tweet_id, document_dictionary[term])) # key: str , value: array of tuples\n\n max_tf = max(document_dictionary[term], max_tf)\n\n except:\n\n print('problem with the following key {}'.format(term[0]))\n\n document.max_tf = max_tf\n document.unique_terms = unique_terms_counter\n self.docs_count += 1\n\n modulo = int(document.tweet_id) % 10\n self.documents[modulo][document.tweet_id] = [document.term_doc_dictionary, document.max_tf]\n\n if self.docs_count == self.DOCS_SIZE or end_of_corpus: # if we reach chunk size or end of corpus\n self.add_to_file(end_of_corpus)\n self.docs_count = 0\n self.posting_dict = {}\n\n for i in self.documents: # 0 - 9\n if self.documents[i].__len__() > 15000:\n doc = utils.load_obj(self.out + \"document\" + str(i))\n doc.update(self.documents[i])\n utils.save_obj(doc, self.out + \"document\" + str(i))\n self.documents[i] = {}", "def add(self, docs: DocumentArray, *args, **kwargs):\n cursor = self.connection.cursor()\n try:\n psycopg2.extras.execute_batch(\n cursor,\n f'INSERT INTO {self.table} (ID, DOC) VALUES (%s, %s)',\n [\n (\n doc.id,\n doc.SerializeToString(),\n )\n for doc in docs\n ],\n )\n except psycopg2.errors.UniqueViolation as e:\n self.logger.warning(\n f'Document already exists in PSQL database. {e}. 
Skipping entire transaction...'\n )\n self.connection.rollback()\n self.connection.commit()", "def add_new_doc(self, document):\n self.counterOfTweets += 1\n docID = document.tweet_id\n document_dictionary = document.term_doc_dictionary # document_dictionary = {term:[[indexes],freq]}\n self.tweetTerms[docID] = list(document_dictionary.keys())\n freq_max = sorted(list(document_dictionary.values()), key=itemgetter(1), reverse=True)[0][1] # Gets the maxFreq\n postingFileName = \"\"\n\n # Go over each term in the doc\n for term in sorted(list(document_dictionary.keys())):\n\n # Deciding the type of the term\n if (str(term[0]).lower() not in self.letters): # others\n type = 1\n elif (len(term) > 1): # 'J'\n if str(term[1]).lower() not in self.letters and str(term[1]).lower() != '.': # 1400 -> 1.400K\n type = 1\n else: # strings\n type = 2\n else: # strings\n type = 2\n\n if (' ' in term): # alone entities\n if term not in self.alone_entities_dict: # fix it\n self.alone_entities_dict[term] = 0\n self.alone_entities_dict[term] += 1\n\n if (type == 1):\n if (postingFileName != \"postingOthers\"):\n postingFileName = \"postingOthers\"\n\n elif (len(term) == 1):\n if postingFileName != \"posting_\" + term.lower():\n postingFileName = \"posting_\" + term.lower()\n\n elif (term[1] == '.'):\n if postingFileName != \"posting_\" + term[0].lower():\n postingFileName = \"posting_\" + term[0].lower()\n else:\n if postingFileName != \"posting_\" + str(term[0]).lower() + str(term[1]).lower():\n postingFileName = \"posting_\" + term[0].lower() + term[1].lower()\n\n indexes_t = document_dictionary[term][0]\n freq_t = document_dictionary[term][1]\n tf = freq_t / freq_max\n\n if term not in self.inverted_idx.keys():\n self.postingFiles[postingFileName][term] = []\n self.postingFiles[postingFileName][term].append([freq_t, docID, indexes_t, tf])\n self.inverted_idx[term] = [1, freq_t, postingFileName]\n\n else:\n # update inv_dict:\n self.inverted_idx[term][0] += 1 # add another doc to the count in the inv_dict\n self.inverted_idx[term][1] += freq_t\n self.postingFiles[postingFileName][term].append([freq_t, docID, indexes_t, tf])", "def append(self, doc):\n if doc is None:\n return\n assert isinstance(doc, Document)\n data = {}\n if self.data_fields:\n if isinstance(self.data_fields, list):\n for fname in self.data_fields:\n data[fname] = doc.features[self.data_feature][fname]\n else:\n data.update(doc.features[self.data_feature])\n # assign the document field last so it overwrites anything that comes from the data feature!\n if self.document_bdocjs:\n data[self.document_field] = doc.save_mem(fmt=\"json\")\n else:\n data[self.document_field] = doc.text\n self.fh.write(json.dumps(data))\n self.fh.write(\"\\n\")\n self.n += 1", "def add_document(self, portal_name, document):\n if isinstance(document, dict):\n document = json.dumps(document)\n r = requests.post('/'.join([self.base_url, self.DOCUMENTS_ENDPOINT, portal_name]),\n data=document,\n headers={'Content-Type': 'application/json'})\n return r.json()", "def add_document_wordcount(self) -> None:\n\n wordcount_metadata = self.md_env.get(\"wordcount\", {})\n if not wordcount_metadata:\n return\n\n # save the wordcount to the sphinx BuildEnvironment metadata\n if self.sphinx_env is not None:\n meta = self.sphinx_env.metadata.setdefault(self.sphinx_env.docname, {})\n meta[\"wordcount\"] = wordcount_metadata\n\n # now add the wordcount as substitution definitions,\n # so we can reference them in the document\n for key in (\"words\", \"minutes\"):\n value = 
wordcount_metadata.get(key, None)\n if value is None:\n continue\n substitution_node = nodes.substitution_definition(\n str(value), nodes.Text(str(value))\n )\n substitution_node.source = self.document[\"source\"]\n substitution_node[\"names\"].append(f\"wordcount-{key}\")\n self.document.note_substitution_def(substitution_node, f\"wordcount-{key}\")", "def document(self, document):\n\n self._document = document", "def document(self, document):\n\n self._document = document", "def append(self, xml):\r\n doc = self.kml_doc.documentElement.getElementsByTagName('Document')[0]\r\n doc.appendChild(xml.documentElement)", "def add_document(self, doc):\n assert isinstance(doc, pylastica.document.Document), \"doc must be of type Document: %r\" % doc\n path = urllib.quote_plus(str(doc.doc_id))\n request_type = pylastica.request.Request.PUT\n if path is None or path == '':\n #no doc id has been given; use post so that an id is automatically created\n request_type = pylastica.request.Request.POST\n options = doc.get_options([\n 'version',\n 'version_type',\n 'routing',\n 'percolate',\n 'parent',\n 'ttl',\n 'timestamp',\n 'op_type',\n 'consistency',\n 'replication',\n 'refresh',\n 'timeout'\n ])\n response = self.request(path, request_type, doc.data, options)\n data = response.data\n if (doc.auto_populate or self.index.client.get_config_value(['document', 'autoPopulate'], False)) and response.is_ok():\n if doc.has_id():\n if '_id' in data:\n doc.doc_id = data['_id']\n if '_version' in data:\n doc.version = data['_version']\n return response", "def add_new_word(data_matrix, new_word):\n\n\t# Read old data_matrix into dm_rows\n\twith open(data_matrix, 'r') as old_data_matrix:\n\t\treader = csv.reader(old_data_matrix)\n\t\tdm_rows = [row for row in reader]\n\n\t# Append new word to dm_rows\n\t# Append zero as value for the new word to all previous documents\n\tdm_rows[0].append(new_word)\n\tfor row in dm_rows[1:]:\n\t\trow.append('0')\n\n\n\t# Rewrite results to original data_matrix file\n\twith open(data_matrix, 'w') as new_data_matrix:\n\t\twriter = csv.writer(new_data_matrix, lineterminator='\\n')\n\t\tfor row in dm_rows:\n\t\t\twriter.writerow(row)", "def add_doc(self, et):\n if et.target_namespace not in self.docs:\n self.docs[et.target_namespace] = set()\n if et.target_namespace not in self.types:\n self.types[et.target_namespace] = {}\n if et.target_namespace not in self.elements:\n self.elements[et.target_namespace] = {}\n if et.docfile:\n assert et.docfile not in self.docs[et.target_namespace], \\\n (et.docfile, et.target_namespace)\n self.resolver.notify(et.docpath)\n self.docs[et.target_namespace].add(et.docfile)", "def document_add(app_id, group_tag, document_model):\n return 'doc_add_%s_%s_%s' % (str(app_id), str(document_model), group_tag)", "def add_document(\n self,\n doc_id,\n nosave=False,\n score=1.0,\n payload=None,\n replace=False,\n partial=False,\n no_create=False,\n **fields,\n ):\n self.client._add_document(\n doc_id,\n conn=self._pipeline,\n nosave=nosave,\n score=score,\n payload=payload,\n replace=replace,\n partial=partial,\n no_create=no_create,\n **fields,\n )\n self.current_chunk += 1\n self.total += 1\n if self.current_chunk >= self.chunk_size:\n self.commit()" ]
[ "0.6929864", "0.6883895", "0.65847003", "0.65210354", "0.64119345", "0.63931704", "0.6141145", "0.6057907", "0.60547733", "0.60378766", "0.59707755", "0.5894285", "0.58623147", "0.58588594", "0.5857861", "0.5845867", "0.58418286", "0.5770271", "0.5749252", "0.57470435", "0.57424533", "0.57239044", "0.5719849", "0.5719849", "0.5706481", "0.5693982", "0.56810546", "0.5678554", "0.56690365", "0.56543875" ]
0.7465959
0
Write term-document matrix to a CSV file. filename is the name of the output file (e.g. 'mymatrix.csv'). cutoff is an integer that specifies that only words which appear in 'cutoff' or more documents should be written out as columns in the matrix.
def write_csv(self, filename, cutoff=2):
        f = csv.writer(open(filename, 'wb'))
        for row in self.rows(cutoff=cutoff):
            f.writerow(row)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_out(matrix, filename):\n with open(filename, 'w') as csvfile:\n writer = csv.writer(csvfile)\n for r in matrix:\n writer.writerow(r)\n print(filename + ' writen!')", "def WriteToCsv(matrix, csvFileName, csvDelimiter=','):\r\n \r\n if os.path.isfile(csvFileName) == True:\r\n os.remove(csvFileName) # Deletes the CSV file\r\n\r\n filePermission = \"w\" # Platform-specific file reading privileges\r\n #if platform.system() == \"Windows\":\r\n # filePermission = \"wb\"\r\n \r\n with open(csvFileName, filePermission) as csvfile:\r\n writer = csv.writer(csvfile, delimiter=csvDelimiter, quotechar='|', quoting=csv.QUOTE_MINIMAL)\r\n for row in matrix:\r\n if row != []:\r\n writer.writerow(row)\r\n csvfile.close()", "def write_output(basis, filename):\n\n logging.info('Writing output to {}'.format(filename))\n\n basis.to_csv(filename)", "def writeCSV(filename):\n if not filename.endswith('.csv'):\n filename += '.csv'\n with open(filename, 'wb') as csvfile:\n filewriter = csv.writer(csvfile, delimiter=',',\n quotechar='|', quoting=csv.QUOTE_MINIMAL)\n for x in range(numRows):\n scores = quizScores()\n types = getTypes(quizScores())\n extra = []\n filewriter.writerow([x] + types + scores + extra)", "def write_csv(self, filename: str, author_data: list):\n split_file = filename.split('/')\n new_filename = \"\"\n for index, component in enumerate(split_file):\n if index == len(split_file) - 1:\n break\n new_filename += component + \"/\"\n new_filename += \"results_\" + split_file[-1]\n with open(new_filename, 'w') as csvfile:\n result_writer = csv.writer(csvfile)\n result_writer.writerow(self._csv_headings)\n for author in author_data:\n result_writer.writerow(self.author_data_to_str(author))\n csvfile.close()", "def write_file(file):\n file.to_csv('data_set.csv', encoding='utf-8', index=False)", "def write_output_csv(filename, **kwargs):\n import csv\n import time\n\n intermediate = kwargs.pop(\"intermediate\", False)\n\n keys = sorted(kwargs.keys())\n num_vars = len(keys)\n\n if intermediate:\n full_filename = filename + \"_interm\"\n else:\n dot_index = filename.rfind('.')\n if dot_index != -1:\n full_filename = (filename[:dot_index]\n + time.strftime(\"%Y-%m-%d-%H.%M.%S\")\n + filename[dot_index:])\n else:\n full_filename = filename + time.strftime(\"%Y-%m-%d-%H.%M.%S\")\n\n # add current time to filename as an identifier\n with open(full_filename, 'w', newline='') as csvfile:\n\n writer = csv.writer(csvfile)\n\n # write header\n writer.writerow(keys)\n\n num_entries = len(kwargs[keys[0]])\n for i in range(num_entries):\n writer.writerow(kwargs[keys[j]][i] for j in range(num_vars))", "def saveCSV(self):\n filename=tkFileDialog.asksaveasfilename(defaultextension='.csv',\n initialdir=os.getcwd(),\n filetypes=[(\"csv\",\"*.csv\"),(\"All files\",\"*.*\")])\n if not filename:\n return\n for m in self.matrices:\n matrix = self.matrices[m] \n if matrix != None: \n c=matrix.csvRepresentation()\n f=open(filename,'w')\n f.write(c)\n f.close()\n return", "def exportMmf(self, filename):\n self.matrix.export_mtx(filename)", "def sparse_matrix_to_csv(filename: str,\n X: scipy.sparse.csr_matrix,\n halve_ratings=False):\n data, rows, cols = X.data, *X.nonzero()\n with open(filename, mode='w') as file:\n file_matrix = csv.writer(file,\n delimiter=',',\n quotechar='\"',\n quoting=csv.QUOTE_MINIMAL)\n for rating, user_id, movie_id in zip(data, rows, cols):\n # restore ratings to their original scale\n if halve_ratings:\n rating = rating / 2.\n file_matrix.writerow([user_id, movie_id, rating])", "def 
mat_to_csv(\n self,\n input_matrix,\n output_csv,\n fields=None,\n n_tab=1,\n debug=False,\n i='origin',\n j='destination'\n ):\n script_text = r\"\"\"\n RUN PGM=MATRIX PRNFILE=\"format_env\\mat_to_csv.prn\" MSG='mat_to_csv'\n\n FILEI MATI[1] = filei_mati\n FILEO PRINTO[1] = fileo_printo\n\n print_headers\n JLOOP\n print_in_jloop\n ENDJLOOP\n\n ENDRUN\n \"\"\"\n if fields is None:\n tabs = ['tab_%i' % (i + 1) for i in range(n_tab)]\n fields = tabs\n else:\n n_tab = len(fields)\n field_names = ', '.join(fields)\n\n filei_mati = '\"%s\"' % input_matrix\n fileo_printo = '\"%s\"' % output_csv\n\n print_headers = 'IF (I = 1) \\n PRINT LIST =\"' + '\" ,\";\" ,\"'.join([i, j] + fields) + '\" PRINTO = 1 \\n ENDIF'\n print_assignation = ' '.join(['%s = MI.1.%s \\n' % (fields[i].replace(' ', '_'), i + 1) for i in range(n_tab)])\n print_statement = 'PRINT LIST = I, \";\", J, \";\", ' + ',\";\",'.join([f.replace(' ', '_') for f in fields]) + ' PRINTO = 1'\n print_in_jloop = print_assignation + ' \\n' + print_statement\n\n # creating a cube script\n script = open(self.environment + r'\\mat_to_csv.s', 'w', encoding='latin')\n script.write(script_text.replace(\n 'format_env', self.environment).replace(\n 'filei_mati', filei_mati).replace(\n 'fileo_printo', fileo_printo).replace(\n 'field_names', field_names).replace(\n 'print_in_jloop', print_in_jloop).replace('print_headers', print_headers))\n script.close()\n\n # runs the script with voyager.exe\n options = \"\"\"/Start /CloseWhenDone /Minimize /NoSplash\"\"\" if not debug else \"\"\n os.system('voyager.exe \"' + self.environment + r'\\mat_to_csv.s\" ' + options)", "def write_csv(self, outfile, collapse_orders=False, show_age=False):\r\n # Write header row\r\n outfile.write(self.get_csv_header(collapse_orders, show_age).encode())\r\n\r\n # Write content\r\n for x in self.records:\r\n x.write_csv(outfile, collapse_orders, show_age)", "def save_csv(self, filename): # DONE\n self.data.to_csv(filename)", "def print_matrix_to_file(matrix, fileName):\n with open(fileName, 'w') as f:\n for row in matrix:\n print('\\t'.join(map(str, row)), file=f)", "def write(self):\n \n self.df.to_csv('/home/austin/Desktop/Falcon/realestate/Falcon/Datasets/mls.csv')", "def to_csv(self, filename):\n self.data.to_csv(filename)", "def to_csv(self, filename):\n self.data.to_csv(filename)", "def writetoCSV(self, fileName):\n\n with open(fileName, 'w') as writeFile:\n writeFile.write(\"ID,Fx,Fy,Fz\\n\")\n for fstnr in F:\n writeFile.write(str(fstnr.ID))\n for i in fstnr.force:\n writeFile.write(',' + str(i))\n writeFile.write('\\n')", "def write_to_csv(results, filename):\r\n fieldnames = ('datetime_utc', 'distance_au', 'velocity_km_s',\r\n 'designation', 'name', 'diameter_km',\r\n 'potentially_hazardous')\r\n\r\n with open(filename, 'w') as outfile:\r\n writer = csv.writer(outfile)\r\n writer.writerow(fieldnames)\r\n for row in results:\r\n r = [row.time, row.distance, row.velocity, row.neo.designation,\r\n row.neo.name, row.neo.diameter, row.neo.hazardous]\r\n writer.writerow(r)", "def write_index(self, file_name):\n self.df_index.to_csv(file_name, sep='\\t')", "def write_file(self, filename):\n\n with open(filename, 'w', newline = '') as csvfile:\n langwriter = csv.writer(csvfile, delimiter=' ',\n quotechar='|', quoting=csv.QUOTE_MINIMAL)\n for key in self.features:\n value = self.features[key]\n l = []\n for val in value:\n l.append(str(val))\n langwriter.writerow([l])\n return", "def write_tocsv(file_name, dataframe) :\n print(\"\\nSaved result to {}\\n\".format(file_name))\n 
dataframe.to_csv(file_name, mode='a', header=False,index=False)", "def MatrixToFile(self):\n # open text file\n file = open(\"intersection_matrix.txt\", 'w')\n # write opening square bracket for matrix\n file.write(\"[\")\n # use for loop to write in the matrix\n for i in range(self.rows):\n # square brackets to append in elements of a row of the matrix\n mat = []\n if i != 0:\n # separate each row with a comma\n file.write(\",\")\n for j in range(self.cols):\n # append elements of the row\n mat.append(self.matrix[i][j])\n # avoid having space as the first row in the text file\n if i != 0:\n file.write(\"\\n\")\n # write in the row\n file.write(str(mat))\n # write closing bracket for the matrix\n file.write(\"]\")\n # close file\n file.close()\n return", "def cluster_data(self, doc_matrix, ldamodel, to_csv=False, keywords=None, filenames=None, num_categories=-1):\n test_clusters = []\n for doc in doc_matrix:\n scores = ldamodel[doc]\n # TODO check argmax\n test_clusters.append(scores[np.argmax([s[1] for s in scores])][0])\n if to_csv and keywords is not None and filenames is not None and num_categories is not -1:\n filename = '%s_%d_categories.csv' % (self.lang, num_categories)\n with open(filename, 'w', encoding='utf-8') as f:\n f.write('file,language,num_categories,cluster,keywords')\n for i, fn in enumerate(filenames):\n f.write('\\n%s,%s,%d,%d,%s' % (\n fn,\n self.lang,\n num_categories,\n test_clusters[i],\n '|'.join(keywords[i])\n ))\n return test_clusters", "def to_csv(self, filename, **kwargs):\n self.data.to_csv(filename, **kwargs)", "def csv_export(df_to_export, norm_term, strain_info):\n \n # Makes a list of unique and sorted 'Condition' values to use for the iteration processes.\n i_term = 0\n cond_list = df_to_export.index.get_level_values('Condition')\n cond_list = sorted(list(set(cond_list)))\n \n # Checks for presence of './archive' directory, and creates it if it doesn't exist.\n if not os.path.isdir('./archive'):\n os.mkdir('archive')\n\n # Generate files for all condition sets with 'Temp' = -80.\n for idx, condi in df_to_export[-80].groupby('Condition'):\n csv_f_name = './archive/' + strain_info + '_' + cond_list[i_term] + '_' + '-80_' + norm_term + '.csv'\n condi.to_csv(path_or_buf=csv_f_name, index_label=['Condition', 'Dose'])\n i_term += 1\n\n # Generate files for all condition sets with 'Temp' = 'RT'.\n i_term = 0\n for idx, condi in df_to_export['RT'].groupby('Condition'):\n csv_f_name = './archive/' + strain_info + '_' + cond_list[i_term] + '_' + 'RT_' + norm_term + '.csv'\n condi.to_csv(path_or_buf=csv_f_name, index_label=['Condition', 'Dose'])\n i_term += 1", "def write_table_to_csv(table: List[List], filename: str):\n with open(filename, 'w') as csvfile:\n writer = csv.writer(csvfile, delimiter='\\t')\n for row in table:\n writer.writerow(row)", "def save_cosine_matrix_df(cosine_matrix_df, run_parameters):\n new_file_name = kn.create_timestamped_filename(\"cosine_matrix\", \"df\")\n cosine_matrix_df.to_csv(\n os.path.join(run_parameters['results_directory'], new_file_name), header=True, index=True, sep='\\t')", "def write_csv(output_filename, filenames, truncate=None):\n num_tokens = defaultdict(list)\n with open(output_filename, 'w') as fout:\n fout.write('label,sentence\\n')\n for filename in tqdm(filenames):\n sentiment = int('pos' in filename)\n # sentiment = int(os.path.basename(filename)[:-4].split(\"_\")[1])\n\n with open(filename) as f:\n doc = f.read()\n # Remove HTML\n soup = BeautifulSoup(doc)\n doc = soup.get_text()\n\n if truncate:\n doc = \" 
\".join(doc.split()[:truncate])\n\n num_tokens[sentiment].append(len(doc.split()))\n fout.write(\"{},{}\\n\".format(sentiment, doc))\n\n for k, v in num_tokens.items():\n print(\"Sentiment {}: Count: {:<10,} Tokens Mean: {:<10,.2f} Min: {:<5} Max: {}\".format(\n k, len(v), np.mean(v), np.min(v), np.max(v)))", "def to_file(self, target_filename, **to_csv_kwargs):\n # TODO: to_file saves will need trigger log file in future versions\n self.df.to_csv(target_filename, index=False, **to_csv_kwargs)" ]
[ "0.69864655", "0.63416266", "0.61624026", "0.61446565", "0.5999539", "0.59739774", "0.59661436", "0.59453785", "0.5905278", "0.59002036", "0.58691335", "0.58681005", "0.5839974", "0.58317584", "0.5826506", "0.58209157", "0.58209157", "0.5786416", "0.5757284", "0.575689", "0.57474923", "0.5733939", "0.57284695", "0.5708526", "0.57060045", "0.56843245", "0.56693304", "0.56284064", "0.5616903", "0.5610576" ]
0.74569845
0
Returns the name of the channel for the given radio and channel.
def channel_name(radio_id: int, channel_id: int) -> str:
    return f"COMM{radio_id} Ch {channel_id}"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def channelName(self):\n channel_list = (\"Neutral\",\n \"BBC1\",\n \"BBC2\",\n \"ITV\",\n \"Channel 4\",\n \"Channel 5\")\n channel_name = channel_list[self.channel]\n return channel_name", "def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")", "def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")", "def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")", "def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")", "def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")", "def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")", "def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")", "def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")", "def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")", "def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")", "def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")", "def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")", "def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")", "def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")", "def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")", "def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")", "def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")", "def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")", "def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")", "def channel_name(self) -> str:\n return self._channel_name", "def getChannel(self, channel):\n channel = channel.lower()\n if channel in self.channels:\n return self.channels[channel]\n else:\n c = IrcChannel()\n self.channels[channel] = c\n return c", "async def get_project_channel_name(self, workspace: WorkspaceEntity):\n await self.client.login(os.environ['DISCORD_BOT_TOKEN'], bot=self.is_bot)\n channel_name = \"\"\n try:\n channel = await self.get_channel(workspace.project_channel_id)\n except HTTPException as error:\n # project channel id may result in Missing Access (code 50001)\n self.logger.critical(\n f\"discord {self.get_project_channel_name.__name__} request failed for workspace {workspace.id} and raised error: {error.text} (code {error.code})\")\n else:\n channel_name = channel.name\n\n await self.client.logout()\n return channel_name", "def getchannel(self, channel):\r\n if isinstance(channel, basstring):\r\n try:\r\n channel = self.getbands().index(channel)\r\n except ValueError:\r\n raise ValueError(\r\n 'The image has no channel \"{}\"'.format(channel))\r\n return self.getband(channel)", "def channel(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"channel\")", "def channel(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"channel\")", "def channel(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"channel\")", "async def test_get_rpc_channel_name(mock_rpc_device) -> None:\n assert get_rpc_channel_name(mock_rpc_device, \"input:0\") == \"test switch_0\"\n assert get_rpc_channel_name(mock_rpc_device, \"input:3\") == \"Test name 
switch_3\"", "def format_channel_name(channel_number: str, channel_name: str | None = None) -> str:\n if channel_name is not None and channel_name != \"\":\n return f\"{channel_name} ({channel_number})\"\n\n return channel_number", "def format_channel_name(channel_number: str, channel_name: str | None = None) -> str:\n if channel_name is not None and channel_name != \"\":\n return f\"{channel_name} ({channel_number})\"\n\n return channel_number" ]
[ "0.72019213", "0.7178412", "0.7178412", "0.7178412", "0.7178412", "0.7178412", "0.7178412", "0.7178412", "0.7178412", "0.7178412", "0.7178412", "0.7178412", "0.7178412", "0.7178412", "0.7178412", "0.7178412", "0.7178412", "0.7178412", "0.7178412", "0.7178412", "0.70424455", "0.63369983", "0.63366276", "0.62782836", "0.6268224", "0.62431204", "0.62431204", "0.619592", "0.6152598", "0.6152598" ]
0.8595431
0
List of playable units in the flight.
def client_units(self) -> List[FlyingUnit]:
        return [u for u in self.units if u.is_human()]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_alive_units(self):\n alive_units = []\n for unit in self.units:\n if not unit.is_alive():\n continue\n alive_units.append(unit)\n return alive_units", "def get_units(self) -> List[str]:\n result = []\n for elements in self._get_results_list():\n result.append(elements[3])\n return result", "def getListOfUnits(self, *args):\n return _libsbml.UnitDefinition_getListOfUnits(self, *args)", "def list_data_units(self):\n return self.data_units.items()", "def get_active_units(self):\n alive_units = self.get_alive_units()\n active_units = []\n for alive_unit in alive_units:\n if not alive_unit.ready_to_attack():\n continue\n active_units.append(alive_unit)\n return active_units", "def get_units(self, obj: Dimension) -> [Unit]:\n try:\n return obj.units()\n except KeyError as e:\n logging.error(str(e))\n return []", "def get(self, *args):\n return _libsbml.ListOfUnits_get(self, *args)", "def units(self):\n return self._units", "def units(self):\n return self._units", "def units(inventory):\n return inventory.reduce(convert.get_units)", "def get_player_items(self):\n return self.player.items", "def get_units(status, application_name):\n units = []\n for unit_name, unit in iter_units(status):\n if unit_name.startswith('{}/'.format(application_name)):\n units.append((unit_name, unit,))\n return units", "def getUnits(self):\n return _libsbml.Species_getUnits(self)", "def get_unit_names(status, application_name):\n return [name for name, unit in get_units(status, application_name)]", "def buildUnits(self, obs, UnitName, Quantity):\n \n \"\"\"drones, overlords, zerglings, roaches, hydralisks, corrupters, queen(may need own function)\"\"\" \n actions.FUNCTIONS.select_larva(\"select\")\n if (UnitName == \"drone\"):\n if self.can_do(obs, actions.FUNCTIONS.Train_Zergling_quick.id):\n return actions.FUNCTIONS.Train_Drone_quick(\"now\")\n if (UnitName == \"overlord\"):\n if self.can_do(obs, actions.FUNCTIONS.Train_Zergling_quick.id):\n return actions.FUNCTIONS.Train_Overlord_quick(\"now\")\n if (UnitName == \"zergling\"):\n if self.can_do(obs, actions.FUNCTIONS.Train_Zergling_quick.id):\n return actions.FUNCTIONS.Train_Zergling_quick(\"now\")\n if (UnitName == \"Roach\"):\n if self.can_do(obs, actions.FUNCTIONS.Train_Zergling_quick.id):\n return actions.FUNCTIONS.Train_Roach_quick(\"now\")\n if (UnitName == \"hydralisks\"):\n if self.can_do(obs, actions.FUNCTIONS.Train_Zergling_quick.id):\n return actions.FUNCTIONS.Train_Hydralisk_quick(\"now\")\n if (UnitName == \"corruptor\"):\n if self.can_do(obs, actions.FUNCTIONS.Train_Zergling_quick.id):\n return actions.FUNCTIONS.Train_Corruptor_quick(\"now\")\n \"\"\"UnitsForControlGroup: [#drone, #zergling, #roaches, #hydralisks]\"\"\"", "def unit_display_names(self) -> localedata.LocaleDataDict:\n return self._data['unit_display_names']", "def unit_classes(self):\n return self._unit_classes", "def units(self):\n pass", "def units(self):\n return self.children()", "def units(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OneDashboardPageWidgetBulletUnitArgs']]]]:\n return pulumi.get(self, \"units\")", "def subunits(self):\n\n return [get_target_by_id(id_) for id_ in self._subunit_ids]", "def get_idle_units(self, n):\n status = self.get_status() \n unitindices = []\n for i in xrange(len(status)):\n if status[i] == 'idle':\n unitindices.append(i)\n if len(unitindices) == n:\n break\n return unitindices", "def getListOfUnitDefinitions(self, *args):\n return _libsbml.Model_getListOfUnitDefinitions(self, *args)", "def get_units(**kwargs):\n\n instance = 
Ceic._get_instance()\n\n get_dictionaries_method = instance._dictionary_facade.get_units\n result = instance._make_request(get_dictionaries_method, **kwargs)\n\n return result", "def list_power_supply_units(self):\n\n doc = self.client.enumerate(uris.CIM_PowerSupply)\n\n psus = doc.find('.//s:Body/wsen:EnumerateResponse/wsman:Items',\n wsman.NS_MAP)\n\n return [self._parse_psus(psu) for psu in psus]", "def _player_list(self):\n game = self.ctrl.game\n return game.players[self.i_to_player_id(0)], game.players[self.i_to_player_id(1)]", "def enumerateUnits(self):\n return self._lowLevelEnumerateUnits()", "def get_song_list(self):\n return self.song_list", "def get_players():\n return [Mpris_Player(item)\n for item in Mpris_Utils.get_session().list_names()\n if re.match(Mpris_Interfaces.MEDIA_PLAYER, item) > 0]", "def getPlayerList(self):\n return(self.playerList)" ]
[ "0.7038853", "0.69471025", "0.6792572", "0.674546", "0.65399146", "0.6447552", "0.6128354", "0.60884583", "0.60884583", "0.60749876", "0.6040792", "0.60030854", "0.59831977", "0.58550435", "0.5834707", "0.57983744", "0.5797732", "0.57909584", "0.5786042", "0.57521987", "0.57261926", "0.57251996", "0.5724398", "0.57231855", "0.56733227", "0.56730247", "0.5646802", "0.56390494", "0.5630128", "0.56131697" ]
0.6965238
1
Returns the type of aircraft in this flight.
def aircraft_type(self) -> FlyingType:
        return self.units[0].unit_type
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getAircraft(self, code):\n \t\n return self.aircraftDict[code.upper()]", "def landing_airport(self, **kwargs: Any) -> \"Airport\":\n\n from ..core.distance import guess_airport\n\n # The following cast secures the typing\n self = cast(\"Flight\", self)\n\n data = self.data.sort_values(\"timestamp\")\n return guess_airport(data.iloc[-1], **kwargs)", "def orbital_type(self):\n return self.name[0].upper()", "async def getArmedType(self):\n armed_type = await self.director.getItemVariableValue(\n self.item_id, \"ARMED_TYPE\"\n )\n return armed_type", "def takeoff_airport(self, **kwargs: Any) -> \"Airport\":\n\n from ..core.distance import guess_airport\n\n # The following cast secures the typing\n self = cast(\"Flight\", self)\n\n data = self.data.sort_values(\"timestamp\")\n return guess_airport(data.iloc[0], **kwargs)", "async def getAlarmType(self):\n alarm_type = await self.director.getItemVariableValue(\n self.item_id, \"ALARM_TYPE\"\n )\n return alarm_type", "def get_type(self):\n return self.__animal_type", "def can_attack_air(self) -> bool:\n if hasattr(self.type_data.proto, \"weapons\"):\n weapons = self.type_data.proto.weapons\n weapon = next(\n (weapon for weapon in weapons if weapon.type in [TARGET_TYPE.Air.value, TARGET_TYPE.Any.value]), None\n )\n return weapon is not None\n return False", "def AceType(self):\n raw_type = super(AceHeader, self).AceType\n return ACE_CLASS_TYPE_MAPPER[raw_type]", "def air_range(self) -> Union[int, float]:\n if hasattr(self.type_data.proto, \"weapons\"):\n weapons = self.type_data.proto.weapons\n weapon = next(\n (weapon for weapon in weapons if weapon.type in [TARGET_TYPE.Air.value, TARGET_TYPE.Any.value]), None\n )\n if weapon:\n return weapon.range\n return 0", "def get(self):\n\n if self.ansatz_type == 'farhi':\n return FarhiAnsatz(self.layers, self.sweeps_per_layer, self.activation_function)\n\n elif self.ansatz_type == 'alternating_layer_tdcnot':\n return AlternatingLayerTDCnotAnsatz(self.layers, self.sweeps_per_layer, self.activation_function)\n\n elif self.ansatz_type == 'sim_circ_13_half':\n return SimCirc13Half(self.layers, self.sweeps_per_layer, self.activation_function)\n\n elif self.ansatz_type == 'sim_circ_13':\n return SimCirc13(self.layers, self.sweeps_per_layer, self.activation_function)\n\n elif self.ansatz_type == 'sim_circ_14_half':\n return SimCirc14Half(self.layers, self.sweeps_per_layer, self.activation_function)\n\n elif self.ansatz_type == 'sim_circ_14':\n return SimCirc14(self.layers, self.sweeps_per_layer, self.activation_function)\n\n elif self.ansatz_type == 'sim_circ_15':\n return SimCirc15(self.layers, self.sweeps_per_layer, self.activation_function)\n\n elif self.ansatz_type == 'sim_circ_19':\n return SimCirc19(self.layers, self.sweeps_per_layer, self.activation_function)\n\n elif self.ansatz_type == 'abbas':\n return Abbas(self.layers, self.sweeps_per_layer, self.activation_function)\n\n elif self.ansatz_type is None or 'null':\n return NullAnsatz(self.layers, self.sweeps_per_layer, self.activation_function)\n\n else:\n raise ValueError(\"Invalid ansatz type: {}\".format(self.ansatz_type))", "def getHabitatType(self):\n\t\treturn self.habitatType", "def _spawn_aircraft() -> Tuple[float, float, float, str]:\n\n # Get aircraft coordinates.\n x = random.uniform(-CONTROL_ZONE_RADIUS, CONTROL_ZONE_RADIUS)\n y = math.sqrt(CONTROL_ZONE_RADIUS ** 2 - x ** 2)\n y = y if random.randint(0, 1) else -y\n\n ang = _get_ac_heading(x, y)\n\n return x, y, ang, \"A\"", "def get_iptype(self, refresh=False):\n return 
self.get_attr(ZONE_ENTRY['ZIPTYPE'], refresh)", "def operator_aircraft_info(self, apath):\r\n opfolder_path = apath.split(\"0 NEW\")[-1]\r\n opfolder = opfolder_path.replace(\"/\", \"\")\r\n opfolder = opfolder.replace(\"\\\\\", \"\")\r\n opfolder = opfolder.split(\" \")\r\n operator = opfolder[0].strip()\r\n aircraft = opfolder[1].strip()\r\n return operator, aircraft", "def getType(self):\n if (self.type == 's'):\n #suit type\n type = \"suit\"\n elif (self.type == 'b'):\n #boss type\n type = \"boss\"\n else:\n notify.error(\"Invalid DNA type: \", self.type)\n\n return type", "def air_weapon(self):\n if self._air_weapon:\n return self._air_weapon\n if self.weapons:\n self._air_weapon = next(\n (weapon for weapon in self.weapons if weapon.type in {TARGET_TYPE.Air.value, TARGET_TYPE.Any.value}),\n None,\n )\n return self._air_weapon\n return None", "def card_type(self):\n return self._card_type", "def get_asr_type(cfg):\n return cfg['ASR']['type']", "def weather_type(self):\n return self._weather_type", "def get_type(self):\n types = dict(ACTIVITY_TYPES)\n return types.get(self.activity_type, \"N/A\")", "def availability_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"availability_type\")", "def retrieve_aircraft_var(self):\n query_ac = QSqlQuery(\"SELECT immatriculation FROM Aircraft\")\n list_to_be_filled_by_aircraft = []\n while query_ac.next():\n aircraft = query_ac.value(0)\n list_to_be_filled_by_aircraft.append(aircraft)\n return list_to_be_filled_by_aircraft", "def vehicle_type():\n pass", "def asset_type(self) -> \"AssetType\":\n return self._values.get(\"asset_type\")", "def asset_type(self) -> \"AssetType\":\n return self._values.get(\"asset_type\")", "def can_attack_air(self) -> bool:\n return self.air_weapon", "def Type(self):\r\n\t\treturn self._get_attribute('type')", "def get_airplane_state(self, airplane_instance, chosen_time_and_date):\r\n\r\n chosen_airplane = airplane_instance\r\n voyages_list = self.ioAPI.load_all_voyages() # List of all voyages\r\n airplane_state = \"IDLE\" # initializes the airplane state at IDLE\r\n NOW = datetime.datetime.fromisoformat(chosen_time_and_date)\r\n\r\n for voyage in voyages_list:\r\n voyage_plane = voyage.get_plane_id()\r\n\r\n departure_out = datetime.datetime.fromisoformat(voyage.get_departure_out())\r\n arrival_out = datetime.datetime.fromisoformat(voyage.get_arrival_out())\r\n departure_home = datetime.datetime.fromisoformat(voyage.get_departure_home())\r\n arrival_home = datetime.datetime.fromisoformat(voyage.get_arrival_home())\r\n available = arrival_home + datetime.timedelta(hours = 1)\r\n \r\n if voyage_plane == chosen_airplane:\r\n if departure_out <= NOW and arrival_home > NOW:\r\n break \r\n else:\r\n return airplane_state\r\n \r\n if departure_out <= NOW and NOW <= arrival_out:\r\n airplane_state = \"Flight {} is on its way to {} and will be available again on: {}\".format(voyage.get_flight_number_out(), voyage.get_dest_id(), available)\r\n elif departure_home <= NOW and NOW <= arrival_home:\r\n airplane_state = \"Flight {} is on its way to KEF and will be available again on: {}\".format(voyage.get_flight_number_back(), available)\r\n elif arrival_out <= NOW and NOW <= departure_home:\r\n airplane_state = \"IN INTERMISSION\" \r\n\r\n return airplane_state", "def get_type(self):\n return self.type" ]
[ "0.6327988", "0.6149255", "0.6035162", "0.59869665", "0.5729768", "0.56378406", "0.55666333", "0.5497275", "0.5460754", "0.5430962", "0.53903615", "0.5380597", "0.53774005", "0.5364313", "0.535924", "0.53532493", "0.5348655", "0.5333701", "0.5304017", "0.5296175", "0.5294151", "0.51991737", "0.5156821", "0.5124142", "0.5121322", "0.5121322", "0.5119159", "0.51097274", "0.5108662", "0.5105749" ]
0.84770125
0
Returns the number of preset channels for the given radio.
def num_radio_channels(self, radio_id: int) -> int:
        # Note: pydcs only initializes the radio presets for client slots.
        return self.client_units[0].num_radio_channels(radio_id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_num_channels():\r\n check_mixer()\r\n return sdl.Mix_GroupCount(-1)", "def num_channels(chip):\n return int(utils.readstr_all(os.path.join(_CHIP_PATH(chip), \"npwm\")))", "def num_channels(self):\n with audioread.audio_open(self.path) as f:\n return f.channels", "def n_channels(self):\n return len(self.channels)", "def get_num_channels(self):\r\n check_mixer()\r\n return sdl.Mix_GroupCount(self._chunk_tag)", "def n_channels(self):\n return self._n_channels", "def channel_size(self):\n if self.channels is None:\n return 0\n return self.channels.size", "def num_channels(self):\n return 3", "def num_of_channels(self) -> int:\n return len(self.non_zero_channels())", "def get_num_channels(self):\n return _uhd_swig.rx_streamer_get_num_channels(self)", "def channels(self) -> int:\n return len(self._channel_arrays)", "def get_num_channels(self):\n return _uhd_swig.tx_streamer_get_num_channels(self)", "def get_n_channels(self): \n return self.n_out_channels", "def nchans(self):\n return self.bw / self.bw_chan", "def get_num_cards(self):\n \n return self._hand.get_size()", "def get_num_channels(x):\n return x.get_shape().as_list()[-1]", "def get_img_channels(self, pipeline_cfg: RVPipelineConfig) -> int:\n all_scenes = pipeline_cfg.dataset.all_scenes\n if len(all_scenes) == 0:\n return 3\n for scene_cfg in all_scenes:\n if scene_cfg.raster_source.channel_order is not None:\n return len(scene_cfg.raster_source.channel_order)\n log.info(\n 'Could not determine number of image channels from '\n 'DataConfig.img_channels or RasterSourceConfig.channel_order. '\n 'Building first scene to figure it out. This might take some '\n 'time. To avoid this, specify one of the above.')\n with get_tmp_dir() as tmp_dir:\n scene = all_scenes[0].build(\n pipeline_cfg.dataset.class_config,\n tmp_dir,\n use_transformers=True)\n img_channels = scene.raster_source.num_channels\n return img_channels", "def getNchan(self):\n return self.shape(squeeze=False)[2]", "def alloc_for_radio(self, radio: Radio) -> RadioFrequency:\n allocator = self.radio_allocators[radio]\n try:\n while (channel := next(allocator)) in self.allocated_channels:\n pass\n self.reserve(channel)\n return channel\n except StopIteration:\n raise OutOfChannelsError(radio)", "def channel_count(self):\n index = self._ordered_input_names.index('channel_count')\n return self._inputs[index]", "def get_control_count(cmd):\n return len(cmd.control_qubits)", "def comchans(self, nick):\n comchannels = 0\n for chan in self.chandb:\n if nick in chan:\n comchannels += 1\n return comchannels", "def countChannels(channels):\n if (channels == ''):\n return 0\n tokens = channels.split(',')\n nspw = len(tokens)\n count = {}\n for i in range(nspw):\n string = tokens[i].split(':')\n if (len(string) == 2):\n spw,string = string\n else:\n string = string[0]\n spw = 0\n ranges = string.split(';')\n for r in ranges:\n c0 = int(r.split('~')[0])\n c1 = int(r.split('~')[1])\n if (c0 > c1):\n casalogPost(\"Invalid channel range: c0 > c1 (%d > %d)\" % (c0,c1))\n return\n channels = [1+int(r.split('~')[1])-int(r.split('~')[0]) for r in ranges]\n count[spw] = np.sum(channels)\n if (nspw == 1):\n count = count[spw]\n return(count)", "async def voice_channel_name_length(\n channel: P('channel', 'Select a voice channel', channel_types = [ChannelType.guild_voice])\n):\n return len(channel.name)", "def get_circuit_size(self, circuit: ACircuit, recursive: bool = False) -> Tuple[int, int]:", "def manage_channels(_) -> int:\n return 1 << 4", "def manage_channels(_) -> int:\n return 1 << 
4", "def get_size(channels):\n\n if channels not in (1, 2):\n raise ValueError('Wrong channels value. Must be equal to 1 or 2')\n\n return _get_size(channels)", "def channels(self) -> int:\n return self._channels", "def _get_nr_of_bits(self):\n return sum(self._size_var)" ]
[ "0.70029294", "0.6583076", "0.6431523", "0.6311275", "0.61871827", "0.61813056", "0.6068922", "0.6058218", "0.6054343", "0.6016357", "0.5975814", "0.5950489", "0.5895036", "0.5827683", "0.5824527", "0.57661426", "0.5753626", "0.5744624", "0.5721213", "0.5715618", "0.568809", "0.5622671", "0.5621975", "0.555096", "0.55446804", "0.54730546", "0.54730546", "0.5459269", "0.5433006", "0.53798354" ]
0.7514637
0
Returns the radio and channel number for the given frequency.
def channel_for(
            self, frequency: RadioFrequency) -> Optional[ChannelAssignment]:
        return self.frequency_to_channel_map.get(frequency, None)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_frequency(self):\r\n _debug('simq03b_api.get_frequency')\r\n \r\n x = self.query('SOUR:FREQ:CW?')\r\n if x == None: return None\r\n return float(x)", "def get_frequency(self, c, channel=-1):\n if (channel == -1):\n channel = self.guess_channel()\n\n try:\n frequency = self.binding.get_frequency_num(channel)\n return frequency * THz;\n except Exception, e:\n return self.handle_wavemeter_error(e)", "def freq_to_chan(frequency,bandwidth,n_chans):\n if frequency < 0:\n frequency = bandwidth + frequency\n if frequency > bandwidth:\n raise RuntimeError(\"that frequency is too high.\")\n return round(float(frequency)/bandwidth*n_chans) % n_chans", "def fft_index(fft, frequency):\n\treturn 2 * int(len(fft) * frequency / AUDIO_RATE) # Not entirely clear on why I need to multiply by 2 here. I don't need to if I use fft instead of rfft, but then I get a bunch of crazy high frequency FFT data, or is it complex numbers or something...", "def get_frequency(self):\r\n x = self.query('SOUR:FREQ:CW?')\r\n if x == None: return None\r\n return float(x)", "def get_frequency(self):\r\n x = self.query('SOUR:FREQ:CW?')\r\n if x == None: return None\r\n return float(x)", "def get_rx_freq(self, rx_freq):\n\t\treturn self._rx_freq", "def get_cw_freq(self):\n return self.get_frequency(self.synth)", "def get_freqmult(self,):\n\n BYTE0 = (self._read('FR1')[0] & 0b01111100) >> 2\n self.freqmult = BYTE0\n\n if BYTE0 == 0:\n return 1\n else:\n return (BYTE0)", "def get_frequency(self):\r\n # print '*********in get freq'\r\n self.cntr.run('FREQ 1')\r\n f_0_ = self.cntr.get_measurements(1)\r\n self.f_0 = f_0_[0]\r\n self.cntr.run('FREQ 2')\r\n f_rep_ = self.cntr.get_measurements(1)\r\n self.f_rep = f_rep_[0]", "def get_frequency(self,):\n\n # TODO: Find way to appropriately reconvert the frequency to its initial\n # TODO: Value or alert that the value is APPROXIMATE\n FTW = int (0)\n freq = int(0)\n\n FTW_bytes = self._read('CFTW0')\n FTW = FTW.from_bytes(FTW_bytes,'big')\n freq = FTW*self.clock_freq/2**32\n\n print('Latest frequency set: ', \"{:.2e}\".format(freq), 'Hz')\n print(['%.2e' % elem for elem in self.frequencies])\n\n return self.frequencies", "def get_frequency(self):\r\n x = self.query('FREQ?')\r\n if x == None: return None\r\n return float(x)", "def get_frequency(self, **kwargs):\n #self.resource.clear()\n channel = kwargs.get(\"channel\", self.active_channel)\n use_log = \"LOG\" in self.scpi.query_sweep_type(channel).upper()\n f_start = self.scpi.query_f_start(channel)\n f_stop = self.scpi.query_f_stop(channel)\n f_npoints = self.scpi.query_sweep_n_points(channel)\n if use_log:\n freq = np.logspace(np.log10(f_start), np.log10(f_stop), f_npoints)\n else:\n freq = np.linspace(f_start, f_stop, f_npoints)\n\n frequency = skrf.Frequency.from_f(freq, unit=\"Hz\")\n frequency.unit = kwargs.get(\"f_unit\", \"Hz\")\n return frequency", "def get_frequency(self):\r\n return self.f", "def get_tone_frequency(self):\n return self.tone_frequency", "def getFreq(self,):\n\t\treturn self.freq;", "def get_freq(self):\n return self.freq", "def tone_to_freq(tone):\n return math.pow(2, (tone - 69.0) / 12.0) * 440.0", "def get_frequency(self):\r\n return self._api.get_frequency()", "def get_frequency(self, c):\n yield self.wm.write(\":READ:POW?\\r\\n\")\n yield self.wm.write(\":READ:FREQ?\\r\\n\")\n freq = yield self.wm.read_very_eager()\n if freq != '':\n\n temp = freq.split()\n temp = map(float,temp)\n temp.sort()\n if temp[len(temp)-1] >40.0:\n freq = temp[len(temp)-1]\n self.freq_changed((freq))\n self.freq = freq\n 
if temp[0] < 40.0:\n amp = temp[0]\n self.amp_changed((amp))\n self.amp = amp\n returnValue(self.freq)", "def chan2freq( rs, chan, nureference):\n dx = rs.bandwidthHz/float(rs.refChan)\n dchan = float( chan - rs.refChan)\n freq = rs.centerFreqHz + (dx*dchan)\n return freq", "def fft_frequency(fft, index):\n\treturn index * AUDIO_RATE / len(fft) / 2 # Same as in fft_index, see above", "def clnvr_freq(record):\n try:\n esp = float(re.search(r\"(AF_ESP=)([0-9,.e-]+)\", record[7]).group(2))\n except:\n esp = -1\n\n try:\n exac = float(re.search(r\"(AF_EXAC=)([0-9,.e-]+)\", record[7]).group(2))\n except:\n exac = -1\n\n try:\n tgp = float(re.search(r\"(AF_TGP=)([0-9,.e-]+)\", record[7]).group(2))\n except:\n tgp = -1\n\n return max(esp, exac, tgp)", "def freq(self, frequency: Optional[int]):", "def frequency(self):\n return self._get('frequency')", "def get_freq_details(diagnostics_dir, verbose=False):\n metafile_science = find_metadata_file(diagnostics_dir, 'mslist-scienceData*txt', verbose=False)\n if not metafile_science:\n return None, None, None\n\n with open(metafile_science, 'r') as mslist_file:\n lines = mslist_file.readlines()\n\n in_spw_block = False\n for line in lines:\n if in_spw_block:\n parts = line.split()\n chan_width = float(parts[10])*1000. # convert kHz to Hz\n cfreq = parts[12] #MHz\n nchan = parts[7]\n break\n else:\n in_spw_block = line.find('Frame') >= 0\n\n return chan_width, cfreq, nchan", "def get_frequency(rg = None):\n if isinstance(rg, int):\n rg = [rg]\n elif rg is None:\n rg = _cpu.get_online_cpus()\n\n freqs = _cpu.get_frequencies()\n minfs = _cpu.get_min_freq()\n maxfs = _cpu.get_max_freq()\n\n print(\"CPU Current (MHz) Minimum (MHz) Maximum (MHz)\")\n for core in rg:\n freq = int(freqs[core]/1000) if core in freqs else '???'\n minf = int(minfs[core]/1000) if core in minfs else '???'\n maxf = int(maxfs[core]/1000) if core in maxfs else '???'\n print(f\"{core:<3} {freq:<13} {minf:<13} {maxf:<13}\")", "def get_ao_manual_control_freq( channel ):\n freq = float64(0)\n CALL('GetPhysicalChanAOManualControlFreq', channel, byref(freq))\n return freq.value", "def getNfftNo(fs):\n\tdN = {# fs:(Nfft,No)\n\t\t 10:(4096,3072), # for 1 Hz LPF (fsNew=10)\n\t\t 12:(2048,1024), # for 2 Hz LPF (fsNew=12)\n\t\t 50:(4096,2048), # for 6 Hz LPF (fsNew=50)\n\t\t62.5:(1024, 512),\n\t\t 250:(2048, 0),\n\t\t 500:(4096, 0),\n\t\t1000:(8192, 0)\n\t}\n\tNfft,No = dN.get(fs, (False,False))\n\tif not Nfft: raise '\\nunaccounted for sample rate = %f\\n' % fs\n\treturn Nfft,No", "def _get_cpu_name_count_frequency() -> Tuple[str, int, int]:\n cpu_info = get_cpu_info()\n frequency, what = cpu_info[\"hz_actual\"]\n assert what == 0, \"I have no idea what this is, but it seem to always be 0...\"\n return cpu_info[\"brand_raw\"], cpu_info[\"count\"], frequency" ]
[ "0.6490108", "0.6414235", "0.6379573", "0.6330263", "0.6316427", "0.6316427", "0.62827206", "0.61941063", "0.6178761", "0.61771286", "0.61676437", "0.6102844", "0.60944504", "0.5997203", "0.59923995", "0.5978198", "0.5965196", "0.5921212", "0.59142876", "0.58896494", "0.58850616", "0.5876547", "0.5802412", "0.57512885", "0.57262146", "0.570838", "0.56721884", "0.5671212", "0.5666192", "0.5663132" ]
0.6561942
0
Assigns a preset radio channel to the given frequency.
def assign_channel(self, radio_id: int, channel_id: int,
                       frequency: RadioFrequency) -> None:
        for unit in self.client_units:
            unit.set_radio_channel_preset(radio_id, channel_id, frequency.mhz)

        # One frequency could be bound to multiple channels. Prefer the first,
        # since with the current implementation it will be the lowest numbered
        # channel.
        if frequency not in self.frequency_to_channel_map:
            self.frequency_to_channel_map[frequency] = ChannelAssignment(
                radio_id, channel_id
            )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_frequency(self, frequency):\n\n if frequency == 1:\n self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 0, 0)\n self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 1, 0)\n if frequency == 2:\n self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 0, 1)\n self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 1, 0)\n if frequency == 3:\n self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 0, 0)\n self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 1, 1)\n if frequency == 4:\n self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 0, 1)\n self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 1, 1)\n self.__bus.write_byte_data(\n self.__rtcaddress, self.CONTROL, self.__rtcconfig)\n return", "def setCarrierFrequency(self, frequency):\n if self._params['modulationMode'] != \"IQMixer\":\n print \"WARNING ! Carrier Frequency change also Tone Frequency in %s mode\" % self._params['modulationMode']\n self._MWSource.setFrequency(frequency)", "def set_freq_channel(self, value):\n # Check format\n if value > 0xFF:\n raise StationException(\"Frequency channels must be 1-byte length\")\n # Switch to command mode if necessary\n if self._sermode == SerialModem.Mode.DATA:\n self.enter_command_mode()\n # Run AT command\n response = self.run_at_command(\"ATCH=\" + \"{0:02X}\".format(value) + \"\\r\")\n if response is None:\n return False\n if response[0:2] == \"OK\":\n self.freq_channel = value\n return True\n return False", "def set_frequency(self):\n\t\t\"\"\"For Frequency Prescalar-0\"\"\"\n\t\tbus.write_byte_data(PCA9530_2C_1_DEFAULT_ADDRESS, PCA9530_2C_1_REG_PSC0, PCA9530_2C_1_PSC0_USERDEFINED)\n\t\t\n\t\t\"\"\"For Frequency Prescalar-1\"\"\"\n\t\tbus.write_byte_data(PCA9530_2C_1_DEFAULT_ADDRESS, PCA9530_2C_1_REG_PSC1, PCA9530_2C_1_PSC1_USERDEFINED)", "def configure_freq(self, channel):\n if 0 < channel <= 3:\n self.write(\":CONF:FREQ (@{0})\".format(channel))", "def set_frequency(miner: Miner, login, frequency):\n #default for S9 is 550\n #\"bitmain-freq\" : \"550\",\n commands = get_changeconfigcommands(getminerfilename(miner), 'bitmain-freq', frequency)\n sendcommands_and_restart(miner, login, commands)", "def set_frequency(self, f=1e9):\r\n _debug('simq03b_api.set_frequency')\r\n \r\n self.write('SOUR:FREQ:CW '+str(f))", "def set_frequency(self, new_freq):\n self.freq = new_freq\n self.ts_resample()", "def set_frequency(self, pin, frequency):\n raise NotImplementedError", "def set_frequency(self, f=1e9):\r\n self.write('SOUR:FREQ:CW '+str(f))", "def set_frequency(self, f=1e9):\r\n self.write('SOUR:FREQ:CW '+str(f))", "def set_frequency(self, newval):\n rest_val = str(int(round(newval * 65536.0, 1)))\n return self._setAttr(\"frequency\", rest_val)", "def set_pwm_freq(self, freq_hz):\n prescaleval = 25000000.0 # 25MHz\n prescaleval /= 4096.0 # 12-bit\n prescaleval /= float(freq_hz)\n prescaleval -= 1.0\n prescale = int(math.floor(prescaleval + 0.5))\n oldmode = self.i2cBus.read_byte_data(self.address, MODE1)\n newmode = (oldmode & 0x7F) | 0x10 # sleep\n self.i2cBus.write_byte_data(self.address, MODE1, newmode) # go to sleep\n self.i2cBus.write_byte_data(self.address, PRESCALE, prescale)\n self.i2cBus.write_byte_data(self.address, MODE1, oldmode)\n time.sleep(0.005)\n self.i2cBus.write_byte_data(self.address, MODE1, oldmode | 0x80)", "def set_frequency(self):\r\n def move_synth(delta_f_synth):\r\n sign_delta_f_synth = int(delta_f_synth/abs(delta_f_synth))\r\n stepsize_Hz = int(10)\r\n num_steps = 
int(abs(delta_f_synth)/stepsize_Hz)\r\n remainder_Hz = round(abs(delta_f_synth)%stepsize_Hz,1)\r\n self.synth.set_incr(stepsize_Hz, 'Hz')\r\n for nn in range(num_steps): # slowly move the synth by delta_f_synth in stepsize steps\r\n self.synth.walk(sign_delta_f_synth)\r\n time.sleep(0.1)\r\n self.synth.set_incr(remainder_Hz, 'Hz')\r\n self.synth.walk(sign_delta_f_synth)\r\n time.sleep(0.1)\r\n \r\n def get_delta_f_synth():\r\n #get latest f_rep,f_0\r\n self.get_frequency() \r\n #calculate required f_rep to get desired PA_freq. switches n and frep in above eq.\r\n f_rep_goal = (self.setfrequency - self.sign_lock * self.f_lock - self.sign_0 * self.f_0) / self.n\r\n # print 'f_rep_goal = %.0f Hz'%f_rep_goal\r\n # lock uses 3rd harmonic so synth must be set to *3\r\n delta_f_synth = (f_rep_goal - self.f_rep)*3 \r\n delta_f_synth = round(delta_f_synth,1)\r\n # print 'delta_f_synth = %.1f Hz'%delta_f_synth\r\n return delta_f_synth\r\n \r\n iteration = 0\r\n delta_f_synth = get_delta_f_synth()\r\n while abs(delta_f_synth) > self.synth_tol:\r\n move_synth(delta_f_synth)\r\n delta_f_synth = get_delta_f_synth()\r\n iteration += 1\r\n if iteration > self.max_iteration:\r\n # print 'REACHED MAX ITERATION: delta_f_synth = %.1f'%delta_f_synth\r\n break", "def set_initfreq(self, ch, f):\n\t\tif _PM_FREQ_MIN <= f <= _PM_FREQ_MAX:\n\t\t\tif ch == 1:\n\t\t\t\tself.init_freq_ch1 = int(f);\n\t\t\telif ch == 2:\n\t\t\t\tself.init_freq_ch2 = int(f);\n\t\t\telse:\n\t\t\t\traise ValueError(\"Invalid channel number\")\n\t\telse:\n\t\t\traise ValueError(\"Initial frequency is not within the valid range.\")", "def set_frequency(self, f=1e9):\r\n return self._api.set_frequency(f)", "def frequency(self, frequency: int):\n\n self._frequency = frequency", "def set_frequency(self, f=1e9):\r\n self.write('FREQ '+str(f))", "def setFreq(self,newfreq):\n\t\tself.freq = newfreq;", "def frequency(self, frequency: int):\n self._freq = freq", "def preset_callback(channel):\n \n global currentRadio\n global preset_sw, preset_list\n \n for i in range(6):\n preset_sw[i][1] = GPIO.input(preset_sw[i][0])\n sleep(2)\n \n for i in range(6):\n if (preset_sw[i][1]) == 0:\n if preset_list[i] != currentRadio:\n currentRadio = preset_list[i]\n chooseRadio(currentRadio)\n break\n songInfo()", "def set_pwm_freq(self, servo_frequency: int):\n prescaleval = float(self._frequency)\n prescaleval /= float(self._resolution)\n prescaleval /= float(servo_frequency)\n prescaleval -= 1\n logger.info('Setting PWM frequency to %d Hz', servo_frequency)\n logger.info('Estimated pre-scale: %f', prescaleval)\n prescale = int(math.floor(prescaleval + 0.5))\n logger.info('Final pre-scale: %d', prescale)\n oldmode = self._device.readU8(MODE1)\n newmode = (oldmode & 0x7F) | 0x10 # sleep\n self._device.write8(MODE1, newmode) # go to sleep\n self._device.write8(PRESCALE, prescale)\n self._device.write8(MODE1, oldmode)\n time.sleep(0.005)\n self._device.write8(MODE1, oldmode | 0x80)", "def setfrequency(self, value):\n self.instrument.write('FREQ {0}'.format(value))", "def frequency(self, frequency):\n\n self._frequency = frequency", "def frequency(self, freq):\n self.set_frequency(f'{freq}' if self._is_min_max(freq) else f'{freq}HZ')", "def change_frequency(self, frequency):\n self.frequency = frequency\n self.change_backlog(self.backlog)", "def set_sg_freq():\n freq = request.params.get(\"freq\", 0, type=float)\n output = request.params.get(\"output\", 1, type=int)\n retval = RP_LIB.rp_GenFreq(output, ctypes.c_float(freq))\n if retval != 0:\n LOG.error(\"Failed to 
set signal generator frequency. Error code: %s\", ERROR_CODES[retval])", "def set_freq_hz(self, freq=None):\n if freq is None:\n freq = 1000000 * self.def_freq\n self.instr.write('F1 ' + str(freq) + ' H')\n time.sleep(self.sleep_time)", "def UpdateFrequency(self, newfreq):\n\n if self.strategy:\n setattr(self.strategy, managers.UTICK, newfreq)", "def from_frequency(frequency:float, detune=0) -> 'Pitch':\n return Pitch(1200*np.log2(frequency/440) + detune)" ]
[ "0.6795375", "0.6594799", "0.65484196", "0.6417588", "0.63847566", "0.6349733", "0.6272557", "0.62402856", "0.62306625", "0.6048603", "0.6048603", "0.60219395", "0.60067165", "0.6002149", "0.59168196", "0.59141624", "0.5895643", "0.58917177", "0.58648485", "0.5853643", "0.58495444", "0.58142674", "0.5808407", "0.5794533", "0.57902944", "0.5787836", "0.5760366", "0.5738031", "0.5675829", "0.56585777" ]
0.67176265
1